[PATCH 1/1] mm: Fix struct page layout on 32-bit systems

Jesper Dangaard Brouer brouer at redhat.com
Thu Apr 15 19:08:32 BST 2021


On Wed, 14 Apr 2021 21:56:39 +0000
David Laight <David.Laight at ACULAB.COM> wrote:

> From: Matthew Wilcox
> > Sent: 14 April 2021 22:36
> > 
> > On Wed, Apr 14, 2021 at 09:13:22PM +0200, Jesper Dangaard Brouer wrote:  
> > > (In case others want to reproduce this.)  At first I could not
> > > reproduce it on ARM32.  Then I found out that enabling CONFIG_XEN on
> > > ARCH=arm was needed to trigger the issue, as it enables
> > > CONFIG_ARCH_DMA_ADDR_T_64BIT.
> > 
> > hmmm ... you should be able to provoke it by enabling ARM_LPAE,
> > which selects PHYS_ADDR_T_64BIT, and
> > 
> > config ARCH_DMA_ADDR_T_64BIT
> >         def_bool 64BIT || PHYS_ADDR_T_64BIT
> >   
> > >  struct page {
> > >         long unsigned int          flags;                /*     0     4 */
> > >
> > >         /* XXX 4 bytes hole, try to pack */
> > >
> > >         union {
> > >                 struct {
> > >                         struct list_head lru;            /*     8     8 */
> > >                         struct address_space * mapping;  /*    16     4 */
> > >                         long unsigned int index;         /*    20     4 */
> > >                         long unsigned int private;       /*    24     4 */
> > >                 };                                       /*     8    20 */
> > >                 struct {
> > >                         dma_addr_t dma_addr  
> 
> Adding __packed here will remove the 4-byte hole before the union,
> and the compiler seems clever enough to know that anything following
> a 'long' must also be 'long' aligned.

I played with __packed in the patch below, and I can confirm it seems
to work.
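
For anyone curious about the layout effect without firing up pahole,
here is a minimal userspace sketch (GCC/Clang).  The struct names and
uint32_t stand-ins are made up to mimic a 32-bit build; this is not
the real struct page:

/* Made-up mini structs: uint32_t stands in for the 32-bit 'unsigned
 * long' and pointers of an arm32 build, so the hole reproduces on any
 * host.  Compile with: gcc packed_demo.c */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct page_plain {
	uint32_t flags;
	union {
		struct { uint32_t next, prev; } lru; /* stand-in for list_head */
		uint64_t dma_addr;  /* 8-byte alignment forces a hole after flags */
	};
};

struct page_packed {
	uint32_t flags;
	union {
		struct { uint32_t next, prev; } lru;
		uint64_t dma_addr __attribute__((packed)); /* alignment 1: no hole */
	};
};

int main(void)
{
	printf("plain:  dma_addr at offset %zu, struct size %zu\n",
	       offsetof(struct page_plain, dma_addr), sizeof(struct page_plain));
	printf("packed: dma_addr at offset %zu, struct size %zu\n",
	       offsetof(struct page_packed, dma_addr), sizeof(struct page_packed));
	return 0;
}

This prints offset 8 / size 16 for the plain variant and offset 4 /
size 12 for the packed one, matching the pahole output above.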

> So you don't get anything horrid like byte accesses.
> On 64-bit, dma_addr will remain 64-bit aligned.
> On arm32, dma_addr will be 32-bit aligned - but being forced into two
> 32-bit accesses won't make any difference.

See the patch below, where I swap the 32-bit halves of the DMA address
so that page->compound_head keeps bit zero cleared.  (It is the
simplest fix I could come up with.)
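
To make the bit-zero constraint concrete, a small userspace sketch of
the overlap (made-up union, not the real struct page; little-endian
assumed -- which half of the 64-bit value aliases compound_head depends
on byte order, and the swap in the patch below flips that choice):

/* uint32_t stands in for a 32-bit 'unsigned long'. */
#include <stdio.h>
#include <stdint.h>

union mini_page_words {
	uint32_t compound_head;	/* bit zero set marks a tail page */
	uint64_t dma_addr;	/* __packed in the actual patch */
};

int main(void)
{
	union mini_page_words u;

	u.dma_addr = (1ULL << 33) | 0x1000;	/* >4GB, page-aligned */

	/* A page-aligned DMA address keeps bit zero of the aliased
	 * low word cleared, so the page is not mistaken for a tail
	 * page (on this little-endian layout). */
	printf("compound_head=0x%08x bit0=%u\n",
	       (unsigned)u.compound_head, (unsigned)(u.compound_head & 1));
	return 0;
}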


[PATCH] page_pool: handle 32-bit archs with 64-bit dma_addr_t

From: Jesper Dangaard Brouer <brouer at redhat.com>

Workaround for storing a 64-bit DMA address in struct page on 32-bit
machines.  page->dma_addr shares its area with page->compound_head,
which uses bit zero to mark compound pages.  This is okay, as DMA
addresses are aligned and thus have bit zero cleared.

In the 32-bit case, page->compound_head is 32-bit, so it only overlaps
one 32-bit half of the 64-bit dma_addr_t.  Solve this by swapping the
32-bit halves of dma_addr.

Signed-off-by: Jesper Dangaard Brouer <brouer at redhat.com>
---
 include/linux/mm_types.h |    2 +-
 include/net/page_pool.h  |   21 ++++++++++++++++++++-
 net/core/page_pool.c     |    8 +++++---
 3 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6613b26a8894..27406e3b1e1b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -100,7 +100,7 @@ struct page {
 			 * @dma_addr: might require a 64-bit value even on
 			 * 32-bit architectures.
 			 */
-			dma_addr_t dma_addr;
+			dma_addr_t dma_addr __packed;
 		};
 		struct {	/* slab, slob and slub */
 			union {
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index b5b195305346..c2329088665c 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -196,9 +196,28 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 	page_pool_put_full_page(pool, page, true);
 }
 
+static inline
+dma_addr_t page_pool_dma_addr_read(dma_addr_t dma_addr)
+{
+	/* Workaround for storing a 64-bit DMA address in struct page on
+	 * 32-bit machines.  page->dma_addr shares its area with
+	 * page->compound_head, which uses bit zero to mark compound
+	 * pages.  This is okay, as DMA addresses are aligned and have
+	 * bit zero cleared.
+	 *
+	 * In the 32-bit case, page->compound_head only overlaps one
+	 * 32-bit half of the 64-bit dma_addr_t.  Solve by swapping its halves.
+	 */
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	if (sizeof(unsigned long) == 4) /* 32-bit system */
+		dma_addr = (dma_addr << 32) | (dma_addr >> 32);
+#endif
+	return dma_addr;
+}
+
 static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
-	return page->dma_addr;
+	return page_pool_dma_addr_read(page->dma_addr);
 }
 
 static inline bool is_page_pool_compiled_in(void)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ad8b0707af04..813598ea23f6 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
 					  struct page *page,
 					  unsigned int dma_sync_size)
 {
+	dma_addr_t dma = page_pool_dma_addr_read(page->dma_addr);
+
 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
-	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
+	dma_sync_single_range_for_device(pool->p.dev, dma,
 					 pool->p.offset, dma_sync_size,
 					 pool->p.dma_dir);
 }
@@ -226,7 +228,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 		put_page(page);
 		return NULL;
 	}
-	page->dma_addr = dma;
+	page->dma_addr = page_pool_dma_addr_read(dma);
 
 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
@@ -294,7 +296,7 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
 		 */
 		goto skip_dma_unmap;
 
-	dma = page->dma_addr;
+	dma = page_pool_dma_addr_read(page->dma_addr);
 
 	/* When page is unmapped, it cannot be returned our pool */
 	dma_unmap_page_attrs(pool->p.dev, dma,

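A side note on the helper naming: page_pool_dma_addr_read() is also
used on the store side when setting page->dma_addr.  That works
because swapping the 32-bit halves is an involution: applying it twice
gives back the original value.  A quick userspace check of that
property (plain C, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t swap_halves(uint64_t v)
{
	return (v << 32) | (v >> 32);
}

int main(void)
{
	uint64_t dma = 0x123456789000ULL;	/* arbitrary aligned address */

	assert(swap_halves(swap_halves(dma)) == dma);	/* involution */
	printf("round-trip OK\n");
	return 0;
}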

--
Best regards,
  Jesper Dangaard Brouer
  MSc.CS, Principal Kernel Engineer at Red Hat
  LinkedIn: http://www.linkedin.com/in/brouer
