From: Yunsheng Lin <linyunsheng@huawei.com>
Date: Fri, 13 Oct 2023 14:48:21 +0800
Subject: [PATCH] page_pool: fragment API support for 32-bit arch with 64-bit
 DMA

Currently page_pool_alloc_frag() is not supported on 32-bit arches
with 64-bit DMA because pp_frag_count and dma_addr_upper overlap in
'struct page' on those arches. Such arches seem to be quite common,
see [1], so drivers may need to work around this limitation when
using the fragment API.

It is assumed that the combination of the above arches with an
address space larger than 16TB does not exist: all of those arches
have a 64-bit equivalent, and it seems logical to use the 64-bit
version on a system with a large address space. It is also assumed
that the DMA address is page aligned when we are DMA-mapping a
page-aligned buffer, see [2].

That means the bottom 12 bits of a DMA address are always zero, so
those bits can be reused on the above arches to support 32b+12b
addressing, i.e. 16TB of memory.

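To make the arithmetic concrete, here is a minimal sketch of the
round trip (the PAGE_SHIFT of 12 and the sample address are
illustrative assumptions; 'stored' stands in for the 32-bit
unsigned long dma_addr field):

	uint64_t addr = 0x123456000ULL;	/* page aligned, above 4GB */
	uint32_t stored = addr >> 12;	/* 0x123456, fits in 32 bits */
	uint64_t restored = (uint64_t)stored << 12;	/* 0x123456000 again */

Any page-aligned address below 1ULL << 44 (16TB) survives the round
trip unchanged.
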
If either assumption turns out to be wrong, a warning is emitted so
that the user can report it to us.

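For instance, a misaligned address cannot be compressed losslessly,
and the round-trip check in the new page_pool_set_dma_addr() below
catches it (hypothetical value, minimal sketch):

	uint64_t bad = 0x123456008ULL;	/* not page aligned */
	uint64_t back = (uint64_t)(uint32_t)(bad >> 12) << 12;
	/* back == 0x123456000 != bad, so the setter reports failure */
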
1. https://lore.kernel.org/all/20211117075652.58299-1-linyunsheng@huawei.com/
2. https://lore.kernel.org/all/20230818145145.4b357c89@kernel.org/

Tested-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
CC: Lorenzo Bianconi <lorenzo@kernel.org>
CC: Alexander Duyck <alexander.duyck@gmail.com>
CC: Liang Chen <liangchen.linux@gmail.com>
CC: Guillaume Tucker <guillaume.tucker@collabora.com>
CC: Matthew Wilcox <willy@infradead.org>
CC: Linux-MM <linux-mm@kvack.org>
Link: https://lore.kernel.org/r/20231013064827.61135-2-linyunsheng@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---

--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -125,18 +125,7 @@ struct page {
 			struct page_pool *pp;
 			unsigned long _pp_mapping_pad;
 			unsigned long dma_addr;
-			union {
-				/**
-				 * dma_addr_upper: might require a 64-bit
-				 * value on 32-bit architectures.
-				 */
-				unsigned long dma_addr_upper;
-				/**
-				 * For frag page support, not supported in
-				 * 32-bit architectures with 64-bit DMA.
-				 */
-				atomic_long_t pp_frag_count;
-			};
+			atomic_long_t pp_frag_count;
 		};
 		struct {	/* Tail pages of compound page */
 			unsigned long compound_head;	/* Bit zero is set */
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -197,7 +197,7 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
 	page_pool_put_full_page(pool, page, true);
 }
 
-#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \
+#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \
 	(sizeof(dma_addr_t) > sizeof(unsigned long))
 
 /**
@@ -211,17 +211,25 @@ static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
 {
 	dma_addr_t ret = page->dma_addr;
 
-	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
-		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
+	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
+		ret <<= PAGE_SHIFT;
 
 	return ret;
 }
 
-static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
 {
+	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
+		page->dma_addr = addr >> PAGE_SHIFT;
+
+		/* We assume page alignment to shave off bottom bits,
+		 * if this "compression" doesn't work we need to drop.
+		 */
+		return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
+	}
+
 	page->dma_addr = addr;
-	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
-		page->dma_addr_upper = upper_32_bits(addr);
+	return false;
 }
 
 static inline bool page_pool_put(struct page_pool *pool)
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -211,10 +211,6 @@ static int page_pool_init(struct page_pool *pool,
 		 */
 	}
 
-	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
-	    pool->p.flags & PP_FLAG_PAGE_FRAG)
-		return -EINVAL;
-
 #ifdef CONFIG_PAGE_POOL_STATS
 	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
 	if (!pool->recycle_stats)
@@ -363,12 +359,20 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
 	if (dma_mapping_error(pool->p.dev, dma))
 		return false;
 
-	page_pool_set_dma_addr(page, dma);
+	if (page_pool_set_dma_addr(page, dma))
+		goto unmap_failed;
 
 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
 
 	return true;
+
+unmap_failed:
+	WARN_ON_ONCE("unexpected DMA address, please report to netdev@");
+	dma_unmap_page_attrs(pool->p.dev, dma,
+			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
+			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+	return false;
 }
 
 static void page_pool_set_pp_info(struct page_pool *pool,