[PATCH net-next 02/12] mm: Provide a page_frag_cache allocator cleanup function
David Howells
dhowells at redhat.com
Wed May 24 08:33:01 PDT 2023
Provide a function to clean up a page_frag_cache allocator rather than
doing it manually each time.
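This lets the open-coded drain that is currently duplicated in each
caller, e.g. (from the gve driver below):

	if (nc->va) {
		__page_frag_cache_drain(virt_to_page(nc->va),
					nc->pagecnt_bias);
		nc->va = NULL;
	}

be replaced by a single call:

	page_frag_cache_clear(nc);

Note that the helper standardises on virt_to_head_page(), as the nvme
callers already did, where the gve and mtk_wed_wo callers used
virt_to_page() on the cached address.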
Signed-off-by: David Howells <dhowells at redhat.com>
cc: "David S. Miller" <davem at davemloft.net>
cc: Eric Dumazet <edumazet at google.com>
cc: Jakub Kicinski <kuba at kernel.org>
cc: Paolo Abeni <pabeni at redhat.com>
cc: Jens Axboe <axboe at kernel.dk>
cc: Jeroen de Borst <jeroendb at google.com>
cc: Catherine Sullivan <csully at google.com>
cc: Shailend Chand <shailend at google.com>
cc: Felix Fietkau <nbd at nbd.name>
cc: John Crispin <john at phrozen.org>
cc: Sean Wang <sean.wang at mediatek.com>
cc: Mark Lee <Mark-MC.Lee at mediatek.com>
cc: Lorenzo Bianconi <lorenzo at kernel.org>
cc: Matthias Brugger <matthias.bgg at gmail.com>
cc: AngeloGioacchino Del Regno <angelogioacchino.delregno at collabora.com>
cc: Keith Busch <kbusch at kernel.org>
cc: Jens Axboe <axboe at fb.com>
cc: Christoph Hellwig <hch at lst.de>
cc: Sagi Grimberg <sagi at grimberg.me>
cc: Chaitanya Kulkarni <kch at nvidia.com>
cc: Andrew Morton <akpm at linux-foundation.org>
cc: Matthew Wilcox <willy at infradead.org>
cc: netdev at vger.kernel.org
cc: linux-arm-kernel at lists.infradead.org
cc: linux-mediatek at lists.infradead.org
cc: linux-nvme at lists.infradead.org
cc: linux-mm at kvack.org
---
 drivers/net/ethernet/google/gve/gve_main.c | 11 ++---------
 drivers/net/ethernet/mediatek/mtk_wed_wo.c | 17 ++---------------
 drivers/nvme/host/tcp.c                    |  8 +-------
 drivers/nvme/target/tcp.c                  |  5 +----
 include/linux/gfp.h                        |  2 ++
 mm/page_frag_alloc.c                       | 17 +++++++++++++++++
 6 files changed, 25 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 8fb70db63b8b..55feab29bed9 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1251,17 +1251,10 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
 
 static void gve_drain_page_cache(struct gve_priv *priv)
 {
-	struct page_frag_cache *nc;
 	int i;
 
-	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
-		nc = &priv->rx[i].page_cache;
-		if (nc->va) {
-			__page_frag_cache_drain(virt_to_page(nc->va),
-						nc->pagecnt_bias);
-			nc->va = NULL;
-		}
-	}
+	for (i = 0; i < priv->rx_cfg.num_queues; i++)
+		page_frag_cache_clear(&priv->rx[i].page_cache);
 }
 
 static int gve_open(struct net_device *dev)
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index 69fba29055e9..d90fea2c7d04 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -286,7 +286,6 @@ mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 static void
 mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 {
-	struct page *page;
 	int i;
 
 	for (i = 0; i < q->n_desc; i++) {
@@ -298,19 +297,12 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 		entry->buf = NULL;
 	}
 
-	if (!q->cache.va)
-		return;
-
-	page = virt_to_page(q->cache.va);
-	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
-	memset(&q->cache, 0, sizeof(q->cache));
+	page_frag_cache_clear(&q->cache);
 }
 
 static void
 mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 {
-	struct page *page;
-
 	for (;;) {
 		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
 
@@ -320,12 +312,7 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 		skb_free_frag(buf);
 	}
 
-	if (!q->cache.va)
-		return;
-
-	page = virt_to_page(q->cache.va);
-	__page_frag_cache_drain(page, q->cache.pagecnt_bias);
-	memset(&q->cache, 0, sizeof(q->cache));
+	page_frag_cache_clear(&q->cache);
 }
 
 static void
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index bf0230442d57..dcc35f6bff8c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1315,7 +1315,6 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
 
 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 {
-	struct page *page;
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
 	struct nvme_tcp_queue *queue = &ctrl->queues[qid];
 	unsigned int noreclaim_flag;
@@ -1326,12 +1325,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 	if (queue->hdr_digest || queue->data_digest)
 		nvme_tcp_free_crypto(queue);
 
-	if (queue->pf_cache.va) {
-		page = virt_to_head_page(queue->pf_cache.va);
-		__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
-		queue->pf_cache.va = NULL;
-	}
-
+	page_frag_cache_clear(&queue->pf_cache);
 	noreclaim_flag = memalloc_noreclaim_save();
 	sock_release(queue->sock);
 	memalloc_noreclaim_restore(noreclaim_flag);
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index ed98df72c76b..984e6ce85dcd 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1464,7 +1464,6 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
 
 static void nvmet_tcp_release_queue_work(struct work_struct *w)
 {
-	struct page *page;
 	struct nvmet_tcp_queue *queue =
 		container_of(w, struct nvmet_tcp_queue, release_work);
 
@@ -1486,9 +1485,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
 	if (queue->hdr_digest || queue->data_digest)
 		nvmet_tcp_free_crypto(queue);
 	ida_free(&nvmet_tcp_queue_ida, queue->idx);
-
-	page = virt_to_head_page(queue->pf_cache.va);
-	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+	page_frag_cache_clear(&queue->pf_cache);
 	kfree(queue);
 }
 
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index ed8cb537c6a7..03504beb51e4 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -314,6 +314,8 @@ static inline void *page_frag_alloc(struct page_frag_cache *nc,
 	return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
 }
 
+void page_frag_cache_clear(struct page_frag_cache *nc);
+
 extern void page_frag_free(void *addr);
 
 #define __free_page(page) __free_pages((page), 0)
diff --git a/mm/page_frag_alloc.c b/mm/page_frag_alloc.c
index bee95824ef8f..e02b81d68dc4 100644
--- a/mm/page_frag_alloc.c
+++ b/mm/page_frag_alloc.c
@@ -46,6 +46,23 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
+/**
+ * page_frag_cache_clear - Clear out a page fragment cache
+ * @nc: The cache to clear
+ *
+ * Discard any pages still cached in a page fragment cache.
+ */
+void page_frag_cache_clear(struct page_frag_cache *nc)
+{
+	if (nc->va) {
+		struct page *page = virt_to_head_page(nc->va);
+
+		__page_frag_cache_drain(page, nc->pagecnt_bias);
+		nc->va = NULL;
+	}
+}
+EXPORT_SYMBOL(page_frag_cache_clear);
+
 void *page_frag_alloc_align(struct page_frag_cache *nc,
 			    unsigned int fragsz, gfp_t gfp_mask,
 			    unsigned int align_mask)