[PATCH net-next] page_pool: split types and declarations from page_pool.h

Simon Horman simon.horman at corigine.com
Mon Jul 24 08:14:01 PDT 2023


On Wed, Jul 19, 2023 at 08:13:37PM +0800, Yunsheng Lin wrote:

Hi Yunsheng,

...

> diff --git a/include/net/page_pool_types.h b/include/net/page_pool_types.h

...

> +struct page_pool {
> +	struct page_pool_params p;
> +
> +	struct delayed_work release_dw;
> +	void (*disconnect)(void *);
> +	unsigned long defer_start;
> +	unsigned long defer_warn;
> +
> +	u32 pages_state_hold_cnt;
> +	unsigned int frag_offset;
> +	struct page *frag_page;
> +	long frag_users;
> +
> +#ifdef CONFIG_PAGE_POOL_STATS
> +	/* these stats are incremented while in softirq context */
> +	struct page_pool_alloc_stats alloc_stats;
> +#endif
> +	u32 xdp_mem_id;
> +
> +	/*
> +	 * Data structure for allocation side
> +	 *
> +	 * Drivers allocation side usually already perform some kind
> +	 * of resource protection.  Piggyback on this protection, and
> +	 * require driver to protect allocation side.
> +	 *
> +	 * For NIC drivers this means, allocate a page_pool per
> +	 * RX-queue. As the RX-queue is already protected by
> +	 * Softirq/BH scheduling and napi_schedule. NAPI schedule
> +	 * guarantee that a single napi_struct will only be scheduled
> +	 * on a single CPU (see napi_schedule).
> +	 */
> +	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
> +
> +	/* Data structure for storing recycled pages.
> +	 *
> +	 * Returning/freeing pages is more complicated synchronization
> +	 * wise, because free's can happen on remote CPUs, with no
> +	 * association with allocation resource.
> +	 *
> +	 * Use ptr_ring, as it separates consumer and producer
> +	 * effeciently, it a way that doesn't bounce cache-lines.

I know this is moved from elsewhere, but: effeciently -> efficiently

> +	 *
> +	 * TODO: Implement bulk return pages into this structure.
> +	 */
> +	struct ptr_ring ring;
> +
> +#ifdef CONFIG_PAGE_POOL_STATS
> +	/* recycle stats are per-cpu to avoid locking */
> +	struct page_pool_recycle_stats __percpu *recycle_stats;
> +#endif
> +	atomic_t pages_state_release_cnt;
> +
> +	/* A page_pool is strictly tied to a single RX-queue being
> +	 * protected by NAPI, due to above pp_alloc_cache. This
> +	 * refcnt serves purpose is to simplify drivers error handling.
> +	 */
> +	refcount_t user_cnt;
> +
> +	u64 destroy_cnt;
> +};
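
As context for the comments above, the alloc/recycle split usually shows up in
drivers as: one pool per RX queue, allocations only from that queue's NAPI
poll, and page returns possibly arriving from other CPUs. A minimal sketch of
that pattern (the myrxq_* names are made up for illustration; page_pool_create(),
page_pool_dev_alloc_pages() and page_pool_put_full_page() are the existing
driver-facing API):

	#include <linux/dma-mapping.h>
	#include <net/page_pool.h>

	/* One pool per RX queue; the alloc side then relies on the
	 * NAPI/softirq protection described in the comment above.
	 */
	static struct page_pool *myrxq_create_pool(struct device *dev,
						   u32 ring_size)
	{
		struct page_pool_params pp = {
			.order		= 0,
			.flags		= PP_FLAG_DMA_MAP, /* pool handles DMA mapping */
			.pool_size	= ring_size,
			.nid		= NUMA_NO_NODE,
			.dev		= dev,
			.dma_dir	= DMA_FROM_DEVICE,
		};

		return page_pool_create(&pp);	/* ERR_PTR() on failure */
	}

	/* Called only from this queue's NAPI poll, so the lockless
	 * pp_alloc_cache fast path is safe to use.
	 */
	static struct page *myrxq_refill_one(struct page_pool *pool)
	{
		return page_pool_dev_alloc_pages(pool);
	}

	/* Returning a page may happen on a remote CPU (e.g. the skb is
	 * freed elsewhere); such returns go through the ptr_ring rather
	 * than the alloc cache, hence allow_direct = false here.
	 */
	static void myrxq_return_page(struct page_pool *pool, struct page *page)
	{
		page_pool_put_full_page(pool, page, false);
	}

The allocation path tries the pp_alloc_cache first and only falls back to the
ptr_ring and then the page allocator when the cache runs empty, which is why
the cache only needs the NAPI guarantee spelled out in the comment.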

...
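
And on the user_cnt comment near the end of the struct above: that refcount is
what lets a driver keep a simple unwind path even when the pool is also
registered as an XDP memory model, since xdp_rxq_info_reg_mem_model() takes its
own reference and xdp_rxq_info_unreg() drops it again. A rough sketch of that
error handling, reusing myrxq_create_pool() from the earlier sketch (struct
myrxq and the pool size are invented; the xdp_rxq_info_*() and
page_pool_destroy() calls are the existing API):

	#include <net/xdp.h>

	struct myrxq {				/* hypothetical driver RX queue */
		struct page_pool	*pool;
		struct xdp_rxq_info	xdp_rxq;
	};

	static int myrxq_open(struct myrxq *rxq, struct net_device *netdev,
			      u16 qid)
	{
		int err;

		rxq->pool = myrxq_create_pool(netdev->dev.parent, 1024);
		if (IS_ERR(rxq->pool))
			return PTR_ERR(rxq->pool);

		err = xdp_rxq_info_reg(&rxq->xdp_rxq, netdev, qid, 0);
		if (err)
			goto err_destroy_pool;

		/* Takes an extra user_cnt reference on the pool. */
		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
						 MEM_TYPE_PAGE_POOL, rxq->pool);
		if (err)
			goto err_unreg_rxq;

		return 0;

	err_unreg_rxq:
		xdp_rxq_info_unreg(&rxq->xdp_rxq);
	err_destroy_pool:
		/* Drops the driver's reference; the pool itself is only
		 * freed once user_cnt reaches zero and all in-flight pages
		 * have been returned.
		 */
		page_pool_destroy(rxq->pool);
		return err;
	}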


