
[net-next,v1,2/2] page_pool: make inflight returns more robust via blocking alloc cache

Message ID 157323722783.10408.5060384163093162553.stgit@firesoul
State Changes Requested
Delegated to: David Miller
Series Change XDP lifetime guarantees for page_pool objects

Commit Message

Jesper Dangaard Brouer Nov. 8, 2019, 6:20 p.m. UTC
When requesting page_pool shutdown, it is a requirement that the
consumer RX-side has been disconnected, but __page_pool_request_shutdown()
has a loop that empties the RX alloc cache each time. Producers can
still be in-flight, but they MUST NOT return pages into the RX alloc
cache. Thus, the RX alloc cache MUST remain empty after the first
clearing, else it is considered a bug. Let's make it clearer that this
is only cleared once.

This patch empties the RX alloc cache only once and then blocks the
alloc cache. The alloc cache is blocked by pretending it is full, and
also by poisoning the last element. This blocks producers from using
the fast-path, and a consumer (which is not allowed) will see a NULL
pointer.
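
For illustration only (not part of the patch), below is a minimal
user-space sketch of why pretending the cache is full plus poisoning
the last slot blocks both sides. struct fake_pool, producer_recycle()
and consumer_alloc() are hypothetical stand-ins for the page_pool fast
paths; PP_ALLOC_CACHE_SIZE is only reused here as a local constant
name.

/* Illustrative sketch of the "block alloc cache" trick, not kernel code. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define PP_ALLOC_CACHE_SIZE 128

struct fake_pool {
	unsigned int count;
	void *cache[PP_ALLOC_CACHE_SIZE];
};

/* Producer fast-path stand-in: only recycles into the cache when there
 * is room. With count pinned at PP_ALLOC_CACHE_SIZE the cache always
 * looks full, so the fast-path is refused and the caller has to fall
 * back to the slow path instead.
 */
static bool producer_recycle(struct fake_pool *pool, void *page)
{
	if (pool->count == PP_ALLOC_CACHE_SIZE)
		return false;	/* cache looks full -> fast-path refused */
	pool->cache[pool->count++] = page;
	return true;
}

/* Consumer fast-path stand-in: pops the top slot when count is
 * non-zero. After blocking, the top slot is poisoned with NULL, so a
 * (forbidden) consumer that dereferences the result crashes loudly
 * instead of silently reusing a page.
 */
static void *consumer_alloc(struct fake_pool *pool)
{
	if (pool->count)
		return pool->cache[--pool->count];	/* NULL after poisoning */
	return NULL;
}

/* The blocking step itself: pretend full and poison the last slot. */
static void block_alloc_cache(struct fake_pool *pool)
{
	pool->cache[PP_ALLOC_CACHE_SIZE - 1] = NULL;
	pool->count = PP_ALLOC_CACHE_SIZE;
}

int main(void)
{
	struct fake_pool pool = { 0 };
	int dummy;

	block_alloc_cache(&pool);
	/* Producer fast-path is refused; the page must take the slow path. */
	assert(producer_recycle(&pool, &dummy) == false);
	/* A (forbidden) consumer gets the poisoned NULL slot. */
	assert(consumer_alloc(&pool) == NULL);
	return 0;
}

The point of blocking both directions is that producers fail safely
into the slow path, while any remaining consumer, which must not exist
at this point, fails loudly.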

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
---
 include/net/page_pool.h |    2 ++
 net/core/page_pool.c    |   28 +++++++++++++++++++++-------
 2 files changed, 23 insertions(+), 7 deletions(-)

Patch

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 2cbcdbdec254..ab39b7955f9b 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -107,6 +107,8 @@  struct page_pool {
 	 * refcnt serves purpose is to simplify drivers error handling.
 	 */
 	refcount_t user_cnt;
+
+	bool shutdown_in_progress;
 };
 
 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 226f2eb30418..89feee635d08 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -357,7 +357,7 @@  void __page_pool_free(struct page_pool *pool)
 	if (!page_pool_put(pool))
 		return;
 
-	WARN(pool->alloc.count, "API usage violation");
+	WARN(!pool->shutdown_in_progress, "API usage violation");
 	WARN(!ptr_ring_empty(&pool->ring), "ptr_ring is not empty");
 
 	if (!__page_pool_safe_to_destroy(pool))
@@ -367,8 +367,6 @@  void __page_pool_free(struct page_pool *pool)
 
 	/* Make sure kernel will crash on use-after-free */
 	pool->ring.queue = NULL;
-	pool->alloc.cache[PP_ALLOC_CACHE_SIZE - 1] = NULL;
-	pool->alloc.count = PP_ALLOC_CACHE_SIZE;
 
 	if (pool->p.flags & PP_FLAG_DMA_MAP)
 		put_device(pool->p.dev);
@@ -377,22 +375,38 @@  void __page_pool_free(struct page_pool *pool)
 }
 EXPORT_SYMBOL(__page_pool_free);
 
-/* Request to shutdown: release pages cached by page_pool, and check
- * for in-flight pages
- */
-bool __page_pool_request_shutdown(struct page_pool *pool)
+/* Empty alloc cache once and block it */
+void page_pool_empty_alloc_cache_once(struct page_pool *pool)
 {
 	struct page *page;
 
+	if (pool->shutdown_in_progress)
+		return;
+
 	/* Empty alloc cache, assume caller made sure this is
 	 * no-longer in use, and page_pool_alloc_pages() cannot be
 	 * call concurrently.
 	 */
 	while (pool->alloc.count) {
 		page = pool->alloc.cache[--pool->alloc.count];
+		pool->alloc.cache[pool->alloc.count] = NULL;
 		__page_pool_return_page(pool, page);
 	}
 
+	/* Block alloc cache, pretend it's full and poison last element */
+	pool->alloc.cache[PP_ALLOC_CACHE_SIZE - 1] = NULL;
+	pool->alloc.count = PP_ALLOC_CACHE_SIZE;
+
+	pool->shutdown_in_progress = true;
+}
+
+/* Request to shutdown: release pages cached by page_pool, and check
+ * for in-flight pages. RX-alloc side MUST be stopped prior to this.
+ */
+bool __page_pool_request_shutdown(struct page_pool *pool)
+{
+	page_pool_empty_alloc_cache_once(pool);
+
 	/* No more consumers should exist, but producers could still
 	 * be in-flight.
 	 */