@@ -768,7 +768,8 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
* pointing to; otherwise, the DMA mapping needs to be destroyed and
* page freed
*/
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
+ int rx_buf_pgcnt)
{
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@@ -779,7 +780,8 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)

#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
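+ /* use the refcount snapshot taken when the buffer was fetched */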
+ if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
return false;
#else
#define ICE_LAST_OFFSET \
@@ -859,6 +860,23 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}
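
+/**
+ * ice_rx_buf_page_count - snapshot the refcount of an Rx buffer's page
+ * @rx_buf: Rx buffer whose page is examined
+ *
+ * Read page_count() once, when the buffer is fetched, so that the reuse
+ * decision in ice_can_reuse_rx_page() is based on a consistent value.
+ */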
+static int ice_rx_buf_page_count(struct ice_rx_buf *rx_buf)
+{
+#if (PAGE_SIZE < 8192)
+ return page_count(rx_buf->page);
+#else
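+ /* the snapshot is unused when PAGE_SIZE >= 8192 */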
+ return 0;
+#endif
+}
+
/**
* ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
@@ -870,11 +880,14 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
*/
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
- const unsigned int size)
+ const unsigned int size,
+ int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;

rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
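+ /* take the page refcount snapshot used by the later reuse check */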
+ *rx_buf_pgcnt = ice_rx_buf_page_count(rx_buf);
prefetchw(rx_buf->page);
*skb = rx_buf->skb;

@@ -1017,7 +1029,9 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* of the rx_buf. It will either recycle the buffer or unmap it and free
* the associated resources.
*/
-static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+static void
+ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ int rx_buf_pgcnt)
{
u16 ntc = rx_ring->next_to_clean + 1;

@@ -1028,7 +1040,7 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (!rx_buf)
return;

- if (ice_can_reuse_rx_page(rx_buf)) {
+ if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
} else {
@@ -1088,6 +1100,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
unsigned int xdp_res, xdp_xmit = 0;
struct bpf_prog *xdp_prog = NULL;
struct xdp_buff xdp;
+ int rx_buf_pgcnt;
bool failure;

xdp.rxq = &rx_ring->xdp_rxq;
@@ -1125,7 +1138,8 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
dma_rmb();

if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
- ice_put_rx_buf(rx_ring, NULL);
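+ /* no buffer was fetched for this descriptor; the snapshot is unused */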
+ ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
continue;
}
@@ -1134,7 +1147,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;

/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+ rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
if (!size) {
xdp.data = NULL;
@@ -1174,7 +1187,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
total_rx_pkts++;

cleaned_count++;
- ice_put_rx_buf(rx_ring, rx_buf);
+ ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
continue;
construct_skb:
if (skb) {
@@ -1193,7 +1206,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
break;
}

- ice_put_rx_buf(rx_ring, rx_buf);
+ ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
cleaned_count++;

/* skip if it is NOP desc */