@@ -632,7 +632,6 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
 
 	/* since we are recycling buffers we should seldom need to alloc */
 	if (likely(page)) {
-		rx_ring->rx_stats.page_reuse_count++;
 		return true;
 	}
 
@@ -1033,7 +1032,6 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
 	if (ice_can_reuse_rx_page(rx_buf)) {
 		/* hand second half of page back to the ring */
 		ice_reuse_rx_page(rx_ring, rx_buf);
-		rx_ring->rx_stats.page_reuse_count++;
 	} else {
 		/* we are not reusing the buffer so unmap it */
 		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
@@ -193,7 +193,6 @@ struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
-	u64 page_reuse_count;
 };
 
 /* this enum matches hardware bits and is meant to be used by DYN_CTLN