@@ -941,6 +941,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
enum ice_xdp_cfg cfg_type);
int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
void ice_map_xdp_rings(struct ice_vsi *vsi);
+bool ice_rebuild_pending(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
@@ -2994,7 +2994,7 @@ static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
* so it happens strictly before or after .ndo_bpf().
* In case it has happened before, we do not have anything attached to rings
*/
-static bool ice_rebuild_pending(struct ice_vsi *vsi)
+bool ice_rebuild_pending(struct ice_vsi *vsi)
{
return ice_is_reset_in_progress(vsi->back->state) &&
!vsi->rx_rings[0]->desc;
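
With the static qualifier dropped, ice_rebuild_pending() becomes visible outside ice_main.c, and the AF_XDP code starts using it in the final hunk below. The ordering argument it encodes is easiest to read from the reset side; the following is a minimal sketch of that ordering, assuming a simplified control flow and an invented call site (ice_reset_ordering_sketch() is not part of the patch):

/* Illustrative sketch only: the reset-path ordering that
 * ice_rebuild_pending() relies on. The call site and flow are
 * assumptions for this sketch, not driver code.
 */
static void ice_reset_ordering_sketch(struct ice_vsi *vsi)
{
	/* A reset request first marks itself in pf->state; ICE_PFR_REQ
	 * is one of the bits ice_is_reset_in_progress() tests.
	 */
	set_bit(ICE_PFR_REQ, vsi->back->state);

	/* The VSI is then closed under rtnl_lock(), the same lock
	 * .ndo_bpf() runs under, so the two execute strictly before
	 * or after each other, never concurrently.
	 */
	rtnl_lock();
	ice_vsi_close(vsi);	/* frees ring memory: rx_rings[0]->desc == NULL */
	rtnl_unlock();

	/* From here until the rebuild reconfigures the rings, both
	 * halves of ice_rebuild_pending() hold: a reset is in progress
	 * and ring 0 has no descriptor memory, so .ndo_bpf() has
	 * nothing attached to the rings to stop or restart.
	 */
}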
@@ -163,7 +163,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
struct ice_tx_ring *xdp_ring;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- int timeout = 50;
int fail = 0;
int err;
 
@@ -175,13 +174,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
xdp_ring = vsi->xdp_rings[q_idx];
q_vector = rx_ring->q_vector;
 
- while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
- timeout--;
- if (!timeout)
- return -EBUSY;
- usleep_range(1000, 2000);
- }
-
synchronize_net();
netif_trans_update(vsi->netdev);
netif_carrier_off(vsi->netdev);
@@ -251,7 +243,6 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
synchronize_net();
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
netif_carrier_on(vsi->netdev);
- clear_bit(ICE_CFG_BUSY, vsi->state);
 
return fail;
}
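
Together, the three hunks above retire the ICE_CFG_BUSY handshake around the queue-pair stop/start path: ice_qp_dis() no longer spins to take the bit and ice_qp_ena() no longer clears it on the way out. Rendered standalone, the dropped pattern is a sleeping bit-lock with a bounded retry; the sketch below only restates what the removed lines did:

/* The removed handshake, shown in isolation (illustrative). The
 * disable side could burn up to 50 sleeps of 1-2 ms, roughly
 * 100 ms worst case, before failing with -EBUSY and aborting the
 * reconfiguration midway.
 */
static int ice_cfg_busy_take_sketch(struct ice_vsi *vsi)
{
	int timeout = 50;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}
	return 0;
}

/* The enable side released the bit unconditionally. */
static void ice_cfg_busy_release_sketch(struct ice_vsi *vsi)
{
	clear_bit(ICE_CFG_BUSY, vsi->state);
}

With the bit gone, serialization of these paths is presumably left to the locking the callers already hold, consistent with the rtnl_lock() ordering that the ice_rebuild_pending() comment above leans on for the reset-versus-.ndo_bpf() race.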
@@ -379,7 +370,8 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
return -EINVAL;
}
 
- if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
+ if_running = !ice_rebuild_pending(vsi) &&
+ netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
 
if (if_running) {
struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
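
Finally, the if_running computation in ice_xsk_pool_setup() treats a pending rebuild like a downed interface: with no descriptors on the rings there is nothing to quiesce, so the queue-pair stop/start is skipped while the pool bookkeeping still happens, and the rebuild picks it up later. A simplified sketch of the resulting flow, not the verbatim function body (error unrolling trimmed; ice_xsk_pool_enable()/ice_xsk_pool_disable() stand in for the pool bookkeeping):

/* Simplified flow of ice_xsk_pool_setup() after this change
 * (illustrative).
 */
static int ice_xsk_pool_setup_sketch(struct ice_vsi *vsi,
				     struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running;
	int ret;

	/* Only touch live rings: skip the stop/start dance when the
	 * interface is down, XDP is off, or a pending rebuild will
	 * recreate the rings anyway.
	 */
	if_running = !ice_rebuild_pending(vsi) &&
		     netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

	if (if_running) {
		ret = ice_qp_dis(vsi, qid);	/* quiesce the queue pair */
		if (ret)
			return ret;
	}

	/* Pool bookkeeping happens either way; a pending rebuild
	 * observes it once ice_vsi_rebuild() reconfigures the rings.
	 */
	ret = pool ? ice_xsk_pool_enable(vsi, pool, qid)
		   : ice_xsk_pool_disable(vsi, qid);

	if (if_running) {
		int err = ice_qp_ena(vsi, qid);	/* restart the pair */

		if (!ret)
			ret = err;
	}

	return ret;
}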