@@ -3102,7 +3102,7 @@ dp_netdev_pmd_flush_output_packets(struct dp_netdev_pmd_thread *pmd,
tx_qid = pmd->static_tx_qid;
}
- netdev_send(p->port->netdev, tx_qid, &p->output_pkts, true,
+ netdev_send(p->port->netdev, tx_qid, &p->output_pkts,
dynamic_txqs);
dp_packet_batch_init(&p->output_pkts);
}
@@ -680,7 +680,7 @@ netdev_bsd_rxq_drain(struct netdev_rxq *rxq_)
*/
static int
netdev_bsd_send(struct netdev *netdev_, int qid OVS_UNUSED,
- struct dp_packet_batch *batch, bool may_steal,
+ struct dp_packet_batch *batch,
bool concurrent_txq OVS_UNUSED)
{
struct netdev_bsd *dev = netdev_bsd_cast(netdev_);
@@ -731,7 +731,7 @@ netdev_bsd_send(struct netdev *netdev_, int qid OVS_UNUSED,
}
ovs_mutex_unlock(&dev->mutex);
- dp_packet_delete_batch(batch, may_steal);
+ dp_packet_delete_batch(batch, true);
return error;
}
@@ -1833,12 +1833,12 @@ dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
struct dp_packet_batch *batch,
- bool may_steal, bool concurrent_txq OVS_UNUSED)
+ bool concurrent_txq OVS_UNUSED)
{
- if (OVS_UNLIKELY(!may_steal || batch->packets[0]->source != DPBUF_DPDK)) {
+ if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
dpdk_do_tx_copy(netdev, qid, batch);
- dp_packet_delete_batch(batch, may_steal);
+ dp_packet_delete_batch(batch, true);
} else {
dp_packet_batch_apply_cutlen(batch);
__netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count);
@@ -1848,11 +1848,11 @@ netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
- struct dp_packet_batch *batch, bool may_steal,
+ struct dp_packet_batch *batch,
bool concurrent_txq)
{
if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
- dp_packet_delete_batch(batch, may_steal);
+ dp_packet_delete_batch(batch, true);
return;
}
@@ -1861,12 +1861,11 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
}
- if (OVS_UNLIKELY(!may_steal ||
- batch->packets[0]->source != DPBUF_DPDK)) {
+ if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
struct netdev *netdev = &dev->up;
dpdk_do_tx_copy(netdev, qid, batch);
- dp_packet_delete_batch(batch, may_steal);
+ dp_packet_delete_batch(batch, true);
} else {
int dropped;
int cnt = batch->count;
@@ -1894,12 +1893,11 @@ netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
- struct dp_packet_batch *batch, bool may_steal,
- bool concurrent_txq)
+ struct dp_packet_batch *batch, bool concurrent_txq)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
- netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
+ netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
return 0;
}
@@ -2858,8 +2856,7 @@ dpdk_ring_open(const char dev_name[], dpdk_port_t *eth_port_id)
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid,
- struct dp_packet_batch *batch, bool may_steal,
- bool concurrent_txq)
+ struct dp_packet_batch *batch, bool concurrent_txq)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
unsigned i;
@@ -2872,7 +2869,7 @@ netdev_dpdk_ring_send(struct netdev *netdev, int qid,
dp_packet_rss_invalidate(batch->packets[i]);
}
- netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
+ netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
return 0;
}
@@ -1056,7 +1056,7 @@ netdev_dummy_rxq_drain(struct netdev_rxq *rxq_)
static int
netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
- struct dp_packet_batch *batch, bool may_steal,
+ struct dp_packet_batch *batch,
bool concurrent_txq OVS_UNUSED)
{
struct netdev_dummy *dev = netdev_dummy_cast(netdev);
@@ -1128,7 +1128,7 @@ netdev_dummy_send(struct netdev *netdev, int qid OVS_UNUSED,
ovs_mutex_unlock(&dev->mutex);
}
- dp_packet_delete_batch(batch, may_steal);
+ dp_packet_delete_batch(batch, true);
return error;
}
@@ -1193,7 +1193,7 @@ netdev_linux_rxq_drain(struct netdev_rxq *rxq_)
* expected to do additional queuing of packets. */
static int
netdev_linux_send(struct netdev *netdev_, int qid OVS_UNUSED,
- struct dp_packet_batch *batch, bool may_steal,
+ struct dp_packet_batch *batch,
bool concurrent_txq OVS_UNUSED)
{
int error = 0;
@@ -1291,7 +1291,7 @@ netdev_linux_send(struct netdev *netdev_, int qid OVS_UNUSED,
}
free_batch:
- dp_packet_delete_batch(batch, may_steal);
+ dp_packet_delete_batch(batch, true);
return error;
@@ -343,9 +343,8 @@ struct netdev_class {
* If the function returns a non-zero value, some of the packets might have
* been sent anyway.
*
- * If 'may_steal' is false, the caller retains ownership of all the
- * packets. If 'may_steal' is true, the caller transfers ownership of all
- * the packets to the network device, regardless of success.
+ * The caller transfers ownership of all the packets to the network
+ * device, regardless of success.
*
* If 'concurrent_txq' is true, the caller may perform concurrent calls
* to netdev_send() with the same 'qid'. The netdev provider is responsible
@@ -365,7 +364,7 @@ struct netdev_class {
* datapath". It will also prevent the OVS implementation of bonding from
* working properly over 'netdev'.) */
int (*send)(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
- bool may_steal, bool concurrent_txq);
+ bool concurrent_txq);
/* Registers with the poll loop to wake up from the next call to
* poll_block() when the packet transmission queue for 'netdev' has
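
For reference, under the updated contract every provider's send() must consume
the batch even when nothing is transmitted. A minimal sketch of a compliant
callback follows; the name 'netdev_noop_send' is invented for illustration and
is not part of this patch.

    /* Minimal provider sketch: the packets are always freed, whether or not
     * they were actually transmitted, because ownership now transfers to the
     * provider unconditionally. */
    static int
    netdev_noop_send(struct netdev *netdev OVS_UNUSED, int qid OVS_UNUSED,
                     struct dp_packet_batch *batch,
                     bool concurrent_txq OVS_UNUSED)
    {
        dp_packet_delete_batch(batch, true);
        return 0;
    }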
@@ -747,9 +747,8 @@ netdev_get_pt_mode(const struct netdev *netdev)
* If the function returns a non-zero value, some of the packets might have
* been sent anyway.
*
- * If 'may_steal' is false, the caller retains ownership of all the packets.
- * If 'may_steal' is true, the caller transfers ownership of all the packets
- * to the network device, regardless of success.
+ * The caller transfers ownership of all the packets to the network device,
+ * regardless of success.
*
* If 'concurrent_txq' is true, the caller may perform concurrent calls
* to netdev_send() with the same 'qid'. The netdev provider is responsible
@@ -766,15 +765,12 @@ netdev_get_pt_mode(const struct netdev *netdev)
* cases this function will always return EOPNOTSUPP. */
int
netdev_send(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
- bool may_steal, bool concurrent_txq)
+ bool concurrent_txq)
{
- int error = netdev->netdev_class->send(netdev, qid, batch, may_steal,
+ int error = netdev->netdev_class->send(netdev, qid, batch,
concurrent_txq);
if (!error) {
COVERAGE_INC(netdev_sent);
- if (!may_steal) {
- dp_packet_batch_reset_cutlen(batch);
- }
}
return error;
}
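
The dp_packet_batch_reset_cutlen() call can go for the same reason: the batch
never returns to the caller, so its truncation state does not need to be
cleared here. A provider that honors the cut length applies it itself, as
netdev_dpdk_vhost_send() does above with dp_packet_batch_apply_cutlen(), and
then frees or hands off the packets. A rough provider-side sketch of that flow
(the name 'netdev_example_send' and the transmit placeholder are illustrative
only, not part of this patch):

    static int
    netdev_example_send(struct netdev *netdev OVS_UNUSED, int qid OVS_UNUSED,
                        struct dp_packet_batch *batch,
                        bool concurrent_txq OVS_UNUSED)
    {
        size_t i;

        /* Truncate each packet to its requested cut length; the cutlen state
         * dies with the batch below, so the caller no longer resets it. */
        dp_packet_batch_apply_cutlen(batch);

        for (i = 0; i < batch->count; i++) {
            struct dp_packet *packet = batch->packets[i];

            /* Placeholder: a real provider would pass dp_packet_data(packet)
             * and dp_packet_size(packet) to its transmit routine here. */
            (void) packet;
        }

        dp_packet_delete_batch(batch, true);
        return 0;
    }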
@@ -181,7 +181,7 @@ int netdev_rxq_drain(struct netdev_rxq *);
/* Packet transmission. */
int netdev_send(struct netdev *, int qid, struct dp_packet_batch *,
- bool may_steal, bool concurrent_txq);
+ bool concurrent_txq);
void netdev_send_wait(struct netdev *, int qid);
/* Flow offloading. */
Not needed anymore because 'may_steal' is already handled in the
dpif-netdev layer and is always true.

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 lib/dpif-netdev.c     |  2 +-
 lib/netdev-bsd.c      |  4 ++--
 lib/netdev-dpdk.c     | 25 +++++++++++--------------
 lib/netdev-dummy.c    |  4 ++--
 lib/netdev-linux.c    |  4 ++--
 lib/netdev-provider.h |  7 +++----
 lib/netdev.c          | 12 ++++--------
 lib/netdev.h          |  2 +-
 8 files changed, 26 insertions(+), 34 deletions(-)