@@ -386,6 +386,35 @@ igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx,
     info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
 }
 
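+/*
+ * Decide whether to insert a VLAN tag for a packet on Tx queue qn; in
+ * VMDq mode the pool's VMVIR policy may override the descriptor's VLE bit.
+ */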
+static void
+igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx,
+                   uint16_t vlan, bool insert_vlan)
+{
+    if (core->mac[MRQC] & 1) {
+        uint16_t pool = qn % IGB_NUM_VM_POOLS;
+
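+        /* The pool's VMVIR.VLANA policy overrides the descriptor's VLE
+         * request: always insert the pool's default VLAN, or never insert. */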
+        if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_DEFAULT) {
+            /* always insert default VLAN */
+            insert_vlan = true;
+            vlan = core->mac[VMVIR0 + pool] & 0xffff;
+        } else if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_NEVER) {
+            insert_vlan = false;
+        }
+    }
+
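+    /* still gated on CTRL.VME; VET supplies the TPID for the tag */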
+    if (insert_vlan && e1000x_vlan_enabled(core->mac)) {
+        net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan,
+                                        core->mac[VET] & 0xffff);
+    }
+}
+
 static bool
 igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx)
 {
@@ -583,12 +612,12 @@ igb_process_tx_desc(IGBCore *core,
 
     if (cmd_type_len & E1000_TXD_CMD_EOP) {
         if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
-            if (cmd_type_len & E1000_TXD_CMD_VLE) {
-                idx = (tx->first_olinfo_status >> 4) & 1;
-                uint16_t vlan = tx->ctx[idx].vlan_macip_lens >> 16;
-                uint16_t vet = core->mac[VET] & 0xffff;
-                net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan, vet);
-            }
+            idx = (tx->first_olinfo_status >> 4) & 1;
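+            /* VLE merely requests insertion; the pool policy has final say */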
+            igb_tx_insert_vlan(core, queue_index, tx,
+                               tx->ctx[idx].vlan_macip_lens >> 16,
+                               !!(cmd_type_len & E1000_TXD_CMD_VLE));
+
             if (igb_tx_pkt_send(core, tx, queue_index)) {
                 igb_on_tx_done_update_stats(core, tx->tx_pkt, queue_index);
             }
@@ -1547,6 +1576,25 @@ igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt,
     igb_update_rx_stats(core, rxi, size, total_size);
 }
 
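+/*
+ * Decide whether to strip the VLAN tag on receive for this ring: a
+ * per-pool choice in VMDq mode, the legacy CTRL.VME behaviour otherwise.
+ */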
+static bool
+igb_rx_strip_vlan(IGBCore *core, const E1000E_RingInfo *rxi)
+{
+    if (core->mac[MRQC] & 1) {
+        uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS;
+        /* Sec 7.10.3.8: CTRL.VME is ignored, only VMOLR/RPLOLR is used */
+        return (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) ?
+                core->mac[RPLOLR] & E1000_RPLOLR_STRVLAN :
+                core->mac[VMOLR0 + pool] & E1000_VMOLR_STRVLAN;
+    }
+
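+    /* outside VMDq mode, fall back to the legacy CTRL.VME behaviour */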
+    return e1000x_vlan_enabled(core->mac);
+}
+
 static inline void
 igb_rx_fix_l4_csum(IGBCore *core, struct NetRxPkt *pkt)
 {
@@ -1627,10 +1675,9 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
 
     ehdr = PKT_GET_ETH_HDR(filter_buf);
     net_rx_pkt_set_packet_type(core->rx_pkt, get_eth_packet_type(ehdr));
-
-    net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
-                               e1000x_vlan_enabled(core->mac),
-                               core->mac[VET] & 0xffff);
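+    /* Defer the iovec attach to the per-ring loop below, where the strip
+     * decision is known; queue assignment (RSS) needs protocols parsed. */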
+    net_rx_pkt_set_protocols(core->rx_pkt, filter_buf, size);
 
     queues = igb_receive_assign(core, ehdr, size, &rss_info, external_tx);
     if (!queues) {
@@ -1638,9 +1685,6 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
         return orig_size;
     }
 
-    total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
-        e1000x_fcs_len(core->mac);
-
     for (i = 0; i < IGB_NUM_QUEUES; i++) {
         if (!(queues & BIT(i)) ||
             !(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
@@ -1649,6 +1693,15 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
 
         igb_rx_ring_init(core, &rxr, i);
 
+        net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
+                                   igb_rx_strip_vlan(core, rxr.i),
+                                   core->mac[VET] & 0xffff);
+
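+        /* total_size now depends on the per-ring strip decision: a
+         * stripped VLAN tag shortens the packet by four bytes. */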
+        total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
+            e1000x_fcs_len(core->mac);
+
         if (!igb_has_rxbufs(core, rxr.i, total_size)) {
             n |= E1000_ICS_RXO;
             trace_e1000e_rx_not_written_to_guest(rxr.i->idx);