Also add checks for RXDCTL/TXDCTL queue enable bits
Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
---
hw/net/igb_core.c | 42 +++++++++++++++++++++++++++++++-----------
hw/net/igb_regs.h | 3 ++-
2 files changed, 33 insertions(+), 12 deletions(-)
diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
index 9bd53cc25f..6bca5459b9 100644
--- a/hw/net/igb_core.c
+++ b/hw/net/igb_core.c
@@ -778,6 +778,19 @@ igb_txdesc_writeback(IGBCore *core, dma_addr_t base,
return igb_tx_wb_eic(core, txi->idx);
}
+static inline bool
+igb_tx_enabled(IGBCore *core, const E1000E_RingInfo *txi)
+{
+ bool vmdq = core->mac[MRQC] & 1;
+ uint16_t qn = txi->idx;
+ uint16_t vfn = (qn > IGB_MAX_VF_FUNCTIONS) ?
+ (qn - IGB_MAX_VF_FUNCTIONS) : qn;
+
+ return (core->mac[TCTL] & E1000_TCTL_EN) &&
+ (vmdq ? (core->mac[VFTE] & BIT(vfn)) : true) &&
+ (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
+}
+
static void
igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
{
@@ -787,8 +800,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
const E1000E_RingInfo *txi = txr->i;
uint32_t eic = 0;
- /* TODO: check if the queue itself is enabled too. */
- if (!(core->mac[TCTL] & E1000_TCTL_EN)) {
+ if (!igb_tx_enabled(core, txi)) {
trace_e1000e_tx_disabled();
return;
}
@@ -1003,6 +1015,7 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr,
queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT);
}
+ queues &= core->mac[VFRE];
igb_rss_parse_packet(core, core->rx_pkt, external_tx != NULL, rss_info);
if (rss_info->queue & 1) {
queues <<= 8;
@@ -1486,7 +1499,7 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4);
uint16_t queues = 0;
- uint32_t n;
+ uint32_t n = 0;
uint8_t min_buf[ETH_ZLEN];
struct iovec min_iov;
struct eth_header *ehdr;
@@ -1566,26 +1579,22 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
}
igb_rx_ring_init(core, &rxr, i);
-
- trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
-
if (!igb_has_rxbufs(core, rxr.i, total_size)) {
retval = 0;
}
}
if (retval) {
- n = E1000_ICR_RXT0;
-
igb_rx_fix_l4_csum(core, core->rx_pkt);
for (i = 0; i < IGB_NUM_QUEUES; i++) {
- if (!(queues & BIT(i))) {
+ if (!(queues & BIT(i)) ||
+ !(core->mac[E1000_RXDCTL(i) >> 2] & E1000_RXDCTL_QUEUE_ENABLE)) {
continue;
}
igb_rx_ring_init(core, &rxr, i);
-
+ trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);
/* Check if receive descriptor minimum threshold hit */
@@ -1594,6 +1603,9 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
}
core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx);
+
+ /* same as RXDW (rx descriptor written back)*/
+ n = E1000_ICR_RXT0;
}
trace_e1000e_rx_written_to_guest(n);
@@ -1981,9 +1993,16 @@ static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
static void igb_vf_reset(IGBCore *core, uint16_t vfn)
{
+ uint16_t qn0 = vfn;
+ uint16_t qn1 = vfn + IGB_MAX_VF_FUNCTIONS;
+
/* disable Rx and Tx for the VF*/
- core->mac[VFTE] &= ~BIT(vfn);
+ core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
+ core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
+ core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
+ core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
core->mac[VFRE] &= ~BIT(vfn);
+ core->mac[VFTE] &= ~BIT(vfn);
/* indicate VF reset to PF */
core->mac[VFLRE] |= BIT(vfn);
/* VFLRE and mailbox use the same interrupt cause */
@@ -3889,6 +3908,7 @@ igb_phy_reg_init[] = {
static const uint32_t igb_mac_reg_init[] = {
[LEDCTL] = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
[EEMNGCTL] = BIT(31),
+ [TXDCTL0] = E1000_TXDCTL_QUEUE_ENABLE,
[RXDCTL0] = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16),
[RXDCTL1] = 1 << 16,
[RXDCTL2] = 1 << 16,
diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h
index ebf3e95023..084e751378 100644
--- a/hw/net/igb_regs.h
+++ b/hw/net/igb_regs.h
@@ -160,7 +160,8 @@ union e1000_adv_rx_desc {
#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
-/* Additional Receive Descriptor Control definitions */
+/* Additional RX/TX Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
/* Direct Cache Access (DCA) definitions */
--
2.34.1
On 2023/01/28 22:46, Sriram Yagnaraman wrote:
> Also add checks for RXDCTL/TXDCTL queue enable bits
>
> Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
> ---
> hw/net/igb_core.c | 42 +++++++++++++++++++++++++++++++-----------
> hw/net/igb_regs.h | 3 ++-
> 2 files changed, 33 insertions(+), 12 deletions(-)
>
> diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
> index 9bd53cc25f..6bca5459b9 100644
> --- a/hw/net/igb_core.c
> +++ b/hw/net/igb_core.c
> @@ -778,6 +778,19 @@ igb_txdesc_writeback(IGBCore *core, dma_addr_t base,
> return igb_tx_wb_eic(core, txi->idx);
> }
>
> +static inline bool
> +igb_tx_enabled(IGBCore *core, const E1000E_RingInfo *txi)
> +{
> + bool vmdq = core->mac[MRQC] & 1;
> + uint16_t qn = txi->idx;
> + uint16_t vfn = (qn > IGB_MAX_VF_FUNCTIONS) ?
> + (qn - IGB_MAX_VF_FUNCTIONS) : qn;
> +
> + return (core->mac[TCTL] & E1000_TCTL_EN) &&
> + (vmdq ? (core->mac[VFTE] & BIT(vfn)) : true) &&
Instead, do: (!vmdq || (core->mac[VFTE] & BIT(vfn)))
> + (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
> +}
> +
> static void
> igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
> {
> @@ -787,8 +800,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
> const E1000E_RingInfo *txi = txr->i;
> uint32_t eic = 0;
>
> - /* TODO: check if the queue itself is enabled too. */
> - if (!(core->mac[TCTL] & E1000_TCTL_EN)) {
> + if (!igb_tx_enabled(core, txi)) {
> trace_e1000e_tx_disabled();
> return;
> }
> @@ -1003,6 +1015,7 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr,
> queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT);
> }
>
> + queues &= core->mac[VFRE];
> igb_rss_parse_packet(core, core->rx_pkt, external_tx != NULL, rss_info);
> if (rss_info->queue & 1) {
> queues <<= 8;
> @@ -1486,7 +1499,7 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
> static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4);
>
> uint16_t queues = 0;
> - uint32_t n;
> + uint32_t n = 0;
> uint8_t min_buf[ETH_ZLEN];
> struct iovec min_iov;
> struct eth_header *ehdr;
> @@ -1566,26 +1579,22 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
> }
>
> igb_rx_ring_init(core, &rxr, i);
> -
> - trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
> -
> if (!igb_has_rxbufs(core, rxr.i, total_size)) {
> retval = 0;
> }
This stops sending the packet when a disabled queue has no space.
> }
>
> if (retval) {
> - n = E1000_ICR_RXT0;
> -
> igb_rx_fix_l4_csum(core, core->rx_pkt);
>
> for (i = 0; i < IGB_NUM_QUEUES; i++) {
> - if (!(queues & BIT(i))) {
> + if (!(queues & BIT(i)) ||
> + !(core->mac[E1000_RXDCTL(i) >> 2] & E1000_RXDCTL_QUEUE_ENABLE)) {
> continue;
> }
>
> igb_rx_ring_init(core, &rxr, i);
> -
> + trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
> igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);
>
> /* Check if receive descriptor minimum threshold hit */
> @@ -1594,6 +1603,9 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
> }
>
> core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx);
> +
> + /* same as RXDW (rx descriptor written back)*/
> + n = E1000_ICR_RXT0;
> }
>
> trace_e1000e_rx_written_to_guest(n);
> @@ -1981,9 +1993,16 @@ static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
>
> static void igb_vf_reset(IGBCore *core, uint16_t vfn)
> {
> + uint16_t qn0 = vfn;
> + uint16_t qn1 = vfn + IGB_MAX_VF_FUNCTIONS;
> +
> /* disable Rx and Tx for the VF*/
> - core->mac[VFTE] &= ~BIT(vfn);
> + core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
> + core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
> + core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
> + core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
> core->mac[VFRE] &= ~BIT(vfn);
> + core->mac[VFTE] &= ~BIT(vfn);
> /* indicate VF reset to PF */
> core->mac[VFLRE] |= BIT(vfn);
> /* VFLRE and mailbox use the same interrupt cause */
> @@ -3889,6 +3908,7 @@ igb_phy_reg_init[] = {
> static const uint32_t igb_mac_reg_init[] = {
> [LEDCTL] = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
> [EEMNGCTL] = BIT(31),
> + [TXDCTL0] = E1000_TXDCTL_QUEUE_ENABLE,
> [RXDCTL0] = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16),
> [RXDCTL1] = 1 << 16,
> [RXDCTL2] = 1 << 16,
> diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h
> index ebf3e95023..084e751378 100644
> --- a/hw/net/igb_regs.h
> +++ b/hw/net/igb_regs.h
> @@ -160,7 +160,8 @@ union e1000_adv_rx_desc {
> #define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
> #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
>
> -/* Additional Receive Descriptor Control definitions */
> +/* Additional RX/TX Descriptor Control definitions */
> +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
> #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
>
> /* Direct Cache Access (DCA) definitions */
> -----Original Message-----
> From: Akihiko Odaki <akihiko.odaki@daynix.com>
> Sent: Sunday, 29 January 2023 10:16
> To: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
> Cc: qemu-devel@nongnu.org; Jason Wang <jasowang@redhat.com>; Dmitry
> Fleytman <dmitry.fleytman@gmail.com>; Michael S . Tsirkin
> <mst@redhat.com>; Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
> Subject: Re: [PATCH 3/9] igb: implement VFRE and VFTE registers
>
> On 2023/01/28 22:46, Sriram Yagnaraman wrote:
> > Also add checks for RXDCTL/TXDCTL queue enable bits
> >
> > Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
> > ---
> > hw/net/igb_core.c | 42 +++++++++++++++++++++++++++++++-----------
> > hw/net/igb_regs.h | 3 ++-
> > 2 files changed, 33 insertions(+), 12 deletions(-)
> >
> > diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c index
> > 9bd53cc25f..6bca5459b9 100644
> > --- a/hw/net/igb_core.c
> > +++ b/hw/net/igb_core.c
> > @@ -778,6 +778,19 @@ igb_txdesc_writeback(IGBCore *core, dma_addr_t
> base,
> > return igb_tx_wb_eic(core, txi->idx);
> > }
> >
> > +static inline bool
> > +igb_tx_enabled(IGBCore *core, const E1000E_RingInfo *txi) {
> > + bool vmdq = core->mac[MRQC] & 1;
> > + uint16_t qn = txi->idx;
> > + uint16_t vfn = (qn > IGB_MAX_VF_FUNCTIONS) ?
> > + (qn - IGB_MAX_VF_FUNCTIONS) : qn;
> > +
> > + return (core->mac[TCTL] & E1000_TCTL_EN) &&
> > + (vmdq ? (core->mac[VFTE] & BIT(vfn)) : true) &&
>
> Instead, do: (!vmdq || core->mac[VFTE] & BIT(vfn))
>
> > + (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
> > +}
> > +
> > static void
> > igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
> > {
> > @@ -787,8 +800,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing
> *txr)
> > const E1000E_RingInfo *txi = txr->i;
> > uint32_t eic = 0;
> >
> > - /* TODO: check if the queue itself is enabled too. */
> > - if (!(core->mac[TCTL] & E1000_TCTL_EN)) {
> > + if (!igb_tx_enabled(core, txi)) {
> > trace_e1000e_tx_disabled();
> > return;
> > }
> > @@ -1003,6 +1015,7 @@ static uint16_t igb_receive_assign(IGBCore *core,
> const struct eth_header *ehdr,
> > queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT);
> > }
> >
> > + queues &= core->mac[VFRE];
> > igb_rss_parse_packet(core, core->rx_pkt, external_tx != NULL,
> rss_info);
> > if (rss_info->queue & 1) {
> > queues <<= 8;
> > @@ -1486,7 +1499,7 @@ igb_receive_internal(IGBCore *core, const struct
> iovec *iov, int iovcnt,
> > static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4);
> >
> > uint16_t queues = 0;
> > - uint32_t n;
> > + uint32_t n = 0;
> > uint8_t min_buf[ETH_ZLEN];
> > struct iovec min_iov;
> > struct eth_header *ehdr;
> > @@ -1566,26 +1579,22 @@ igb_receive_internal(IGBCore *core, const
> struct iovec *iov, int iovcnt,
> > }
> >
> > igb_rx_ring_init(core, &rxr, i);
> > -
> > - trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
> > -
> > if (!igb_has_rxbufs(core, rxr.i, total_size)) {
> > retval = 0;
> > }
>
> This stops sending packet when a disabled queue has no space.
Yes, that is true, but I have refactored this part a bit in patch 9 of this series, which fixes this.
>
> > }
> >
> > if (retval) {
> > - n = E1000_ICR_RXT0;
> > -
> > igb_rx_fix_l4_csum(core, core->rx_pkt);
> >
> > for (i = 0; i < IGB_NUM_QUEUES; i++) {
> > - if (!(queues & BIT(i))) {
> > + if (!(queues & BIT(i)) ||
> > + !(core->mac[E1000_RXDCTL(i) >> 2] &
> > + E1000_RXDCTL_QUEUE_ENABLE)) {
> > continue;
> > }
> >
> > igb_rx_ring_init(core, &rxr, i);
> > -
> > + trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
> > igb_write_packet_to_guest(core, core->rx_pkt, &rxr,
> > &rss_info);
> >
> > /* Check if receive descriptor minimum threshold hit */
> > @@ -1594,6 +1603,9 @@ igb_receive_internal(IGBCore *core, const struct
> iovec *iov, int iovcnt,
> > }
> >
> > core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx);
> > +
> > + /* same as RXDW (rx descriptor written back)*/
> > + n = E1000_ICR_RXT0;
> > }
> >
> > trace_e1000e_rx_written_to_guest(n);
> > @@ -1981,9 +1993,16 @@ static void igb_set_vfmailbox(IGBCore *core,
> > int index, uint32_t val)
> >
> > static void igb_vf_reset(IGBCore *core, uint16_t vfn)
> > {
> > + uint16_t qn0 = vfn;
> > + uint16_t qn1 = vfn + IGB_MAX_VF_FUNCTIONS;
> > +
> > /* disable Rx and Tx for the VF*/
> > - core->mac[VFTE] &= ~BIT(vfn);
> > + core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
> > + core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
> > + core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
> > + core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
> > core->mac[VFRE] &= ~BIT(vfn);
> > + core->mac[VFTE] &= ~BIT(vfn);
> > /* indicate VF reset to PF */
> > core->mac[VFLRE] |= BIT(vfn);
> > /* VFLRE and mailbox use the same interrupt cause */ @@ -3889,6
> > +3908,7 @@ igb_phy_reg_init[] = {
> > static const uint32_t igb_mac_reg_init[] = {
> > [LEDCTL] = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
> > [EEMNGCTL] = BIT(31),
> > + [TXDCTL0] = E1000_TXDCTL_QUEUE_ENABLE,
> > [RXDCTL0] = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16),
> > [RXDCTL1] = 1 << 16,
> > [RXDCTL2] = 1 << 16,
> > diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h index
> > ebf3e95023..084e751378 100644
> > --- a/hw/net/igb_regs.h
> > +++ b/hw/net/igb_regs.h
> > @@ -160,7 +160,8 @@ union e1000_adv_rx_desc {
> > #define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
> > #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
> >
> > -/* Additional Receive Descriptor Control definitions */
> > +/* Additional RX/TX Descriptor Control definitions */ #define
> > +E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue
> */
> > #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx
> > Queue */
> >
> > /* Direct Cache Access (DCA) definitions */
On 2023/01/30 19:16, Sriram Yagnaraman wrote:
>
>
>> -----Original Message-----
>> From: Akihiko Odaki <akihiko.odaki@daynix.com>
>> Sent: Sunday, 29 January 2023 10:16
>> To: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
>> Cc: qemu-devel@nongnu.org; Jason Wang <jasowang@redhat.com>; Dmitry
>> Fleytman <dmitry.fleytman@gmail.com>; Michael S . Tsirkin
>> <mst@redhat.com>; Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
>> Subject: Re: [PATCH 3/9] igb: implement VFRE and VFTE registers
>>
>> On 2023/01/28 22:46, Sriram Yagnaraman wrote:
>>> Also add checks for RXDCTL/TXDCTL queue enable bits
>>>
>>> Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
>>> ---
>>> hw/net/igb_core.c | 42 +++++++++++++++++++++++++++++++-----------
>>> hw/net/igb_regs.h | 3 ++-
>>> 2 files changed, 33 insertions(+), 12 deletions(-)
>>>
>>> diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c index
>>> 9bd53cc25f..6bca5459b9 100644
>>> --- a/hw/net/igb_core.c
>>> +++ b/hw/net/igb_core.c
>>> @@ -778,6 +778,19 @@ igb_txdesc_writeback(IGBCore *core, dma_addr_t
>> base,
>>> return igb_tx_wb_eic(core, txi->idx);
>>> }
>>>
>>> +static inline bool
>>> +igb_tx_enabled(IGBCore *core, const E1000E_RingInfo *txi) {
>>> + bool vmdq = core->mac[MRQC] & 1;
>>> + uint16_t qn = txi->idx;
>>> + uint16_t vfn = (qn > IGB_MAX_VF_FUNCTIONS) ?
>>> + (qn - IGB_MAX_VF_FUNCTIONS) : qn;
>>> +
>>> + return (core->mac[TCTL] & E1000_TCTL_EN) &&
>>> + (vmdq ? (core->mac[VFTE] & BIT(vfn)) : true) &&
>>
>> Instead, do: (!vmdq || core->mac[VFTE] & BIT(vfn))
>>
>>> + (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
>>> +}
>>> +
>>> static void
>>> igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
>>> {
>>> @@ -787,8 +800,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing
>> *txr)
>>> const E1000E_RingInfo *txi = txr->i;
>>> uint32_t eic = 0;
>>>
>>> - /* TODO: check if the queue itself is enabled too. */
>>> - if (!(core->mac[TCTL] & E1000_TCTL_EN)) {
>>> + if (!igb_tx_enabled(core, txi)) {
>>> trace_e1000e_tx_disabled();
>>> return;
>>> }
>>> @@ -1003,6 +1015,7 @@ static uint16_t igb_receive_assign(IGBCore *core,
>> const struct eth_header *ehdr,
>>> queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT);
>>> }
>>>
>>> + queues &= core->mac[VFRE];
>>> igb_rss_parse_packet(core, core->rx_pkt, external_tx != NULL,
>> rss_info);
>>> if (rss_info->queue & 1) {
>>> queues <<= 8;
>>> @@ -1486,7 +1499,7 @@ igb_receive_internal(IGBCore *core, const struct
>> iovec *iov, int iovcnt,
>>> static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4);
>>>
>>> uint16_t queues = 0;
>>> - uint32_t n;
>>> + uint32_t n = 0;
>>> uint8_t min_buf[ETH_ZLEN];
>>> struct iovec min_iov;
>>> struct eth_header *ehdr;
>>> @@ -1566,26 +1579,22 @@ igb_receive_internal(IGBCore *core, const
>> struct iovec *iov, int iovcnt,
>>> }
>>>
>>> igb_rx_ring_init(core, &rxr, i);
>>> -
>>> - trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
>>> -
>>> if (!igb_has_rxbufs(core, rxr.i, total_size)) {
>>> retval = 0;
>>> }
>>
>> This stops sending packet when a disabled queue has no space.
>
> Yes, that is true, but I have refactored this part a bit in patchset 9 that fixes this.
I see. Please include the fix in this patch so it will be easier to
review and it won't break bisecting.
>
>>
>>> }
>>>
>>> if (retval) {
>>> - n = E1000_ICR_RXT0;
>>> -
>>> igb_rx_fix_l4_csum(core, core->rx_pkt);
>>>
>>> for (i = 0; i < IGB_NUM_QUEUES; i++) {
>>> - if (!(queues & BIT(i))) {
>>> + if (!(queues & BIT(i)) ||
>>> + !(core->mac[E1000_RXDCTL(i) >> 2] &
>>> + E1000_RXDCTL_QUEUE_ENABLE)) {
>>> continue;
>>> }
>>>
>>> igb_rx_ring_init(core, &rxr, i);
>>> -
>>> + trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
>>> igb_write_packet_to_guest(core, core->rx_pkt, &rxr,
>>> &rss_info);
>>>
>>> /* Check if receive descriptor minimum threshold hit */
>>> @@ -1594,6 +1603,9 @@ igb_receive_internal(IGBCore *core, const struct
>> iovec *iov, int iovcnt,
>>> }
>>>
>>> core->mac[EICR] |= igb_rx_wb_eic(core, rxr.i->idx);
>>> +
>>> + /* same as RXDW (rx descriptor written back)*/
>>> + n = E1000_ICR_RXT0;
>>> }
>>>
>>> trace_e1000e_rx_written_to_guest(n);
>>> @@ -1981,9 +1993,16 @@ static void igb_set_vfmailbox(IGBCore *core,
>>> int index, uint32_t val)
>>>
>>> static void igb_vf_reset(IGBCore *core, uint16_t vfn)
>>> {
>>> + uint16_t qn0 = vfn;
>>> + uint16_t qn1 = vfn + IGB_MAX_VF_FUNCTIONS;
>>> +
>>> /* disable Rx and Tx for the VF*/
>>> - core->mac[VFTE] &= ~BIT(vfn);
>>> + core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
>>> + core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
>>> + core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
>>> + core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
>>> core->mac[VFRE] &= ~BIT(vfn);
>>> + core->mac[VFTE] &= ~BIT(vfn);
>>> /* indicate VF reset to PF */
>>> core->mac[VFLRE] |= BIT(vfn);
>>> /* VFLRE and mailbox use the same interrupt cause */ @@ -3889,6
>>> +3908,7 @@ igb_phy_reg_init[] = {
>>> static const uint32_t igb_mac_reg_init[] = {
>>> [LEDCTL] = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
>>> [EEMNGCTL] = BIT(31),
>>> + [TXDCTL0] = E1000_TXDCTL_QUEUE_ENABLE,
>>> [RXDCTL0] = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16),
>>> [RXDCTL1] = 1 << 16,
>>> [RXDCTL2] = 1 << 16,
>>> diff --git a/hw/net/igb_regs.h b/hw/net/igb_regs.h index
>>> ebf3e95023..084e751378 100644
>>> --- a/hw/net/igb_regs.h
>>> +++ b/hw/net/igb_regs.h
>>> @@ -160,7 +160,8 @@ union e1000_adv_rx_desc {
>>> #define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
>>> #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
>>>
>>> -/* Additional Receive Descriptor Control definitions */
>>> +/* Additional RX/TX Descriptor Control definitions */ #define
>>> +E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue
>> */
>>> #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx
>>> Queue */
>>>
>>> /* Direct Cache Access (DCA) definitions */
© 2016 - 2026 Red Hat, Inc.