Add netdev notifier interfaces.
As we stipulate that netdevices with a vlan depth greater than 1
should have the offload features disabled, depth-1 vlan netdevices use a
notifier to modify their vlan_features.
Co-developed-by: Zhu Yikai <zhuyikai1@h-partners.com>
Signed-off-by: Zhu Yikai <zhuyikai1@h-partners.com>
Signed-off-by: Fan Gong <gongfan1@huawei.com>
---
.../net/ethernet/huawei/hinic3/hinic3_main.c | 89 +++++++++++++++++++
.../ethernet/huawei/hinic3/hinic3_nic_dev.h | 1 +
2 files changed, 90 insertions(+)
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
index 1308653819ef..463609585f46 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -32,6 +32,61 @@
#define HINIC3_RX_PENDING_LIMIT_LOW 2
#define HINIC3_RX_PENDING_LIMIT_HIGH 8
+#define HINIC3_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 1
+#define HINIC3_VLAN_CLEAR_OFFLOAD \
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
+
+/* used for netdev notifier register/unregister */
+static DEFINE_MUTEX(hinic3_netdev_notifiers_mutex);
+static int hinic3_netdev_notifiers_ref_cnt;
+
+static u16 hinic3_get_vlan_depth(struct net_device *netdev)
+{
+ u16 vlan_depth = 0;
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ while (is_vlan_dev(netdev)) {
+ netdev = vlan_dev_priv(netdev)->real_dev;
+ vlan_depth++;
+ }
+#endif
+ return vlan_depth;
+}
+
+static int hinic3_netdev_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct hinic3_nic_dev *nic_dev = netdev_priv(ndev);
+ u16 vlan_depth;
+
+ if (!is_vlan_dev(ndev))
+ return NOTIFY_DONE;
+
+ netdev_hold(ndev, &nic_dev->tracker, GFP_ATOMIC);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ vlan_depth = hinic3_get_vlan_depth(ndev);
+ if (vlan_depth == HINIC3_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT)
+ ndev->vlan_features &= (~HINIC3_VLAN_CLEAR_OFFLOAD);
+
+ break;
+
+ default:
+ break;
+ }
+
+ netdev_put(ndev, &nic_dev->tracker);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block hinic3_netdev_notifier = {
+ .notifier_call = hinic3_netdev_event,
+};
+
static void init_intr_coal_param(struct net_device *netdev)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -309,6 +364,36 @@ static int hinic3_set_default_hw_feature(struct net_device *netdev)
return 0;
}
+static void hinic3_register_notifier(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&hinic3_netdev_notifiers_mutex);
+ hinic3_netdev_notifiers_ref_cnt++;
+ if (hinic3_netdev_notifiers_ref_cnt == 1) {
+ err = register_netdevice_notifier(&hinic3_netdev_notifier);
+ if (err) {
+ dev_dbg(nic_dev->hwdev->dev,
+ "Register netdevice notifier failed, err: %d\n",
+ err);
+ hinic3_netdev_notifiers_ref_cnt--;
+ }
+ }
+ mutex_unlock(&hinic3_netdev_notifiers_mutex);
+}
+
+static void hinic3_unregister_notifier(void)
+{
+ mutex_lock(&hinic3_netdev_notifiers_mutex);
+ if (hinic3_netdev_notifiers_ref_cnt == 1)
+ unregister_netdevice_notifier(&hinic3_netdev_notifier);
+
+ if (hinic3_netdev_notifiers_ref_cnt)
+ hinic3_netdev_notifiers_ref_cnt--;
+ mutex_unlock(&hinic3_netdev_notifiers_mutex);
+}
+
static void hinic3_link_status_change(struct net_device *netdev,
bool link_status_up)
{
@@ -412,6 +497,8 @@ static int hinic3_nic_probe(struct auxiliary_device *adev,
if (err)
goto err_uninit_sw;
+ hinic3_register_notifier(netdev);
+
queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ);
netif_carrier_off(netdev);
@@ -422,6 +509,7 @@ static int hinic3_nic_probe(struct auxiliary_device *adev,
return 0;
err_uninit_nic_feature:
+ hinic3_unregister_notifier();
hinic3_update_nic_feature(nic_dev, 0);
hinic3_set_nic_feature_to_hw(nic_dev);
@@ -452,6 +540,7 @@ static void hinic3_nic_remove(struct auxiliary_device *adev)
netdev = nic_dev->netdev;
unregister_netdev(netdev);
+ hinic3_unregister_notifier();
disable_delayed_work_sync(&nic_dev->periodic_work);
cancel_work_sync(&nic_dev->rx_mode_work);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
index 9bd46541b3e3..899f60588d25 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -97,6 +97,7 @@ struct hinic3_intr_coal_info {
struct hinic3_nic_dev {
struct pci_dev *pdev;
struct net_device *netdev;
+ netdevice_tracker tracker;
struct hinic3_hwdev *hwdev;
struct hinic3_nic_io *nic_io;
--
2.43.0
On 11/19/25 1:43 PM, Fan Gong wrote:
> Add netdev notifier interfaces.
> As we stipulate that netdevices with a vlan depth greater than 1
> should have the offload features disabled, depth-1 vlan netdevices use a
> notifier to modify their vlan_features.
As mentioned by Jakub in the previous revision, the net stack can send
packets with multiple stacked vlans. You need to implement
ndo_features_check(), check for the problematic packet layout there and
return a smaller feature set excluding the relevant offloads.
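
Roughly something like the following (only a sketch: the function name is
made up and I'm reusing HINIC3_VLAN_CLEAR_OFFLOAD from this patch;
skb_vlan_tagged_multi() from <linux/if_vlan.h> is one way to detect the
stacked-tag layout):

static netdev_features_t hinic3_features_check(struct sk_buff *skb,
					       struct net_device *netdev,
					       netdev_features_t features)
{
	/* The stack can hand us an skb carrying more than one VLAN tag
	 * regardless of what vlan_features advertises, so drop the
	 * offloads the HW cannot handle on a per-packet basis.
	 */
	if (skb_vlan_tagged_multi(skb))
		features &= ~HINIC3_VLAN_CLEAR_OFFLOAD;

	return features;
}

and wire it up via .ndo_features_check in the driver's net_device_ops.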
> +static u16 hinic3_get_vlan_depth(struct net_device *netdev)
> +{
> + u16 vlan_depth = 0;
> +
> +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
> + while (is_vlan_dev(netdev)) {
> + netdev = vlan_dev_priv(netdev)->real_dev;
> + vlan_depth++;
> + }
> +#endif
> + return vlan_depth;
AFAICS the above can return any number >=
HINIC3_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT ...
> +}
> +
> +static int hinic3_netdev_event(struct notifier_block *notifier,
> + unsigned long event, void *ptr)
> +{
> + struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
> + struct hinic3_nic_dev *nic_dev = netdev_priv(ndev);
> + u16 vlan_depth;
> +
> + if (!is_vlan_dev(ndev))
> + return NOTIFY_DONE;
> +
> + netdev_hold(ndev, &nic_dev->tracker, GFP_ATOMIC);
> +
> + switch (event) {
> + case NETDEV_REGISTER:
> + vlan_depth = hinic3_get_vlan_depth(ndev);
> + if (vlan_depth == HINIC3_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT)
... so here you should use '>='

> +		ndev->vlan_features &= (~HINIC3_VLAN_CLEAR_OFFLOAD);
> +
> + break;
> +
> + default:
> + break;
> + }
> +
> + netdev_put(ndev, &nic_dev->tracker);
> +
> + return NOTIFY_DONE;
> +}
> +
> +static struct notifier_block hinic3_netdev_notifier = {
> + .notifier_call = hinic3_netdev_event,
> +};
> +
> static void init_intr_coal_param(struct net_device *netdev)
> {
> struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
> @@ -309,6 +364,36 @@ static int hinic3_set_default_hw_feature(struct net_device *netdev)
> return 0;
> }
>
> +static void hinic3_register_notifier(struct net_device *netdev)
> +{
> + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
> + int err;
> +
> + mutex_lock(&hinic3_netdev_notifiers_mutex);
> + hinic3_netdev_notifiers_ref_cnt++;
> + if (hinic3_netdev_notifiers_ref_cnt == 1) {
Why do you need this notifier accounting? Instead you should be able to
call hinic3_register_notifier() only once.
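
In other words, something along these lines (sketch only:
hinic3_nic_init()/hinic3_nic_exit() and hinic3_nic_driver are assumed
names for the module init path and the auxiliary driver, they are not
part of this patch):

static int __init hinic3_nic_init(void)
{
	int err;

	/* one notifier for the whole module, no per-device refcounting */
	err = register_netdevice_notifier(&hinic3_netdev_notifier);
	if (err)
		return err;

	err = auxiliary_driver_register(&hinic3_nic_driver);
	if (err)
		unregister_netdevice_notifier(&hinic3_netdev_notifier);

	return err;
}

static void __exit hinic3_nic_exit(void)
{
	auxiliary_driver_unregister(&hinic3_nic_driver);
	unregister_netdevice_notifier(&hinic3_netdev_notifier);
}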
/P