Add MACsec support.

The MACsec block has four TX SCs and four RX SCs. The driver supports up
to four SecYs, each with one TX SC and one RX SC.
The RX SCs can have two keys, key A and key B, written in hardware and
enabled at the same time.
The TX SCs can have two keys written in hardware, but only one can be
active at a given time.
On TX, the SC is selected using the MAC source address. Due to this
selection mechanism, each offloaded netdev must have a unique MAC
address.
On RX, the SC is selected by the SCI (found in the SecTAG or calculated
from the MAC SA), or RX SC 0 is used implicitly.
Signed-off-by: Radu Pirea (NXP OSS) <radu-nicolae.pirea@oss.nxp.com>
---
MAINTAINERS | 2 +-
drivers/net/phy/Kconfig | 2 +-
drivers/net/phy/Makefile | 4 +
drivers/net/phy/nxp-c45-tja11xx-macsec.c | 1397 ++++++++++++++++++++++
drivers/net/phy/nxp-c45-tja11xx.c | 72 +-
drivers/net/phy/nxp-c45-tja11xx.h | 55 +
6 files changed, 1501 insertions(+), 31 deletions(-)
create mode 100644 drivers/net/phy/nxp-c45-tja11xx-macsec.c
create mode 100644 drivers/net/phy/nxp-c45-tja11xx.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 9cc15c50c2c6..3d1e2c9c278b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15173,7 +15173,7 @@ NXP C45 TJA11XX PHY DRIVER
M: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/phy/nxp-c45-tja11xx.c
+F: drivers/net/phy/nxp-c45-tja11xx*
NXP FSPI DRIVER
M: Han Xu <han.xu@nxp.com>
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 107880d13d21..79f54f773af2 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -306,7 +306,7 @@ config NXP_C45_TJA11XX_PHY
depends on PTP_1588_CLOCK_OPTIONAL
help
Enable support for NXP C45 TJA11XX PHYs.
- Currently supports the TJA1103 and TJA1120 PHYs.
+ Currently supports the TJA1103, TJA1104 and TJA1120 PHYs.
config NXP_TJA11XX_PHY
tristate "NXP TJA11xx PHYs support"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index c945ed9bd14b..ee53e2fdb968 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -83,6 +83,10 @@ obj-$(CONFIG_MICROSEMI_PHY) += mscc/
obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_NCN26000_PHY) += ncn26000.o
+nxp-c45-tja11xx-objs += nxp-c45-tja11xx.o
+ifdef CONFIG_MACSEC
+nxp-c45-tja11xx-objs += nxp-c45-tja11xx-macsec.o
+endif
obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o
obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
diff --git a/drivers/net/phy/nxp-c45-tja11xx-macsec.c b/drivers/net/phy/nxp-c45-tja11xx-macsec.c
new file mode 100644
index 000000000000..1567865b8de4
--- /dev/null
+++ b/drivers/net/phy/nxp-c45-tja11xx-macsec.c
@@ -0,0 +1,1397 @@
+// SPDX-License-Identifier: GPL-2.0
+/* NXP C45 PHY MACsec driver interface
+ * Copyright 2023 NXP
+ * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/ethtool_netlink.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/processor.h>
+#include <net/macsec.h>
+
+#include "nxp-c45-tja11xx.h"
+
+#define MACSEC_REG_SIZE 32
+#define TX_SC_MAX 4
+
+#define TX_SC_BIT(secy_id) BIT(MACSEC_REG_SIZE - (secy_id) - 1)
+
+#define VEND1_MACSEC_BASE 0x9000
+
+#define MACSEC_CFG 0x0000
+#define MACSEC_CFG_BYPASS BIT(1)
+#define MACSEC_CFG_S0I BIT(0)
+
+#define MACSEC_TPNET 0x0044
+#define PN_WRAP_THRESHOLD 0xffffffff
+
+#define MACSEC_RXSCA 0x0080
+#define MACSEC_RXSCKA 0x0084
+
+#define MACSEC_TXSCA 0x00C0
+#define MACSEC_TXSCKA 0x00C4
+
+#define MACSEC_RXSC_SCI_1H 0x0100
+#define MACSEC_RXSC_SCI_2H 0x0104
+
+#define MACSEC_RXSC_CFG 0x0128
+#define MACSEC_RXSC_CFG_XPN BIT(25)
+#define MACSEC_RXSC_CFG_AES_256 BIT(24)
+#define MACSEC_RXSC_CFG_SCI_EN BIT(11)
+#define MACSEC_RXSC_CFG_RP BIT(10)
+#define MACSEC_RXSC_CFG_VF_MASK GENMASK(9, 8)
+#define MACSEC_RXSC_CFG_VF_OFF 8
+
+#define MACSEC_RPW 0x012C
+
+#define MACSEC_RXSA_A_CS 0x0180
+#define MACSEC_RXSA_A_NPN 0x0184
+#define MACSEC_RXSA_A_XNPN 0x0188
+#define MACSEC_RXSA_A_LNPN 0x018C
+#define MACSEC_RXSA_A_LXNPN 0x0190
+
+#define MACSEC_RXSA_B_CS 0x01C0
+#define MACSEC_RXSA_B_NPN 0x01C4
+#define MACSEC_RXSA_B_XNPN 0x01C8
+#define MACSEC_RXSA_B_LNPN 0x01CC
+#define MACSEC_RXSA_B_LXNPN 0x01D0
+
+#define MACSEC_RXSA_CS_A BIT(31)
+#define MACSEC_RXSA_CS_AN_OFF 1
+#define MACSEC_RXSA_CS_EN BIT(0)
+
+#define MACSEC_TXSC_SCI_1H 0x0200
+#define MACSEC_TXSC_SCI_2H 0x0204
+#define MACSEC_TXSC_CFG 0x0228
+#define MACSEC_TXSC_CFG_XPN BIT(25)
+#define MACSEC_TXSC_CFG_AES_256 BIT(24)
+#define MACSEC_TXSC_CFG_AN_MASK GENMASK(19, 18)
+#define MACSEC_TXSC_CFG_AN_OFF 18
+#define MACSEC_TXSC_CFG_ASA BIT(17)
+#define MACSEC_TXSC_CFG_SCE BIT(16)
+#define MACSEC_TXSC_CFG_ENCRYPT BIT(4)
+#define MACSEC_TXSC_CFG_PROTECT BIT(3)
+#define MACSEC_TXSC_CFG_SEND_SCI BIT(2)
+#define MACSEC_TXSC_CFG_END_STATION BIT(1)
+#define MACSEC_TXSC_CFG_SCI BIT(0)
+
+#define MACSEC_TXSA_A_CS 0x0280
+#define MACSEC_TXSA_A_NPN 0x0284
+#define MACSEC_TXSA_A_XNPN 0x0288
+
+#define MACSEC_TXSA_B_CS 0x02C0
+#define MACSEC_TXSA_B_NPN 0x02C4
+#define MACSEC_TXSA_B_XNPN 0x02C8
+
+#define MACSEC_TXSA_CS_A BIT(31)
+
+#define MACSEC_EVR 0x0400
+#define MACSEC_EVER 0x0404
+
+#define MACSEC_RXSA_A_KA 0x0700
+#define MACSEC_RXSA_A_SSCI 0x0720
+#define MACSEC_RXSA_A_SALT 0x0724
+
+#define MACSEC_RXSA_B_KA 0x0740
+#define MACSEC_RXSA_B_SSCI 0x0760
+#define MACSEC_RXSA_B_SALT 0x0764
+
+#define MACSEC_TXSA_A_KA 0x0780
+#define MACSEC_TXSA_A_SSCI 0x07A0
+#define MACSEC_TXSA_A_SALT 0x07A4
+
+#define MACSEC_TXSA_B_KA 0x07C0
+#define MACSEC_TXSA_B_SSCI 0x07E0
+#define MACSEC_TXSA_B_SALT 0x07E4
+
+#define MACSEC_UPFR0D2 0x0A08
+#define MACSEC_UPFR0M1 0x0A10
+#define MACSEC_OVP BIT(12)
+
+#define MACSEC_UPFR0M2 0x0A14
+#define ETYPE_MASK 0xffff
+
+#define MACSEC_UPFR0R 0x0A18
+#define MACSEC_UPFR_EN BIT(0)
+
+#define ADPTR_CNTRL 0x0F00
+#define ADPTR_CNTRL_CONFIG_EN BIT(14)
+#define ADPTR_CNTRL_ADPTR_EN BIT(12)
+
+#define TX_SC_FLT_BASE 0x800
+#define TX_SC_FLT_SIZE 0x10
+#define TX_FLT_BASE(flt_id) (TX_SC_FLT_BASE + \
+ TX_SC_FLT_SIZE * (flt_id))
+
+#define TX_SC_FLT_OFF_MAC_DA_SA 0x04
+#define TX_SC_FLT_OFF_MAC_SA 0x08
+#define TX_SC_FLT_OFF_MAC_CFG 0x0C
+#define TX_SC_FLT_BY_SA BIT(14)
+#define TX_SC_FLT_EN BIT(8)
+
+#define TX_SC_FLT_MAC_DA_SA(base) ((base) + TX_SC_FLT_OFF_MAC_DA_SA)
+#define TX_SC_FLT_MAC_SA(base) ((base) + TX_SC_FLT_OFF_MAC_SA)
+#define TX_SC_FLT_MAC_CFG(base) ((base) + TX_SC_FLT_OFF_MAC_CFG)
+
+#define ADAPTER_EN BIT(6)
+#define MACSEC_EN BIT(5)
+
+struct nxp_c45_rx_sc {
+ struct macsec_rx_sc *rx_sc;
+ struct macsec_rx_sa *rx_sa_a;
+ struct macsec_rx_sa *rx_sa_b;
+};
+
+struct nxp_c45_tx_sa {
+ struct macsec_tx_sa *tx_sa;
+ u8 key[MACSEC_MAX_KEY_LEN];
+ u8 salt[MACSEC_SALT_LEN];
+ u8 an;
+ u64 next_pn;
+ bool is_enabled;
+ bool is_key_a;
+};
+
+struct nxp_c45_secy {
+ struct macsec_secy *secy;
+ struct nxp_c45_tx_sa *tx_sa[MACSEC_NUM_AN];
+ struct nxp_c45_rx_sc *rx_sc;
+ int enabled_an;
+ int secy_id;
+ bool tx_sa_key_a;
+ bool point_to_point;
+ struct list_head list;
+};
+
+struct nxp_c45_macsec {
+ struct list_head secy_list;
+ DECLARE_BITMAP(secy_bitmap, TX_SC_MAX);
+ DECLARE_BITMAP(tx_sc_bitmap, TX_SC_MAX);
+};
+
+struct nxp_c45_macsec_sa_regs {
+ u16 rxsa_cs;
+ u16 rxsa_npn;
+ u16 rxsa_xnpn;
+ u16 rxsa_lnpn;
+ u16 rxsa_lxnpn;
+ u16 txsa_cs;
+ u16 txsa_npn;
+ u16 txsa_xnpn;
+ u16 rxsa_ka;
+ u16 rxsa_ssci;
+ u16 rxsa_salt;
+ u16 txsa_ka;
+ u16 txsa_ssci;
+ u16 txsa_salt;
+};
+
+static const struct nxp_c45_macsec_sa_regs sa_a_regs = {
+ .rxsa_cs = MACSEC_RXSA_A_CS,
+ .rxsa_npn = MACSEC_RXSA_A_NPN,
+ .rxsa_xnpn = MACSEC_RXSA_A_XNPN,
+ .rxsa_lnpn = MACSEC_RXSA_A_LNPN,
+ .rxsa_lxnpn = MACSEC_RXSA_A_LXNPN,
+ .txsa_cs = MACSEC_TXSA_A_CS,
+ .txsa_npn = MACSEC_TXSA_A_NPN,
+ .txsa_xnpn = MACSEC_TXSA_A_XNPN,
+ .rxsa_ka = MACSEC_RXSA_A_KA,
+ .rxsa_ssci = MACSEC_RXSA_A_SSCI,
+ .rxsa_salt = MACSEC_RXSA_A_SALT,
+ .txsa_ka = MACSEC_TXSA_A_KA,
+ .txsa_ssci = MACSEC_TXSA_A_SSCI,
+ .txsa_salt = MACSEC_TXSA_A_SALT,
+};
+
+static const struct nxp_c45_macsec_sa_regs sa_b_regs = {
+ .rxsa_cs = MACSEC_RXSA_B_CS,
+ .rxsa_npn = MACSEC_RXSA_B_NPN,
+ .rxsa_xnpn = MACSEC_RXSA_B_XNPN,
+ .rxsa_lnpn = MACSEC_RXSA_B_LNPN,
+ .rxsa_lxnpn = MACSEC_RXSA_B_LXNPN,
+ .txsa_cs = MACSEC_TXSA_B_CS,
+ .txsa_npn = MACSEC_TXSA_B_NPN,
+ .txsa_xnpn = MACSEC_TXSA_B_XNPN,
+ .rxsa_ka = MACSEC_RXSA_B_KA,
+ .rxsa_ssci = MACSEC_RXSA_B_SSCI,
+ .rxsa_salt = MACSEC_RXSA_B_SALT,
+ .txsa_ka = MACSEC_TXSA_B_KA,
+ .txsa_ssci = MACSEC_TXSA_B_SSCI,
+ .txsa_salt = MACSEC_TXSA_B_SALT,
+};
+
+static const
+struct nxp_c45_macsec_sa_regs *nxp_c45_get_macsec_sa_regs(bool key_a)
+{
+ if (key_a)
+ return &sa_a_regs;
+
+ return &sa_b_regs;
+}
+
+static int nxp_c45_macsec_write(struct phy_device *phydev, u16 reg, u32 val)
+{
+ WARN_ON_ONCE(reg % 4);
+
+ reg = reg / 2;
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ VEND1_MACSEC_BASE + reg, val);
+ phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ VEND1_MACSEC_BASE + reg + 1, val >> 16);
+ return 0;
+}
+
+static int nxp_c45_macsec_read(struct phy_device *phydev, u16 reg, u32 *value)
+{
+ u32 lvalue;
+ int ret;
+
+ WARN_ON_ONCE(reg % 4);
+
+ reg = reg / 2;
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VEND1_MACSEC_BASE + reg);
+ if (ret < 0)
+ return ret;
+
+ lvalue = (u32)ret & 0xffff;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VEND1_MACSEC_BASE + reg + 1);
+ if (ret < 0)
+ return ret;
+
+ lvalue |= (u32)ret << 16;
+ *value = lvalue;
+
+ return 0;
+}
+
+static void nxp_c45_select_secy(struct phy_device *phydev, u8 id)
+{
+ nxp_c45_macsec_write(phydev, MACSEC_RXSCA, id);
+ nxp_c45_macsec_write(phydev, MACSEC_RXSCKA, id);
+ nxp_c45_macsec_write(phydev, MACSEC_TXSCA, id);
+ nxp_c45_macsec_write(phydev, MACSEC_TXSCKA, id);
+}
+
+void nxp_c45_macsec_config_init(struct phy_device *phydev)
+{
+ if (!phydev->macsec_ops)
+ return;
+
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
+ MACSEC_EN | ADAPTER_EN);
+
+ nxp_c45_macsec_write(phydev, ADPTR_CNTRL, ADPTR_CNTRL_CONFIG_EN |
+ ADPTR_CNTRL_ADPTR_EN);
+ nxp_c45_macsec_write(phydev, ADPTR_CNTRL, ADPTR_CNTRL_ADPTR_EN);
+
+ nxp_c45_macsec_write(phydev, MACSEC_TPNET, PN_WRAP_THRESHOLD);
+
+ /* Set MKA filter. */
+ nxp_c45_macsec_write(phydev, MACSEC_UPFR0D2, ETH_P_PAE);
+ nxp_c45_macsec_write(phydev, MACSEC_UPFR0M1, MACSEC_OVP);
+ nxp_c45_macsec_write(phydev, MACSEC_UPFR0M2, ETYPE_MASK);
+ nxp_c45_macsec_write(phydev, MACSEC_UPFR0R, MACSEC_UPFR_EN);
+}
+
+static void nxp_c45_macsec_cfg_ptp(struct phy_device *phydev, bool enable)
+{
+ u32 reg = 0;
+
+ nxp_c45_macsec_read(phydev, MACSEC_CFG, &reg);
+ if (enable)
+ reg |= MACSEC_CFG_S0I;
+ else
+ reg &= ~MACSEC_CFG_S0I;
+ nxp_c45_macsec_write(phydev, MACSEC_CFG, reg);
+}
+
+static bool nxp_c45_mac_addr_free(struct macsec_context *ctx)
+{
+ struct nxp_c45_phy *priv = ctx->phydev->priv;
+ struct nxp_c45_secy *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &priv->macsec->secy_list, list) {
+ if (pos->secy == ctx->secy)
+ continue;
+
+ if (memcmp(pos->secy->netdev->dev_addr,
+ ctx->secy->netdev->dev_addr, ETH_ALEN) == 0)
+ return false;
+ }
+
+ return true;
+}
+
+static bool nxp_c45_is_macsec_ptp_enabled(struct list_head *secy_list)
+{
+ struct nxp_c45_secy *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, secy_list, list)
+ if (pos->point_to_point)
+ return pos->point_to_point;
+
+ return false;
+}
+
+static struct nxp_c45_secy *nxp_c45_find_secy(struct list_head *secy_list,
+ sci_t sci)
+{
+ struct nxp_c45_secy *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, secy_list, list)
+ if (pos->secy->sci == sci)
+ return pos;
+
+ return ERR_PTR(-ENOENT);
+}
+
+static void nxp_c45_rx_sc_en(struct phy_device *phydev,
+ struct nxp_c45_rx_sc *rx_sc,
+ bool en)
+{
+ u32 reg = 0;
+
+ nxp_c45_macsec_read(phydev, MACSEC_RXSC_CFG, &reg);
+ if (rx_sc->rx_sc->active && en)
+ reg |= MACSEC_RXSC_CFG_SCI_EN;
+ else
+ reg &= ~MACSEC_RXSC_CFG_SCI_EN;
+ nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, reg);
+}
+
+static int nxp_c45_tx_sc_en_flt(struct phy_device *phydev, int secy_id, bool en)
+{
+ u32 tx_flt_base = TX_FLT_BASE(secy_id);
+ u32 reg = 0;
+
+ nxp_c45_macsec_read(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), &reg);
+ if (en)
+ reg |= TX_SC_FLT_EN;
+ else
+ reg &= ~TX_SC_FLT_EN;
+ nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), reg);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_dev_open(struct macsec_context *ctx)
+{
+ struct nxp_c45_phy *priv = ctx->phydev->priv;
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_secy *phy_secy;
+ int any_bit_set;
+ u32 reg = 0;
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ nxp_c45_tx_sc_en_flt(phydev, phy_secy->secy_id, true);
+ nxp_c45_macsec_cfg_ptp(phydev, phy_secy->point_to_point);
+ if (phy_secy->rx_sc)
+ nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, true);
+
+ any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
+ if (any_bit_set == TX_SC_MAX) {
+ nxp_c45_macsec_read(phydev, MACSEC_CFG, &reg);
+ reg |= MACSEC_CFG_BYPASS;
+ nxp_c45_macsec_write(phydev, MACSEC_CFG, reg);
+ }
+
+ set_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_dev_stop(struct macsec_context *ctx)
+{
+ struct nxp_c45_phy *priv = ctx->phydev->priv;
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_secy *phy_secy;
+ int any_bit_set;
+ u32 reg = 0;
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ nxp_c45_tx_sc_en_flt(phydev, phy_secy->secy_id, false);
+ if (phy_secy->rx_sc)
+ nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, false);
+ nxp_c45_macsec_cfg_ptp(phydev, false);
+
+ clear_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
+ any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
+ if (any_bit_set == TX_SC_MAX) {
+ nxp_c45_macsec_read(phydev, MACSEC_CFG, &reg);
+ reg &= ~MACSEC_CFG_BYPASS;
+ nxp_c45_macsec_write(phydev, MACSEC_CFG, reg);
+ }
+
+ return 0;
+}
+
+static int nxp_c45_tx_sc_set_flt(struct macsec_context *ctx, int secy_id)
+{
+ const u8 *dev_addr = ctx->secy->netdev->dev_addr;
+ u32 tx_flt_base = TX_FLT_BASE(secy_id);
+ u32 mac_sa;
+
+ mac_sa = dev_addr[0] << 8 | dev_addr[1];
+ nxp_c45_macsec_write(ctx->phydev, TX_SC_FLT_MAC_DA_SA(tx_flt_base),
+ mac_sa);
+ mac_sa = dev_addr[5] | dev_addr[4] << 8 |
+ dev_addr[3] << 16 | dev_addr[2] << 24;
+
+ nxp_c45_macsec_write(ctx->phydev, TX_SC_FLT_MAC_SA(tx_flt_base),
+ mac_sa);
+ nxp_c45_macsec_write(ctx->phydev, TX_SC_FLT_MAC_CFG(tx_flt_base),
+ TX_SC_FLT_BY_SA | TX_SC_FLT_EN | secy_id);
+
+ return 0;
+}
+
+static bool nxp_c45_port_valid(struct nxp_c45_secy *phy_secy, u16 port)
+{
+ if (phy_secy->secy->tx_sc.end_station &&
+ __be16_to_cpu((__force __be16)port) != 1)
+ return false;
+
+ return true;
+}
+
+static bool nxp_c45_rx_sc_valid(struct nxp_c45_secy *phy_secy,
+ struct macsec_rx_sc *rx_sc)
+{
+ u16 port = (__force u64)rx_sc->sci >> (ETH_ALEN * 8);
+
+ if (phy_secy->point_to_point && phy_secy->secy_id != 0)
+ return false;
+
+ return nxp_c45_port_valid(phy_secy, port);
+}
+
+static bool nxp_c45_secy_cfg_valid(struct nxp_c45_secy *phy_secy, bool can_ptp)
+{
+ u16 port = (__force u64)phy_secy->secy->sci >> (ETH_ALEN * 8);
+
+ if (phy_secy->secy->tx_sc.scb)
+ return false;
+
+ if (phy_secy->secy->tx_sc.send_sci && phy_secy->secy->tx_sc.end_station)
+ return false;
+
+ if (!phy_secy->secy->tx_sc.send_sci &&
+ !phy_secy->secy->tx_sc.end_station) {
+ if (!can_ptp)
+ return false;
+
+ if (phy_secy->secy_id != 0)
+ return false;
+
+ phy_secy->point_to_point = true;
+ } else {
+ phy_secy->point_to_point = false;
+ }
+
+ return nxp_c45_port_valid(phy_secy, port);
+}
+
+static int nxp_c45_update_tx_sc_secy_cfg(struct phy_device *phydev,
+ struct nxp_c45_secy *phy_secy)
+{
+ u32 cfg = 0;
+
+ nxp_c45_macsec_read(phydev, MACSEC_TXSC_CFG, &cfg);
+
+ phydev_dbg(phydev, "XPN %s\n", phy_secy->secy->xpn ? "on" : "off");
+ if (phy_secy->secy->xpn)
+ cfg |= MACSEC_TXSC_CFG_XPN;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_XPN;
+
+ phydev_dbg(phydev, "key len %u\n", phy_secy->secy->key_len);
+ if (phy_secy->secy->key_len == 32)
+ cfg |= MACSEC_TXSC_CFG_AES_256;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_AES_256;
+
+ phydev_dbg(phydev, "encryption %s\n",
+ phy_secy->secy->tx_sc.encrypt ? "on" : "off");
+ if (phy_secy->secy->tx_sc.encrypt)
+ cfg |= MACSEC_TXSC_CFG_ENCRYPT;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_ENCRYPT;
+
+ phydev_dbg(phydev, "protect frames %s\n",
+ phy_secy->secy->protect_frames ? "on" : "off");
+ if (phy_secy->secy->protect_frames)
+ cfg |= MACSEC_TXSC_CFG_PROTECT;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_PROTECT;
+
+ phydev_dbg(phydev, "send sci %s\n",
+ phy_secy->secy->tx_sc.send_sci ? "on" : "off");
+ if (phy_secy->secy->tx_sc.send_sci)
+ cfg |= MACSEC_TXSC_CFG_SEND_SCI;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_SEND_SCI;
+
+ phydev_dbg(phydev, "end station %s\n",
+ phy_secy->secy->tx_sc.end_station ? "on" : "off");
+ if (phy_secy->secy->tx_sc.end_station)
+ cfg |= MACSEC_TXSC_CFG_END_STATION;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_END_STATION;
+
+ phydev_dbg(phydev, "scb %s\n",
+ phy_secy->secy->tx_sc.scb ? "on" : "off");
+ if (phy_secy->secy->tx_sc.scb)
+ cfg |= MACSEC_TXSC_CFG_SCI;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_SCI;
+
+ nxp_c45_macsec_write(phydev, MACSEC_TXSC_CFG, cfg);
+
+ return 0;
+}
+
+static int nxp_c45_update_rx_sc_secy_cfg(struct phy_device *phydev,
+ struct nxp_c45_secy *phy_secy)
+{
+ struct nxp_c45_rx_sc *rx_sc = phy_secy->rx_sc;
+ struct nxp_c45_phy *priv = phydev->priv;
+ u32 cfg = 0;
+
+ nxp_c45_macsec_read(phydev, MACSEC_RXSC_CFG, &cfg);
+ cfg &= ~MACSEC_RXSC_CFG_VF_MASK;
+ cfg |= phy_secy->secy->validate_frames << MACSEC_RXSC_CFG_VF_OFF;
+
+ phydev_dbg(phydev, "validate frames %u\n",
+ phy_secy->secy->validate_frames);
+ phydev_dbg(phydev, "replay_protect %s window %u\n",
+ phy_secy->secy->replay_protect ? "on" : "off",
+ phy_secy->secy->replay_window);
+ if (phy_secy->secy->replay_protect) {
+ cfg |= MACSEC_RXSC_CFG_RP;
+ if (cfg & MACSEC_RXSC_CFG_SCI_EN) {
+ phydev_dbg(phydev, "RX SC enabled, window will not be updated\n");
+ } else {
+ phydev_dbg(phydev, "RX SC enabled, window will be updated\n");
+ nxp_c45_macsec_write(phydev, MACSEC_RPW,
+ phy_secy->secy->replay_window);
+ }
+ } else {
+ cfg &= ~MACSEC_RXSC_CFG_RP;
+ }
+
+ phydev_dbg(phydev, "rx_sc->active %s\n",
+ rx_sc->rx_sc->active ? "on" : "off");
+ if (rx_sc->rx_sc->active &&
+ test_bit(phy_secy->secy_id, priv->macsec->secy_bitmap))
+ cfg |= MACSEC_RXSC_CFG_SCI_EN;
+ else
+ cfg &= ~MACSEC_RXSC_CFG_SCI_EN;
+
+ phydev_dbg(phydev, "key len %u\n", phy_secy->secy->key_len);
+ if (phy_secy->secy->key_len == 32)
+ cfg |= MACSEC_RXSC_CFG_AES_256;
+ else
+ cfg &= ~MACSEC_RXSC_CFG_AES_256;
+
+ phydev_dbg(phydev, "XPN %s\n", phy_secy->secy->xpn ? "on" : "off");
+ if (phy_secy->secy->xpn)
+ cfg |= MACSEC_RXSC_CFG_XPN;
+ else
+ cfg &= ~MACSEC_RXSC_CFG_XPN;
+
+ nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, cfg);
+ return 0;
+}
+
+static int nxp_c45_update_key_status(struct phy_device *phydev,
+ struct nxp_c45_tx_sa *tx_sa)
+{
+ bool key_a = tx_sa->is_key_a;
+ u32 cfg = 0;
+
+ nxp_c45_macsec_read(phydev, MACSEC_TXSC_CFG, &cfg);
+
+ cfg &= ~MACSEC_TXSC_CFG_AN_MASK;
+ cfg |= tx_sa->an << MACSEC_TXSC_CFG_AN_OFF;
+
+ if (!key_a)
+ cfg |= MACSEC_TXSC_CFG_ASA;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_ASA;
+
+ tx_sa->is_enabled = tx_sa->tx_sa->active;
+ if (tx_sa->tx_sa->active)
+ cfg |= MACSEC_TXSC_CFG_SCE;
+ else
+ cfg &= ~MACSEC_TXSC_CFG_SCE;
+
+ nxp_c45_macsec_write(phydev, MACSEC_TXSC_CFG, cfg);
+
+ return 0;
+}
+
+static int nxp_c45_tx_sa_disable(struct phy_device *phydev,
+ struct nxp_c45_secy *phy_secy)
+{
+ u32 cfg = 0;
+
+ nxp_c45_macsec_read(phydev, MACSEC_TXSC_CFG, &cfg);
+ cfg &= ~MACSEC_TXSC_CFG_SCE;
+ nxp_c45_macsec_write(phydev, MACSEC_TXSC_CFG, cfg);
+
+ return 0;
+}
+
+static int nxp_c45_txsa_set_pn(struct phy_device *phydev,
+ struct nxp_c45_tx_sa *tx_sa)
+{
+ const struct nxp_c45_macsec_sa_regs *sa_regs;
+
+ sa_regs = nxp_c45_get_macsec_sa_regs(tx_sa->is_key_a);
+
+ nxp_c45_macsec_write(phydev, sa_regs->txsa_npn, tx_sa->next_pn);
+ nxp_c45_macsec_write(phydev, sa_regs->txsa_xnpn, tx_sa->next_pn >> 32);
+
+ return 0;
+}
+
+static int nxp_c45_txsa_get_pn(struct phy_device *phydev,
+ struct nxp_c45_tx_sa *tx_sa)
+{
+ const struct nxp_c45_macsec_sa_regs *sa_regs;
+ u32 reg = 0;
+
+ sa_regs = nxp_c45_get_macsec_sa_regs(tx_sa->is_key_a);
+
+ nxp_c45_macsec_read(phydev, sa_regs->txsa_npn, &reg);
+ tx_sa->next_pn = reg;
+ nxp_c45_macsec_read(phydev, sa_regs->txsa_xnpn, &reg);
+ tx_sa->next_pn |= (u64)reg << 32;
+
+ return 0;
+}
+
+static int nxp_c45_set_rxsa_key_cfg(struct macsec_context *ctx,
+ bool key_a, bool upd)
+{
+ const struct nxp_c45_macsec_sa_regs *sa_regs;
+ u64 npn = ctx->sa.rx_sa->next_pn;
+ u32 cfg;
+
+ sa_regs = nxp_c45_get_macsec_sa_regs(key_a);
+
+ if (npn && !upd) {
+ nxp_c45_macsec_write(ctx->phydev, sa_regs->rxsa_npn, npn);
+ nxp_c45_macsec_write(ctx->phydev, sa_regs->rxsa_lnpn, npn);
+ if (ctx->secy->xpn) {
+ nxp_c45_macsec_write(ctx->phydev, sa_regs->rxsa_xnpn,
+ npn >> 32);
+ nxp_c45_macsec_write(ctx->phydev, sa_regs->rxsa_lxnpn,
+ npn >> 32);
+ }
+ } else if (npn && upd) {
+ if (npn > ctx->secy->replay_window)
+ npn -= ctx->secy->replay_window;
+ else
+ npn = 1;
+
+ nxp_c45_macsec_write(ctx->phydev, sa_regs->rxsa_lnpn, npn);
+ if (ctx->secy->xpn)
+ nxp_c45_macsec_write(ctx->phydev, sa_regs->rxsa_lxnpn,
+ npn >> 32);
+ }
+
+ cfg = MACSEC_RXSA_CS_A | (ctx->sa.assoc_num << MACSEC_RXSA_CS_AN_OFF);
+ cfg |= ctx->sa.rx_sa->active ? MACSEC_RXSA_CS_EN : 0;
+ nxp_c45_macsec_write(ctx->phydev, sa_regs->rxsa_cs, cfg);
+
+ return 0;
+}
+
+static int nxp_c45_txsa_set_key(struct macsec_context *ctx,
+ struct nxp_c45_tx_sa *tx_sa)
+{
+ const struct nxp_c45_macsec_sa_regs *sa_regs;
+ u32 ssci = (__force u32)tx_sa->tx_sa->ssci;
+ u32 key_size = ctx->secy->key_len / 4;
+ u32 salt_size = MACSEC_SALT_LEN / 4;
+ u32 *salt = (u32 *)tx_sa->salt;
+ u32 *key = (u32 *)tx_sa->key;
+ u32 reg;
+ int i;
+
+ sa_regs = nxp_c45_get_macsec_sa_regs(tx_sa->is_key_a);
+
+ for (i = 0; i < key_size; i++) {
+ reg = sa_regs->txsa_ka + i * 4;
+ nxp_c45_macsec_write(ctx->phydev, reg,
+ (__force u32)cpu_to_be32(key[i]));
+ }
+
+ if (ctx->secy->xpn) {
+ for (i = 0; i < salt_size; i++) {
+ reg = sa_regs->txsa_salt + (2 - i) * 4;
+ nxp_c45_macsec_write(ctx->phydev, reg,
+ (__force u32)cpu_to_be32(salt[i]));
+ }
+
+ nxp_c45_macsec_write(ctx->phydev, sa_regs->txsa_ssci,
+ (__force u32)cpu_to_be32(ssci));
+ }
+
+ nxp_c45_macsec_write(ctx->phydev, sa_regs->txsa_cs, MACSEC_TXSA_CS_A);
+
+ return 0;
+}
+
+static int nxp_c45_commit_rx_sc_cfg(struct phy_device *phydev,
+ struct nxp_c45_secy *phy_secy)
+{
+ struct nxp_c45_rx_sc *rx_sc = phy_secy->rx_sc;
+ u64 sci = (__force u64)rx_sc->rx_sc->sci;
+
+ nxp_c45_macsec_write(phydev, MACSEC_RXSC_SCI_1H,
+ (__force u32)cpu_to_be32(sci));
+ nxp_c45_macsec_write(phydev, MACSEC_RXSC_SCI_2H,
+ (__force u32)cpu_to_be32(sci >> 32));
+
+ return nxp_c45_update_rx_sc_secy_cfg(phydev, phy_secy);
+}
+
+static int nxp_c45_disable_rxsa_key(struct phy_device *phydev, bool key_a)
+{
+ const struct nxp_c45_macsec_sa_regs *sa_regs;
+ u32 reg = 0;
+
+ sa_regs = nxp_c45_get_macsec_sa_regs(key_a);
+
+ nxp_c45_macsec_read(phydev, sa_regs->rxsa_cs, &reg);
+ reg &= ~MACSEC_RXSA_CS_EN;
+ nxp_c45_macsec_write(phydev, sa_regs->rxsa_cs, reg);
+
+ return 0;
+}
+
+static int nxp_c45_rx_sc_del(struct phy_device *phydev,
+ struct nxp_c45_rx_sc *rx_sc)
+{
+ nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_RXSC_SCI_1H, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_RXSC_SCI_2H, 0);
+ nxp_c45_macsec_write(phydev, MACSEC_RPW, 0);
+
+ if (rx_sc->rx_sa_a)
+ nxp_c45_disable_rxsa_key(phydev, true);
+
+ if (rx_sc->rx_sa_b)
+ nxp_c45_disable_rxsa_key(phydev, false);
+
+ return 0;
+}
+
+static int nxp_c45_set_rxsa_key(struct macsec_context *ctx, bool key_a)
+{
+ u32 *salt = (u32 *)ctx->sa.rx_sa->key.salt.bytes;
+ const struct nxp_c45_macsec_sa_regs *sa_regs;
+ u32 ssci = (__force u32)ctx->sa.rx_sa->ssci;
+ u32 key_size = ctx->secy->key_len / 4;
+ u32 salt_size = MACSEC_SALT_LEN / 4;
+ u32 *key = (u32 *)ctx->sa.key;
+ u32 reg;
+ int i;
+
+ sa_regs = nxp_c45_get_macsec_sa_regs(key_a);
+
+ for (i = 0; i < key_size; i++) {
+ reg = sa_regs->rxsa_ka + i * 4;
+ nxp_c45_macsec_write(ctx->phydev, reg,
+ (__force u32)cpu_to_be32(key[i]));
+ }
+
+ if (ctx->secy->xpn) {
+ for (i = 0; i < salt_size; i++) {
+ reg = sa_regs->rxsa_salt + (2 - i) * 4;
+ nxp_c45_macsec_write(ctx->phydev, reg,
+ (__force u32)cpu_to_be32(salt[i]));
+ }
+ nxp_c45_macsec_write(ctx->phydev, sa_regs->rxsa_ssci,
+ (__force u32)cpu_to_be32(ssci));
+ }
+
+ nxp_c45_set_rxsa_key_cfg(ctx, key_a, false);
+
+ return 0;
+}
+
+static void nxp_c45_tx_sc_clear(struct nxp_c45_secy *phy_secy)
+{
+ struct nxp_c45_tx_sa **tx_sa;
+ u8 i;
+
+ tx_sa = phy_secy->tx_sa;
+ for (i = 0; i < ARRAY_SIZE(phy_secy->tx_sa); i++) {
+ kfree(tx_sa[i]);
+ tx_sa[i] = NULL;
+ }
+}
+
+static int nxp_c45_mdo_add_secy(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ u64 sci = (__force u64)ctx->secy->sci;
+ struct nxp_c45_secy *phy_secy;
+ bool can_ptp;
+ int idx;
+ u32 reg;
+
+ phydev_dbg(ctx->phydev, "add secy SCI %llu\n", ctx->secy->sci);
+
+ if (!nxp_c45_mac_addr_free(ctx))
+ return -EBUSY;
+
+ if (nxp_c45_is_macsec_ptp_enabled(&priv->macsec->secy_list))
+ return -EBUSY;
+
+ idx = find_first_zero_bit(priv->macsec->tx_sc_bitmap, TX_SC_MAX);
+ if (idx == TX_SC_MAX)
+ return -EBUSY;
+
+ phy_secy = kzalloc(sizeof(*phy_secy), GFP_KERNEL);
+ if (!phy_secy)
+ return -ENOMEM;
+
+ phy_secy->secy = ctx->secy;
+ phy_secy->secy_id = idx;
+ phy_secy->enabled_an = ctx->secy->tx_sc.encoding_sa;
+ phy_secy->tx_sa_key_a = true;
+
+ /* If point-to-point mode is to be enabled, this SecY must be the
+ * only one configured.
+ */
+ can_ptp = list_count_nodes(&priv->macsec->secy_list) == 0;
+ if (!nxp_c45_secy_cfg_valid(phy_secy, can_ptp)) {
+ kfree(phy_secy);
+ return -EINVAL;
+ }
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_macsec_write(phydev, MACSEC_TXSC_SCI_1H,
+ (__force u32)cpu_to_be32(sci));
+ nxp_c45_macsec_write(phydev, MACSEC_TXSC_SCI_2H,
+ (__force u32)cpu_to_be32(sci >> 32));
+ nxp_c45_tx_sc_set_flt(ctx, phy_secy->secy_id);
+ nxp_c45_update_tx_sc_secy_cfg(phydev, phy_secy);
+ if (phy_interrupt_is_valid(phydev)) {
+ nxp_c45_macsec_read(phydev, MACSEC_EVER, &reg);
+ reg |= TX_SC_BIT(phy_secy->secy_id);
+ nxp_c45_macsec_write(phydev, MACSEC_EVER, reg);
+ }
+ set_bit(idx, priv->macsec->tx_sc_bitmap);
+ list_add_tail(&phy_secy->list, &priv->macsec->secy_list);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_upd_secy(struct macsec_context *ctx)
+{
+ struct nxp_c45_tx_sa *new_tx_sa, *old_tx_sa;
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ bool can_ptp;
+
+ phydev_dbg(phydev, "update secy SCI %llu\n", ctx->secy->sci);
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ if (!nxp_c45_mac_addr_free(ctx))
+ return -EBUSY;
+
+ /* If point-to-point mode is to be enabled, this SecY must be the
+ * only one configured.
+ */
+ can_ptp = list_count_nodes(&priv->macsec->secy_list) == 1;
+ if (!nxp_c45_secy_cfg_valid(phy_secy, can_ptp))
+ return -EINVAL;
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ nxp_c45_tx_sc_set_flt(ctx, phy_secy->secy_id);
+ nxp_c45_update_tx_sc_secy_cfg(phydev, phy_secy);
+
+ if (phy_secy->enabled_an != ctx->secy->tx_sc.encoding_sa) {
+ old_tx_sa = phy_secy->tx_sa[phy_secy->enabled_an];
+ phy_secy->enabled_an = ctx->secy->tx_sc.encoding_sa;
+ new_tx_sa = phy_secy->tx_sa[phy_secy->enabled_an];
+ if (!new_tx_sa) {
+ nxp_c45_tx_sa_disable(phydev, phy_secy);
+ goto disable_old_tx_sa;
+ }
+
+ if (!new_tx_sa->tx_sa->active) {
+ nxp_c45_tx_sa_disable(phydev, phy_secy);
+ goto disable_old_tx_sa;
+ }
+
+ new_tx_sa->is_key_a = phy_secy->tx_sa_key_a;
+ phy_secy->tx_sa_key_a = phy_secy->tx_sa_key_a;
+ nxp_c45_txsa_set_key(ctx, new_tx_sa);
+ nxp_c45_txsa_set_pn(phydev, new_tx_sa);
+ nxp_c45_update_key_status(phydev, new_tx_sa);
+
+disable_old_tx_sa:
+ if (old_tx_sa) {
+ old_tx_sa->is_enabled = false;
+ nxp_c45_txsa_get_pn(phydev, old_tx_sa);
+ }
+ }
+
+ if (test_bit(phy_secy->secy_id, priv->macsec->secy_bitmap))
+ nxp_c45_macsec_cfg_ptp(phydev, phy_secy->point_to_point);
+
+ if (phy_secy->rx_sc)
+ nxp_c45_update_rx_sc_secy_cfg(phydev, phy_secy);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_del_secy(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ u32 reg;
+
+ phydev_dbg(phydev, "delete secy SCI %llu\n", ctx->secy->sci);
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ nxp_c45_mdo_dev_stop(ctx);
+ nxp_c45_tx_sa_disable(phydev, phy_secy);
+ nxp_c45_tx_sc_clear(phy_secy);
+ if (phy_secy->rx_sc) {
+ nxp_c45_rx_sc_del(phydev, phy_secy->rx_sc);
+ kfree(phy_secy->rx_sc);
+ }
+
+ if (phy_interrupt_is_valid(phydev)) {
+ nxp_c45_macsec_read(phydev, MACSEC_EVER, &reg);
+ reg &= ~TX_SC_BIT(phy_secy->secy_id);
+ nxp_c45_macsec_write(phydev, MACSEC_EVER, reg);
+ }
+
+ clear_bit(phy_secy->secy_id, priv->macsec->tx_sc_bitmap);
+ list_del(&phy_secy->list);
+ kfree(phy_secy);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_add_rxsc(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_rx_sc *rx_sc;
+
+ phydev_dbg(phydev, "add RX SC %s\n",
+ ctx->rx_sc->active ? "enabled" : "disabled");
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ if (phy_secy->rx_sc)
+ return -ENOMEM;
+
+ if (!nxp_c45_rx_sc_valid(phy_secy, ctx->rx_sc))
+ return -EINVAL;
+
+ rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
+ if (!rx_sc)
+ return -ENOMEM;
+
+ phy_secy->rx_sc = rx_sc;
+ rx_sc->rx_sc = ctx->rx_sc;
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_commit_rx_sc_cfg(phydev, phy_secy);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_rx_sc *rx_sc;
+
+ phydev_dbg(phydev, "update RX SC %llu %s\n", ctx->rx_sc->sci,
+ ctx->rx_sc->active ? "enabled" : "disabled");
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ rx_sc = phy_secy->rx_sc;
+ if (rx_sc->rx_sc != ctx->rx_sc)
+ return -EINVAL;
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_commit_rx_sc_cfg(phydev, phy_secy);
+
+ return 0;
+}
+
+static int nxp_c45_mdo_del_rxsc(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_rx_sc *rx_sc;
+
+ phydev_dbg(phydev, "delete RX SC %llu %s\n", ctx->rx_sc->sci,
+ ctx->rx_sc->active ? "enabled" : "disabled");
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ rx_sc = phy_secy->rx_sc;
+ if (rx_sc->rx_sc != ctx->rx_sc)
+ return -EINVAL;
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ nxp_c45_rx_sc_del(phydev, rx_sc);
+ kfree(rx_sc);
+ phy_secy->rx_sc = NULL;
+
+ return 0;
+}
+
+static int nxp_c45_mdo_add_rxsa(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_rx_sc *rx_sc;
+ struct macsec_rx_sa *rx_sa;
+ u8 an = ctx->sa.assoc_num;
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ rx_sc = phy_secy->rx_sc;
+ if (rx_sc->rx_sc != ctx->sa.rx_sa->sc)
+ return -EINVAL;
+
+ rx_sa = ctx->sa.rx_sa;
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ if (!rx_sc->rx_sa_a) {
+ phydev_dbg(phydev, "add RX SA A %u %s\n",
+ an, rx_sa->active ? "enabled" : "disabled");
+ nxp_c45_set_rxsa_key(ctx, true);
+ rx_sc->rx_sa_a = rx_sa;
+ return 0;
+ }
+
+ if (!rx_sc->rx_sa_b) {
+ phydev_dbg(phydev, "add RX SA B %u %s\n",
+ an, rx_sa->active ? "enabled" : "disabled");
+ nxp_c45_set_rxsa_key(ctx, false);
+ rx_sc->rx_sa_b = rx_sa;
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+static int nxp_c45_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_rx_sc *rx_sc;
+ struct macsec_rx_sa *rx_sa;
+ u8 an = ctx->sa.assoc_num;
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ rx_sc = phy_secy->rx_sc;
+ if (rx_sc->rx_sc != ctx->sa.rx_sa->sc)
+ return -EINVAL;
+
+ rx_sa = ctx->sa.rx_sa;
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ if (rx_sc->rx_sa_a == rx_sa) {
+ phydev_dbg(phydev, "update RX SA A %u %s\n",
+ an, rx_sa->active ? "enabled" : "disabled");
+ nxp_c45_set_rxsa_key_cfg(ctx, true, true);
+ return 0;
+ }
+
+ if (rx_sc->rx_sa_b == rx_sa) {
+ phydev_dbg(phydev, "update RX SA B %u %s\n",
+ an, rx_sa->active ? "enabled" : "disabled");
+ nxp_c45_set_rxsa_key_cfg(ctx, false, true);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int nxp_c45_mdo_del_rxsa(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_rx_sc *rx_sc;
+ struct macsec_rx_sa *rx_sa;
+ u8 an = ctx->sa.assoc_num;
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ rx_sc = phy_secy->rx_sc;
+ if (rx_sc->rx_sc != ctx->sa.rx_sa->sc)
+ return -EINVAL;
+
+ rx_sa = ctx->sa.rx_sa;
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ if (rx_sc->rx_sa_a == rx_sa) {
+ phydev_dbg(phydev, "delete RX SA A %u %s\n",
+ an, rx_sa->active ? "enabled" : "disabled");
+ nxp_c45_disable_rxsa_key(phydev, true);
+ rx_sc->rx_sa_a = NULL;
+ return 0;
+ }
+
+ if (rx_sc->rx_sa_b == rx_sa) {
+ phydev_dbg(phydev, "delete RX SA B %u %s\n",
+ an, rx_sa->active ? "enabled" : "disabled");
+ nxp_c45_disable_rxsa_key(phydev, false);
+ rx_sc->rx_sa_b = NULL;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int nxp_c45_mdo_add_txsa(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_tx_sa *tx_sa;
+ u8 sa = ctx->sa.assoc_num;
+
+ phydev_dbg(phydev, "add TX SA %u %s\n", ctx->sa.assoc_num,
+ ctx->sa.tx_sa->active ? "enabled" : "disabled");
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ if (phy_secy->tx_sa[sa])
+ return -EBUSY;
+
+ tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
+ tx_sa->an = ctx->sa.assoc_num;
+ memcpy(tx_sa->key, ctx->sa.key, MACSEC_MAX_KEY_LEN);
+ memcpy(tx_sa->salt, ctx->sa.tx_sa->key.salt.bytes, MACSEC_SALT_LEN);
+ tx_sa->tx_sa = ctx->sa.tx_sa;
+ tx_sa->next_pn = ctx->sa.tx_sa->next_pn;
+ phy_secy->tx_sa[sa] = tx_sa;
+
+ if (tx_sa->an == phy_secy->enabled_an && tx_sa->tx_sa->active) {
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+ tx_sa->is_key_a = phy_secy->tx_sa_key_a;
+ phy_secy->tx_sa_key_a = !phy_secy->tx_sa_key_a;
+ nxp_c45_txsa_set_key(ctx, tx_sa);
+ nxp_c45_txsa_set_pn(phydev, tx_sa);
+ nxp_c45_update_key_status(phydev, tx_sa);
+ }
+
+ return 0;
+}
+
+static int nxp_c45_mdo_upd_txsa(struct macsec_context *ctx)
+{
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ u64 next_pn = ctx->sa.tx_sa->next_pn;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_tx_sa *tx_sa;
+ u8 sa = ctx->sa.assoc_num;
+
+ phydev_dbg(phydev, "update TX SA %u %s\n", ctx->sa.assoc_num,
+ ctx->sa.tx_sa->active ? "enabled" : "disabled");
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ tx_sa = phy_secy->tx_sa[sa];
+ if (!tx_sa)
+ return -EINVAL;
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ if (tx_sa->is_enabled && tx_sa->tx_sa->active && next_pn) {
+ tx_sa->next_pn = next_pn;
+ nxp_c45_txsa_set_pn(phydev, tx_sa);
+
+ return 0;
+ }
+
+ if (tx_sa->is_enabled && !tx_sa->tx_sa->active) {
+ if (next_pn)
+ tx_sa->next_pn = next_pn;
+ else
+ nxp_c45_txsa_get_pn(phydev, tx_sa);
+
+ nxp_c45_update_key_status(phydev, tx_sa);
+
+ return 0;
+ }
+
+ if (!tx_sa->is_enabled && tx_sa->tx_sa->active &&
+ tx_sa->an == phy_secy->enabled_an) {
+ if (next_pn)
+ tx_sa->next_pn = next_pn;
+
+ tx_sa->is_key_a = phy_secy->tx_sa_key_a;
+ phy_secy->tx_sa_key_a = !phy_secy->tx_sa_key_a;
+ nxp_c45_txsa_set_key(ctx, tx_sa);
+ nxp_c45_txsa_set_pn(phydev, tx_sa);
+ nxp_c45_update_key_status(phydev, tx_sa);
+
+ return 0;
+ }
+
+ if (!tx_sa->is_enabled && !tx_sa->tx_sa->active)
+ tx_sa->next_pn = next_pn;
+
+ return 0;
+}
+
+static int nxp_c45_mdo_del_txsa(struct macsec_context *ctx)
+{
+ struct macsec_tx_sa *ctx_sa = ctx->sa.tx_sa;
+ struct phy_device *phydev = ctx->phydev;
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *phy_secy;
+ struct nxp_c45_tx_sa *tx_sa;
+ u8 sa = ctx->sa.assoc_num;
+
+ phydev_dbg(phydev, "delete TX SA %u %s\n", sa,
+ ctx_sa->active ? "enabled" : "disabled");
+
+ phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
+ if (IS_ERR(phy_secy))
+ return PTR_ERR(phy_secy);
+
+ tx_sa = phy_secy->tx_sa[sa];
+ if (!tx_sa)
+ return -EINVAL;
+
+ nxp_c45_select_secy(phydev, phy_secy->secy_id);
+
+ if (tx_sa->is_enabled)
+ nxp_c45_update_key_status(phydev, tx_sa);
+
+ phy_secy->tx_sa[sa] = NULL;
+ kfree(tx_sa);
+
+ return 0;
+}
+
+static const struct macsec_ops nxp_c45_macsec_ops = {
+ .mdo_dev_open = nxp_c45_mdo_dev_open,
+ .mdo_dev_stop = nxp_c45_mdo_dev_stop,
+ .mdo_add_secy = nxp_c45_mdo_add_secy,
+ .mdo_upd_secy = nxp_c45_mdo_upd_secy,
+ .mdo_del_secy = nxp_c45_mdo_del_secy,
+ .mdo_add_rxsc = nxp_c45_mdo_add_rxsc,
+ .mdo_upd_rxsc = nxp_c45_mdo_upd_rxsc,
+ .mdo_del_rxsc = nxp_c45_mdo_del_rxsc,
+ .mdo_add_rxsa = nxp_c45_mdo_add_rxsa,
+ .mdo_upd_rxsa = nxp_c45_mdo_upd_rxsa,
+ .mdo_del_rxsa = nxp_c45_mdo_del_rxsa,
+ .mdo_add_txsa = nxp_c45_mdo_add_txsa,
+ .mdo_upd_txsa = nxp_c45_mdo_upd_txsa,
+ .mdo_del_txsa = nxp_c45_mdo_del_txsa,
+};
+
+int nxp_c45_macsec_probe(struct phy_device *phydev)
+{
+ struct nxp_c45_phy *priv = phydev->priv;
+
+ priv->macsec = kzalloc(sizeof(*priv->macsec), GFP_KERNEL);
+ if (!priv->macsec)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&priv->macsec->secy_list);
+ phydev->macsec_ops = &nxp_c45_macsec_ops;
+
+ return 0;
+}
+
+void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev,
+ irqreturn_t *ret)
+{
+ struct nxp_c45_phy *priv = phydev->priv;
+ struct nxp_c45_secy *pos, *tmp;
+ struct nxp_c45_tx_sa *tx_sa;
+ int secy_id;
+ u32 reg = 0;
+
+ if (!phydev->macsec_ops)
+ return;
+
+ do {
+ nxp_c45_macsec_read(phydev, MACSEC_EVR, &reg);
+ if (!reg)
+ return;
+
+ secy_id = MACSEC_REG_SIZE - ffs(reg);
+ list_for_each_entry_safe(pos, tmp, &priv->macsec->secy_list,
+ list)
+ if (pos->secy_id == secy_id)
+ break;
+
+ phydev_dbg(phydev, "pn_wrapped: tx sc %d, tx sa an %u\n",
+ pos->secy_id, pos->enabled_an);
+ tx_sa = pos->tx_sa[pos->enabled_an];
+ macsec_pn_wrapped(pos->secy, tx_sa->tx_sa);
+ nxp_c45_macsec_write(phydev, MACSEC_EVR,
+ TX_SC_BIT(pos->secy_id));
+ *ret = IRQ_HANDLED;
+ } while (reg);
+}
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 7ab080ff02df..5bf7caa4e63d 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -14,9 +14,10 @@
#include <linux/processor.h>
#include <linux/property.h>
#include <linux/ptp_classify.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
+#include "nxp-c45-tja11xx.h"
+
#define PHY_ID_TJA_1103 0x001BB010
#define PHY_ID_TJA_1120 0x001BB031
@@ -75,9 +76,11 @@
#define PORT_CONTROL_EN BIT(14)
#define VEND1_PORT_ABILITIES 0x8046
+#define MACSEC_ABILITY BIT(5)
#define PTP_ABILITY BIT(3)
#define VEND1_PORT_FUNC_IRQ_EN 0x807A
+#define MACSEC_IRQS BIT(5)
#define PTP_IRQS BIT(3)
#define VEND1_PTP_IRQ_ACK 0x9008
@@ -148,7 +151,6 @@
#define TS_SEC_MASK GENMASK(1, 0)
-#define VEND1_PORT_FUNC_ENABLES 0x8048
#define PTP_ENABLE BIT(3)
#define PHY_TEST_ENABLE BIT(0)
@@ -281,25 +283,6 @@ struct nxp_c45_phy_data {
irqreturn_t *irq_status);
};
-struct nxp_c45_phy {
- const struct nxp_c45_phy_data *phy_data;
- struct phy_device *phydev;
- struct mii_timestamper mii_ts;
- struct ptp_clock *ptp_clock;
- struct ptp_clock_info caps;
- struct sk_buff_head tx_queue;
- struct sk_buff_head rx_queue;
- /* used to access the PTP registers atomic */
- struct mutex ptp_lock;
- int hwts_tx;
- int hwts_rx;
- u32 tx_delay;
- u32 rx_delay;
- struct timespec64 extts_ts;
- int extts_index;
- bool extts;
-};
-
static const
struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
{
@@ -1218,12 +1201,25 @@ static int nxp_c45_start_op(struct phy_device *phydev)
static int nxp_c45_config_intr(struct phy_device *phydev)
{
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ int ret;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
+ if (ret)
+ return ret;
+
return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
- else
- return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
+ }
+
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
+ if (ret)
+ return ret;
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
}
static int tja1103_config_intr(struct phy_device *phydev)
@@ -1289,6 +1285,7 @@ static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
}
data->nmi_handler(phydev, &ret);
+ nxp_c45_handle_macsec_interrupt(phydev, &ret);
return ret;
}
@@ -1614,6 +1611,7 @@ static int nxp_c45_config_init(struct phy_device *phydev)
nxp_c45_counters_enable(phydev);
nxp_c45_ptp_init(phydev);
+ nxp_c45_macsec_config_init(phydev);
return nxp_c45_start_op(phydev);
}
@@ -1629,7 +1627,9 @@ static int nxp_c45_get_features(struct phy_device *phydev)
static int nxp_c45_probe(struct phy_device *phydev)
{
struct nxp_c45_phy *priv;
- int ptp_ability;
+ bool macsec_ability;
+ int phy_abilities;
+ bool ptp_ability;
int ret = 0;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
@@ -1645,9 +1645,9 @@ static int nxp_c45_probe(struct phy_device *phydev)
mutex_init(&priv->ptp_lock);
- ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_PORT_ABILITIES);
- ptp_ability = !!(ptp_ability & PTP_ABILITY);
+ phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_ABILITIES);
+ ptp_ability = !!(phy_abilities & PTP_ABILITY);
if (!ptp_ability) {
phydev_dbg(phydev, "the phy does not support PTP");
goto no_ptp_support;
@@ -1666,6 +1666,20 @@ static int nxp_c45_probe(struct phy_device *phydev)
}
no_ptp_support:
+ macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
+ if (!macsec_ability) {
+ phydev_info(phydev, "the phy does not support MACsec\n");
+ goto no_macsec_support;
+ }
+
+ if (IS_ENABLED(CONFIG_MACSEC)) {
+ ret = nxp_c45_macsec_probe(phydev);
+ phydev_dbg(phydev, "MACsec support enabled.");
+ } else {
+ phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
+ }
+
+no_macsec_support:
return ret;
}
diff --git a/drivers/net/phy/nxp-c45-tja11xx.h b/drivers/net/phy/nxp-c45-tja11xx.h
new file mode 100644
index 000000000000..905c5afb0a5e
--- /dev/null
+++ b/drivers/net/phy/nxp-c45-tja11xx.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* NXP C45 PHY driver header file
+ * Copyright 2023 NXP
+ * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
+ */
+
+#include <linux/ptp_clock_kernel.h>
+
+#define VEND1_PORT_FUNC_ENABLES 0x8048
+
+struct nxp_c45_macsec;
+
+struct nxp_c45_phy {
+ const struct nxp_c45_phy_data *phy_data;
+ struct phy_device *phydev;
+ struct mii_timestamper mii_ts;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info caps;
+ struct sk_buff_head tx_queue;
+ struct sk_buff_head rx_queue;
+ /* used to access the PTP registers atomic */
+ struct mutex ptp_lock;
+ int hwts_tx;
+ int hwts_rx;
+ u32 tx_delay;
+ u32 rx_delay;
+ struct timespec64 extts_ts;
+ int extts_index;
+ bool extts;
+ struct nxp_c45_macsec *macsec;
+};
+
+#if IS_ENABLED(CONFIG_MACSEC)
+void nxp_c45_macsec_config_init(struct phy_device *phydev);
+void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev,
+ irqreturn_t *ret);
+int nxp_c45_macsec_probe(struct phy_device *phydev);
+#else
+static inline
+void nxp_c45_macsec_config_init(struct phy_device *phydev)
+{
+}
+
+static inline
+void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev,
+ irqreturn_t *ret)
+{
+}
+
+static inline
+int nxp_c45_macsec_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
--
2.34.1
On Thu, Aug 24, 2023 at 12:16:13PM +0300, Radu Pirea (NXP OSS) wrote:
> Add MACsec support.
>
> The MACsec block has four TX SCs and four RX SCs. The driver supports up
> to four SecYs, each with one TX SC and one RX SC.
> The RX SCs can have two keys, key A and key B, written in hardware and
> enabled at the same time.
> The TX SCs can have two keys written in hardware, but only one can be
> active at a given time.
> On TX, the SC is selected using the MAC source address. Due to this
> selection mechanism, each offloaded netdev must have a unique MAC
> address.
> On RX, the SC is selected by the SCI (found in the SecTAG or calculated
> from the MAC SA), or RX SC 0 is used implicitly.
>
> Signed-off-by: Radu Pirea (NXP OSS) <radu-nicolae.pirea@oss.nxp.com>

...

> diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
> index c945ed9bd14b..ee53e2fdb968 100644
> --- a/drivers/net/phy/Makefile
> +++ b/drivers/net/phy/Makefile
> @@ -83,6 +83,10 @@ obj-$(CONFIG_MICROSEMI_PHY) += mscc/
>  obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm.o
>  obj-$(CONFIG_NATIONAL_PHY) += national.o
>  obj-$(CONFIG_NCN26000_PHY) += ncn26000.o
> +nxp-c45-tja11xx-objs += nxp-c45-tja11xx.o

Hi Radu,

The coincidence of "nxp-c45-tja11xx" on both sides of the "+=" operator
seems to cause a build failure (for x86_64 allmodconfig with gcc-13):

  Circular drivers/net/phy/nxp-c45-tja11xx.o <- drivers/net/phy/nxp-c45-tja11xx.o dependency dropped.

> +ifdef CONFIG_MACSEC
> +nxp-c45-tja11xx-objs += nxp-c45-tja11xx-macsec.o
> +endif
>  obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o
>  obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
>  obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o

...
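One way to break the collision would be to give the composite module a
name distinct from its constituent objects, e.g. by renaming the core
source file. A sketch (the nxp-c45-tja11xx-core.c name is hypothetical,
not something from this patch):

	# Hypothetical sketch: build nxp-c45-tja11xx.ko from a
	# differently-named core object so the module name no longer
	# matches one of the objects it is linked from.
	nxp-c45-tja11xx-objs += nxp-c45-tja11xx-core.o
	ifdef CONFIG_MACSEC
	nxp-c45-tja11xx-objs += nxp-c45-tja11xx-macsec.o
	endif
	obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o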
[Some of the questions I'm asking are probably dumb since I don't know
anything about phy drivers. Sorry if that's the case.]
General code organization nit: I think it would be easier to review
the code if helper functions were grouped by the type of object they
work on. All the RXSA-related functions together, all the TXSA
functions together, same for RXSC and then TXSC/SecY. Right now I see
some RXSA functions in a group of TXSA functions, another in the
middle of a group of RXSC functions. It makes navigating through the
code a bit less convenient.
Another nit: for consistency, it would be nice to stick to either
"tx_sa" or "txsa" (same for rxsa and rxsc) in function names.
2023-08-24, 12:16:13 +0300, Radu Pirea (NXP OSS) wrote:
> +static int nxp_c45_macsec_write(struct phy_device *phydev, u16 reg, u32 val)
> +{
> + WARN_ON_ONCE(reg % 4);
> +
> + reg = reg / 2;
> + phy_write_mmd(phydev, MDIO_MMD_VEND2,
> + VEND1_MACSEC_BASE + reg, val);
> + phy_write_mmd(phydev, MDIO_MMD_VEND2,
> + VEND1_MACSEC_BASE + reg + 1, val >> 16);
Can these calls fail? ie, do you need to handle errors like in
nxp_c45_macsec_read (and then in callers of nxp_c45_macsec_write)?
I see that no caller of nxp_c45_macsec_read actually checks the return
value, so maybe those errors don't matter.
[...]
> +void nxp_c45_macsec_config_init(struct phy_device *phydev)
> +{
> + if (!phydev->macsec_ops)
> + return;
> +
> + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
> + MACSEC_EN | ADAPTER_EN);
The calls to phy_set_bits_mmd() in nxp_c45_config_intr() have error
handling. Does this need error handling as well?
[...]
> +static bool nxp_c45_port_valid(struct nxp_c45_secy *phy_secy, u16 port)
> +{
> + if (phy_secy->secy->tx_sc.end_station &&
> + __be16_to_cpu((__force __be16)port) != 1)
> + return false;
> +
> + return true;
> +}
> +
> +static bool nxp_c45_rx_sc_valid(struct nxp_c45_secy *phy_secy,
> + struct macsec_rx_sc *rx_sc)
> +{
> + u16 port = (__force u64)rx_sc->sci >> (ETH_ALEN * 8);
u64 sci = be64_to_cpu((__force __be64)rx_sc->sci);
u16 port = (u16)sci;
And then drop the __be16_to_cpu conversion from nxp_c45_port_valid
> +
> + if (phy_secy->point_to_point && phy_secy->secy_id != 0)
> + return false;
> +
> + return nxp_c45_port_valid(phy_secy, port);
> +}
> +
> +static bool nxp_c45_secy_cfg_valid(struct nxp_c45_secy *phy_secy, bool can_ptp)
> +{
> + u16 port = (__force u64)phy_secy->secy->sci >> (ETH_ALEN * 8);
u64 sci = be64_to_cpu((__force __be64)rx_sc->sci);
u16 port = (u16)sci;
> + if (phy_secy->secy->tx_sc.scb)
> + return false;
[...]
> +static int nxp_c45_update_tx_sc_secy_cfg(struct phy_device *phydev,
> + struct nxp_c45_secy *phy_secy)
> +{
[...]
> + phydev_dbg(phydev, "scb %s\n",
> + phy_secy->secy->tx_sc.scb ? "on" : "off");
> + if (phy_secy->secy->tx_sc.scb)
> + cfg |= MACSEC_TXSC_CFG_SCI;
> + else
> + cfg &= ~MACSEC_TXSC_CFG_SCI;
Should that be called MACSEC_TXSC_CFG_SCB? I had to check that it
wasn't using the wrong constant, using "SCI" for "SCB" (when SCI is
already a well-defined thing in macsec) confused me.
> +
> + nxp_c45_macsec_write(phydev, MACSEC_TXSC_CFG, cfg);
> +
> + return 0;
> +}
> +
[...]
> +static int nxp_c45_set_rxsa_key(struct macsec_context *ctx, bool key_a)
> +{
> + u32 *salt = (u32 *)ctx->sa.rx_sa->key.salt.bytes;
> + const struct nxp_c45_macsec_sa_regs *sa_regs;
> + u32 ssci = (__force u32)ctx->sa.rx_sa->ssci;
> + u32 key_size = ctx->secy->key_len / 4;
> + u32 salt_size = MACSEC_SALT_LEN / 4;
> + u32 *key = (u32 *)ctx->sa.key;
> + u32 reg;
> + int i;
> +
> + sa_regs = nxp_c45_get_macsec_sa_regs(key_a);
> +
> + for (i = 0; i < key_size; i++) {
> + reg = sa_regs->rxsa_ka + i * 4;
> + nxp_c45_macsec_write(ctx->phydev, reg,
> + (__force u32)cpu_to_be32(key[i]));
> + }
> +
> + if (ctx->secy->xpn) {
> + for (i = 0; i < salt_size; i++) {
> + reg = sa_regs->rxsa_salt + (2 - i) * 4;
> + nxp_c45_macsec_write(ctx->phydev, reg,
> + (__force u32)cpu_to_be32(salt[i]));
> + }
> + nxp_c45_macsec_write(ctx->phydev, sa_regs->rxsa_ssci,
> + (__force u32)cpu_to_be32(ssci));
> + }
This looks basically identical to nxp_c45_txsa_set_key except for the
registers it writes to. It could be turned into 2 or 3 small helpers
(one for the key, then salt and ssci).
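A minimal sketch of what the shared part could look like, using the
register layout from this patch (the helper name and parameter list are
hypothetical):

	/* Hypothetical shared helper: write a key, and for XPN also the
	 * salt and SSCI, at the given register offsets. Both the TX SA
	 * and RX SA paths could call this with their sa_regs offsets.
	 */
	static void nxp_c45_sa_set_key(struct macsec_context *ctx, u16 ka_reg,
				       u16 salt_reg, u16 ssci_reg,
				       const u32 *key, const u32 *salt,
				       u32 ssci)
	{
		u32 key_size = ctx->secy->key_len / 4;
		u32 salt_size = MACSEC_SALT_LEN / 4;
		u32 i;

		/* The key is written word by word, byte-swapped to BE. */
		for (i = 0; i < key_size; i++)
			nxp_c45_macsec_write(ctx->phydev, ka_reg + i * 4,
					     (__force u32)cpu_to_be32(key[i]));

		if (!ctx->secy->xpn)
			return;

		/* Salt words are written in reverse order, as in the patch. */
		for (i = 0; i < salt_size; i++)
			nxp_c45_macsec_write(ctx->phydev,
					     salt_reg + (2 - i) * 4,
					     (__force u32)cpu_to_be32(salt[i]));

		nxp_c45_macsec_write(ctx->phydev, ssci_reg,
				     (__force u32)cpu_to_be32(ssci));
	}

Then nxp_c45_txsa_set_key and nxp_c45_set_rxsa_key would only keep
their own register selection and the final CS write.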
> +
> + nxp_c45_set_rxsa_key_cfg(ctx, key_a, false);
> +
> + return 0;
> +}
[...]
> +static int nxp_c45_mdo_add_secy(struct macsec_context *ctx)
> +{
> + struct phy_device *phydev = ctx->phydev;
> + struct nxp_c45_phy *priv = phydev->priv;
> + u64 sci = (__force u64)ctx->secy->sci;
> + struct nxp_c45_secy *phy_secy;
> + bool can_ptp;
> + int idx;
> + u32 reg;
> +
> + phydev_dbg(ctx->phydev, "add secy SCI %llu\n", ctx->secy->sci);
nit: %016llx feels more natural for SCIs since they can be broken down
into address+port.
And since it's stored in network byte order, you'll want to convert it
via be64_to_cpu before you print it out.
I'd suggest doing that directly into the sci variable:
u64 sci = be64_to_cpu((__force __be64)ctx->secy->sci);
and then adapt the uses of sci further down.
Feel free to move the sci_to_cpu function from
drivers/net/netdevsim/macsec.c to include/net/macsec.h and reuse it.
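For reference, that helper is essentially just the conversion needed
here:

	static u64 sci_to_cpu(sci_t sci)
	{
		return be64_to_cpu((__force __be64)sci);
	}

and the debug message would become:

	phydev_dbg(ctx->phydev, "add secy SCI %016llx\n",
		   sci_to_cpu(ctx->secy->sci));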
[...]
> +static int nxp_c45_mdo_upd_secy(struct macsec_context *ctx)
> +{
[...]
> + if (phy_secy->enabled_an != ctx->secy->tx_sc.encoding_sa) {
> + old_tx_sa = phy_secy->tx_sa[phy_secy->enabled_an];
> + phy_secy->enabled_an = ctx->secy->tx_sc.encoding_sa;
> + new_tx_sa = phy_secy->tx_sa[phy_secy->enabled_an];
> + if (!new_tx_sa) {
> + nxp_c45_tx_sa_disable(phydev, phy_secy);
> + goto disable_old_tx_sa;
> + }
> +
> + if (!new_tx_sa->tx_sa->active) {
> + nxp_c45_tx_sa_disable(phydev, phy_secy);
> + goto disable_old_tx_sa;
> + }
You can combine those two conditions into
if (!new_tx_sa || !new_tx_sa->tx_sa->active) {
nxp_c45_tx_sa_disable(phydev, phy_secy);
goto disable_old_tx_sa;
}
> +
> + new_tx_sa->is_key_a = phy_secy->tx_sa_key_a;
> + phy_secy->tx_sa_key_a = phy_secy->tx_sa_key_a;
Is this missing a ! on the right side?
Maybe worth creating a "next_sa_key_id" helper (or something like
that) that returns the current value and updates tx_sa_key_a, since
you use this pattern a few times.
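Something like this (hypothetical name):

	/* Return which hardware key slot (A or B) the next TX SA should
	 * use, and flip the toggle for the following installation.
	 */
	static bool nxp_c45_tx_sa_next_key(struct nxp_c45_secy *phy_secy)
	{
		bool key_a = phy_secy->tx_sa_key_a;

		phy_secy->tx_sa_key_a = !phy_secy->tx_sa_key_a;

		return key_a;
	}

so the call sites reduce to:

	new_tx_sa->is_key_a = nxp_c45_tx_sa_next_key(phy_secy);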
[...]
> +static int nxp_c45_mdo_add_rxsc(struct macsec_context *ctx)
> +{
> + struct phy_device *phydev = ctx->phydev;
> + struct nxp_c45_phy *priv = phydev->priv;
> + struct nxp_c45_secy *phy_secy;
> + struct nxp_c45_rx_sc *rx_sc;
> +
> + phydev_dbg(phydev, "add RX SC %s\n",
> + ctx->rx_sc->active ? "enabled" : "disabled");
If the HW/driver supports multiple TXSC/RXSC on the same device, it
would probably be helpful to add their SCIs to this debug message (and
the update/delete ones, also for the mdo_*_rxsa and mdo_*_txsa
functions).
[...]
> +static int nxp_c45_mdo_add_rxsa(struct macsec_context *ctx)
> +{
[...]
> + if (!rx_sc->rx_sa_b) {
> + phydev_dbg(phydev, "add RX SA B %u %s\n",
> + an, rx_sa->active ? "enabled" : "disabled");
> + nxp_c45_set_rxsa_key(ctx, false);
> + rx_sc->rx_sa_b = rx_sa;
> + return 0;
> + }
> +
> + return -ENOMEM;
maybe -ENOSPC would fit better?
> +}
> +
[...]
> +static int nxp_c45_mdo_add_txsa(struct macsec_context *ctx)
> +{
[...]
> + if (phy_secy->tx_sa[sa])
> + return -EBUSY;
> +
> + tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
missing NULL check
[...]
> +static int nxp_c45_mdo_del_txsa(struct macsec_context *ctx)
> +{
[...]
> +
> + phy_secy->tx_sa[sa] = NULL;
> + kfree(tx_sa);
tx_sa contains the key, so this needs to be kfree_sensitive, or add a
memzero_explicit(tx_sa->key) before freeing. Or if possible, don't
copy the key at all into tx_sa.
similar changes in the mscc driver:
1b16b3fdf675 ("net: phy: mscc: macsec: clear encryption keys when freeing a flow")
0dc33c65835d ("net: phy: mscc: macsec: do not copy encryption keys")
[...]
> diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
> index 7ab080ff02df..5bf7caa4e63d 100644
> --- a/drivers/net/phy/nxp-c45-tja11xx.c
> +++ b/drivers/net/phy/nxp-c45-tja11xx.c
[...]
> @@ -1218,12 +1201,25 @@ static int nxp_c45_start_op(struct phy_device *phydev)
>
> static int nxp_c45_config_intr(struct phy_device *phydev)
> {
> - if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
> + int ret;
> +
> + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
> + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
> + VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
> + if (ret)
> + return ret;
> +
> return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
> VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
Maybe a dumb question: should we be clearing the MACSEC_IRQS bits when
this 2nd call to phy_set_bits_mmd fails? (and same below, reset when
the 2nd clear fails)
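A sketch of that unwind (assuming the rollback itself is best-effort):

	ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
			       VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
	if (ret)
		/* Roll back the MACsec IRQ enable on failure. */
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);

	return ret;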
> - else
> - return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
> - VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
> + }
> +
> + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
> + VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
> + if (ret)
> + return ret;
> +
> + return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
> + VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
> }
[...]
> @@ -1666,6 +1666,20 @@ static int nxp_c45_probe(struct phy_device *phydev)
> }
>
> no_ptp_support:
> + macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
> + if (!macsec_ability) {
> + phydev_info(phydev, "the phy does not support MACsec\n");
> + goto no_macsec_support;
> + }
> +
> + if (IS_ENABLED(CONFIG_MACSEC)) {
> + ret = nxp_c45_macsec_probe(phydev);
I don't know how this probing is handled so maybe another dumb
question: if that fails, are we going to leak resources allocated
earlier? (devm_kzalloc for example)
> + phydev_dbg(phydev, "MACsec support enabled.");
> + } else {
> + phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
> + }
> +
> +no_macsec_support:
>
> return ret;
> }
--
Sabrina
On Fri, Aug 25, 2023 at 02:52:57PM +0200, Sabrina Dubroca wrote:
> [Some of the questions I'm asking are probably dumb since I don't know
> anything about phy drivers. Sorry if that's the case.]
>
> General code organization nit: I think it would be easier to review
> the code if helpers functions were grouped by the type of object they
> work on. All the RXSA-related functions together, all the TXSA
> functions together, same for RXSC and then TXSC/SecY. Right now I see
> some RXSA functions in a group of TXSA functions, another in the
> middle of a group of RXSC functions. It makes navigating through the
> code a bit less convenient.
For networking, and Linux in general, forward declarations are not
liked. Functions should appear before they are used. That places some
restrictions on ordering, but in general you can still group code
in meaningful ways.
> 2023-08-24, 12:16:13 +0300, Radu Pirea (NXP OSS) wrote:
> > +static int nxp_c45_macsec_write(struct phy_device *phydev, u16 reg, u32 val)
> > +{
> > + WARN_ON_ONCE(reg % 4);
> > +
> > + reg = reg / 2;
> > + phy_write_mmd(phydev, MDIO_MMD_VEND2,
> > + VEND1_MACSEC_BASE + reg, val);
> > + phy_write_mmd(phydev, MDIO_MMD_VEND2,
> > + VEND1_MACSEC_BASE + reg + 1, val >> 16);
>
> Can these calls fail? ie, do you need to handle errors like in
> nxp_c45_macsec_read (and then in callers of nxp_c45_macsec_write)?
Access to PHY devices can fail, but if it does, such failures are
generally fatal and there is no real recovery, also the next read/
write is also likely to fail. So we do recommend checking return codes
and just return the error up the stack. That failure might get trapped
up the stack, and turned into a phy_error() call which will disable
the PHY.
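For example, the write helper quoted above could propagate the first
failure (untested sketch):

	static int nxp_c45_macsec_write(struct phy_device *phydev, u16 reg,
					u32 val)
	{
		int ret;

		WARN_ON_ONCE(reg % 4);

		reg = reg / 2;
		/* Return the first failing phy_write_mmd() result so the
		 * error can travel up the stack.
		 */
		ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
				    VEND1_MACSEC_BASE + reg, val);
		if (ret)
			return ret;

		return phy_write_mmd(phydev, MDIO_MMD_VEND2,
				     VEND1_MACSEC_BASE + reg + 1, val >> 16);
	}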
> > +static bool nxp_c45_rx_sc_valid(struct nxp_c45_secy *phy_secy,
> > + struct macsec_rx_sc *rx_sc)
> > +{
> > + u16 port = (__force u64)rx_sc->sci >> (ETH_ALEN * 8);
>
> u64 sci = be64_to_cpu((__force __be64)rx_sc->sci);
why is the __force needed? What happens with a normal cast?
Andrew
2023-08-25, 15:29:30 +0200, Andrew Lunn wrote:
> On Fri, Aug 25, 2023 at 02:52:57PM +0200, Sabrina Dubroca wrote:
> > 2023-08-24, 12:16:13 +0300, Radu Pirea (NXP OSS) wrote:
> > > +static int nxp_c45_macsec_write(struct phy_device *phydev, u16 reg, u32 val)
> > > +{
> > > + WARN_ON_ONCE(reg % 4);
> > > +
> > > + reg = reg / 2;
> > > + phy_write_mmd(phydev, MDIO_MMD_VEND2,
> > > + VEND1_MACSEC_BASE + reg, val);
> > > + phy_write_mmd(phydev, MDIO_MMD_VEND2,
> > > + VEND1_MACSEC_BASE + reg + 1, val >> 16);
> >
> > Can these calls fail? ie, do you need to handle errors like in
> > nxp_c45_macsec_read (and then in callers of nxp_c45_macsec_write)?
>
> Access to PHY devices can fail, but if it does, such failures are
> generally fatal and there is no real recovery, also the next read/
> write is also likely to fail. So we do recommend checking return codes
> and just return the error up the stack. That failure might get trapped
> up the stack, and turned into a phy_error() call which will disable
> the PHY.
Ok, thanks. A lot of the calls to nxp_c45_macsec_write come from the
core macsec code (via mdo_*), so at least this part of the stack isn't
going to catch them. Either these errors can be caught directly in the
driver, or we'll have to ignore them (once we return from the driver
to the macsec core, we can't know if the error was fatal so we have to
assume it's not). And phy_error's doc says it can't be called under
phydev->lock, which we're holding in all those mdo_* functions (called
from macsec_offload()).
--
Sabrina
On 25.08.2023 16:29, Andrew Lunn wrote:
>>> +static bool nxp_c45_rx_sc_valid(struct nxp_c45_secy *phy_secy,
>>> + struct macsec_rx_sc *rx_sc)
>>> +{
>>> + u16 port = (__force u64)rx_sc->sci >> (ETH_ALEN * 8);
>>
>> u64 sci = be64_to_cpu((__force __be64)rx_sc->sci);
>
> why is the __force needed? What happens with a normal cast?
>
Sparse will print warnings if __force is missing.
> Andrew
>
--
Radu P.
> > > > +static bool nxp_c45_rx_sc_valid(struct nxp_c45_secy *phy_secy,
> > > > + struct macsec_rx_sc *rx_sc)
> > > > +{
> > > > + u16 port = (__force u64)rx_sc->sci >> (ETH_ALEN * 8);
> > >
> > > u64 sci = be64_to_cpu((__force __be64)rx_sc->sci);
> >
> > why is the __force needed? What happens with a normal cast?
> >
>
> Sparse will print warnings if __force is missing.
What is the warning? I just want to make sure __force is the correct
solution, not that something has the wrong type and we should be
fixing a design issue.
Andrew
On Fri, Aug 25, 2023 at 03:50:06PM +0200, Andrew Lunn wrote:
> > > > > +static bool nxp_c45_rx_sc_valid(struct nxp_c45_secy *phy_secy,
> > > > > + struct macsec_rx_sc *rx_sc)
> > > > > +{
> > > > > + u16 port = (__force u64)rx_sc->sci >> (ETH_ALEN * 8);
> > > >
> > > > u64 sci = be64_to_cpu((__force __be64)rx_sc->sci);
> > >
> > > why is the __force needed? What happens with a normal cast?
> > >
> >
> > Sparse will print warnings if __force is missing.
>
> What is the warning? I just want to make sure __force is the correct
> solution, not that something has the wrong type and we should be
> fixing a design issue.
Hi Andrew,
rx_sc->sci is sci_t, which is defined as:
typedef u64 __bitwise sci_t;
Sparse documentation (Documentation/dev-tools/sparse.rst) states that:
"__bitwise" is a type attribute, so you have to do something like this::
...
which makes PM_SUSPEND and PM_RESUME "bitwise" integers (the "__force"
is there because sparse will complain about casting to/from a bitwise
type, but in this case we really _do_ want to force the conversion).
So basically, sci is a bitwise type, which means sparse gives it
special properties that ensures it can only be operated on using
similarly typed integers.
So, those __force casts are needed to convert sci to something else.
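A minimal illustration (not from the patch):

	typedef u64 __bitwise sci_t;	/* as in include/net/macsec.h */

	sci_t sci;

	u64 a = (u64)sci;		/* sparse: cast from restricted sci_t */
	u64 b = (__force u64)sci;	/* __force silences the warning */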
--
RMK's Patch system: https://www.armlinux.org.uk/developer/patches/
FTTP is here! 80Mbps down 10Mbps up. Decent connectivity at last!
On 25.08.2023 16:50, Andrew Lunn wrote:
>>>>> +static bool nxp_c45_rx_sc_valid(struct nxp_c45_secy *phy_secy,
>>>>> + struct macsec_rx_sc *rx_sc)
>>>>> +{
>>>>> + u16 port = (__force u64)rx_sc->sci >> (ETH_ALEN * 8);
>>>>
>>>> u64 sci = be64_to_cpu((__force __be64)rx_sc->sci);
>>>
>>> why is the __force needed? What happens with a normal cast?
>>>
>>
>> Sparse will print warnings if __force is missing.
>
> What is the warning? I just want to make sure __force is the correct
> solution, not that something has the wrong type and we should be
> fixing a design issue.
Let's consider the following example:
Function declaration:
static int nxp_c45_macsec_write(struct phy_device *phydev, u16 reg,
u32 val)
Call without __force:
nxp_c45_macsec_write(ctx->phydev, sa_regs->txsa_ssci,
(u32)cpu_to_be32(ssci));
Warning:
drivers/net/phy/nxp-c45-tja11xx-macsec.c:803:39: warning: cast from
restricted __be32
Even if I write another function that takes a __be32 as a parameter,
I will need to silence sparse for the phy_write_mmd calls.
And in the following example sparse will complain because of the sci_t
to __be64 conversion:
u64 sci = be64_to_cpu((__force __be64)rx_sc->sci);
> Andrew
>
--
Radu P.