Add support for the Ethernet Media Access Controller found in the J-Core
family of SoCs.
Signed-off-by: Artur Rojek <contact@artur-rojek.eu>
---
drivers/net/ethernet/Kconfig | 12 +
drivers/net/ethernet/Makefile | 1 +
drivers/net/ethernet/jcore_emac.c | 391 ++++++++++++++++++++++++++++++
3 files changed, 404 insertions(+)
create mode 100644 drivers/net/ethernet/jcore_emac.c
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index f86d4557d8d7..0d55d8794f47 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -159,6 +159,18 @@ config ETHOC
help
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
+config JCORE_EMAC
+ tristate "J-Core Ethernet MAC support"
+ depends on CPU_J2 || COMPILE_TEST
+ depends on HAS_IOMEM
+ select REGMAP_MMIO
+ help
+ This enables support for the Ethernet Media Access Controller found
+ in the J-Core family of SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called jcore_emac.
+
config OA_TC6
tristate "OPEN Alliance TC6 10BASE-T1x MAC-PHY support" if COMPILE_TEST
depends on SPI
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 67182339469a..e1e03a1d47a6 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
obj-$(CONFIG_NET_VENDOR_MICROSOFT) += microsoft/
obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
+obj-$(CONFIG_JCORE_EMAC) += jcore_emac.o
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_KORINA) += korina.o
obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
diff --git a/drivers/net/ethernet/jcore_emac.c b/drivers/net/ethernet/jcore_emac.c
new file mode 100644
index 000000000000..fbfac4b16d6d
--- /dev/null
+++ b/drivers/net/ethernet/jcore_emac.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ethernet MAC driver for the J-Core family of SoCs.
+ * Based on SEI MAC driver by Oleksandr G Zhadan / Smart Energy Instruments Inc.
+ * Copyright (c) 2025 Artur Rojek <contact@artur-rojek.eu>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define JCORE_EMAC_CONTROL 0x0
+#define JCORE_EMAC_TX_LEN 0x4
+#define JCORE_EMAC_MACL 0x8
+#define JCORE_EMAC_MACH 0xc
+#define JCORE_EMAC_MCAST_MASK(n) (0x60 + ((n) * 4))
+#define JCORE_EMAC_RX_BUF 0x1000
+#define JCORE_EMAC_TX_BUF 0x1800
+
+#define JCORE_EMAC_ENABLE_RX BIT(1)
+#define JCORE_EMAC_BUSY BIT(2)
+#define JCORE_EMAC_MCAST BIT(3)
+#define JCORE_EMAC_READ BIT(4)
+#define JCORE_EMAC_ENABLE_INT_RX BIT(5)
+#define JCORE_EMAC_ENABLE_INT_TX BIT(6)
+#define JCORE_EMAC_PROMISC BIT(7)
+#define JCORE_EMAC_COMPLETE BIT(8)
+#define JCORE_EMAC_CRC_ERR BIT(9)
+#define JCORE_EMAC_PKT_LEN GENMASK(26, 16)
+
+#define JCORE_EMAC_RX_BUFFERS 4
+#define JCORE_EMAC_TX_TIMEOUT (2 * USEC_PER_SEC)
+#define JCORE_EMAC_MCAST_ADDRS 4
+
+/* Driver state, allocated as netdev private data. */
+struct jcore_emac {
+	void __iomem *base;		/* MMIO base; packet buffers accessed directly */
+	struct regmap *map;		/* register access (MMIO regmap, spinlocked) */
+	struct net_device *ndev;
+	/* Software counters, guarded by syncp for 64-bit consistency. */
+	struct {
+		struct u64_stats_sync syncp;
+		u64 rx_packets;
+		u64 tx_packets;
+		u64 rx_bytes;
+		u64 tx_bytes;
+		u64 rx_dropped;
+		u64 rx_errors;
+		u64 rx_crc_errors;
+	} stats;
+};
+
+/*
+ * RX interrupt handler: drain up to JCORE_EMAC_RX_BUFFERS completed
+ * ping-pong buffers, then fold the per-invocation counters into the
+ * driver stats under a single u64_stats section.
+ */
+static irqreturn_t jcore_emac_irq(int irq, void *data)
+{
+	unsigned int status, pkt_len, i;
+	struct jcore_emac *priv = data;
+	struct net_device *ndev;
+	/* Local tally; counters are u64 to match the driver-wide stats. */
+	struct {
+		u64 crc_errors;
+		u64 dropped;
+		u64 packets;
+		u64 bytes;
+	} stats = {};
+	struct sk_buff *skb;
+
+	ndev = priv->ndev;
+
+	for (i = 0; i < JCORE_EMAC_RX_BUFFERS; i++) {
+		regmap_read(priv->map, JCORE_EMAC_CONTROL, &status);
+
+		if (!(status & JCORE_EMAC_COMPLETE))
+			break;
+
+		/* Handle the next RX ping-pong buffer. */
+		if (status & JCORE_EMAC_CRC_ERR) {
+			stats.dropped++;
+			stats.crc_errors++;
+			goto next;
+		}
+
+		skb = netdev_alloc_skb_ip_align(ndev, ndev->mtu +
+						ETH_HLEN + ETH_FCS_LEN);
+		if (!skb) {
+			stats.dropped++;
+			goto next;
+		}
+
+		pkt_len = FIELD_GET(JCORE_EMAC_PKT_LEN, status);
+		skb_put(skb, pkt_len);
+
+		memcpy_fromio(skb->data, priv->base + JCORE_EMAC_RX_BUF,
+			      pkt_len);
+		skb->dev = ndev;
+		skb->protocol = eth_type_trans(skb, ndev);
+
+		stats.packets++;
+		stats.bytes += pkt_len;
+
+		netif_rx(skb);
+
+next:
+		/* Acknowledge the buffer so hw can advance to the next one. */
+		regmap_set_bits(priv->map, JCORE_EMAC_CONTROL, JCORE_EMAC_READ);
+	}
+
+	u64_stats_update_begin(&priv->stats.syncp);
+	priv->stats.rx_packets += stats.packets;
+	priv->stats.rx_bytes += stats.bytes;
+	priv->stats.rx_dropped += stats.dropped;
+	priv->stats.rx_crc_errors += stats.crc_errors;
+	priv->stats.rx_errors += stats.crc_errors;
+	u64_stats_update_end(&priv->stats.syncp);
+
+	return IRQ_HANDLED;
+}
+
+/* Busy-wait (sleeping poll) for the controller to clear its BUSY flag. */
+static int jcore_emac_wait(struct jcore_emac *priv)
+{
+	unsigned int ctrl;
+
+	/* Poll every 100us; give up after JCORE_EMAC_TX_TIMEOUT usecs. */
+	return regmap_read_poll_timeout(priv->map, JCORE_EMAC_CONTROL, ctrl,
+					!(ctrl & JCORE_EMAC_BUSY), 100,
+					JCORE_EMAC_TX_TIMEOUT);
+}
+
+/*
+ * Clear every control bit, disabling RX and interrupts. The short sleep
+ * presumably lets the core settle before reprogramming — TODO confirm
+ * against the RTL.
+ */
+static void jcore_emac_reset(struct jcore_emac *priv)
+{
+	regmap_write(priv->map, JCORE_EMAC_CONTROL, 0);
+	usleep_range(10, 20);
+}
+
+/* ndo_open: reset the MAC, enable RX + RX interrupts, start the queue. */
+static int jcore_emac_open(struct net_device *ndev)
+{
+	struct jcore_emac *priv = netdev_priv(ndev);
+
+	/* Let any in-flight TX drain before the reset wipes control bits. */
+	if (jcore_emac_wait(priv))
+		return -ETIMEDOUT;
+
+	jcore_emac_reset(priv);
+	regmap_set_bits(priv->map, JCORE_EMAC_CONTROL,
+			JCORE_EMAC_ENABLE_RX | JCORE_EMAC_ENABLE_INT_RX |
+			JCORE_EMAC_READ);
+	regmap_clear_bits(priv->map, JCORE_EMAC_CONTROL, JCORE_EMAC_BUSY);
+
+	netif_start_queue(ndev);
+	/*
+	 * NOTE(review): the IP core exposes no MDIO interface, so link state
+	 * cannot be read from the PHY; carrier is forced on. Switch to
+	 * phylib/phylink once the hardware grows MDIO support.
+	 */
+	netif_carrier_on(ndev);
+
+	return 0;
+}
+
+/* ndo_stop: stop the queue, wait for pending TX, then disable the MAC. */
+static int jcore_emac_close(struct net_device *ndev)
+{
+	struct jcore_emac *priv = netdev_priv(ndev);
+
+	netif_stop_queue(ndev);
+	netif_carrier_off(ndev);
+
+	/* Let an in-flight frame finish before shutting the MAC down. */
+	if (jcore_emac_wait(priv))
+		return -ETIMEDOUT;
+
+	jcore_emac_reset(priv);
+
+	return 0;
+}
+
+/*
+ * ndo_start_xmit: copy the frame into the single hardware TX buffer and
+ * kick the MAC by setting BUSY. Returns NETDEV_TX_BUSY if the previous
+ * frame has not drained within the poll timeout.
+ */
+static netdev_tx_t jcore_emac_start_xmit(struct sk_buff *skb,
+					 struct net_device *ndev)
+{
+	struct jcore_emac *priv = netdev_priv(ndev);
+	unsigned int tx_len;
+
+	if (jcore_emac_wait(priv))
+		return NETDEV_TX_BUSY;
+
+	memcpy_toio(priv->base + JCORE_EMAC_TX_BUF, skb->data, skb->len);
+
+	/*
+	 * Pad runt frames up to the minimum Ethernet frame size.
+	 * NOTE(review): bytes between skb->len and ETH_ZLEN are whatever is
+	 * left in the TX buffer from a previous frame — consider
+	 * skb_put_padto() to transmit zeroes instead.
+	 */
+	tx_len = max_t(unsigned int, skb->len, ETH_ZLEN);
+	regmap_write(priv->map, JCORE_EMAC_TX_LEN, tx_len);
+	regmap_set_bits(priv->map, JCORE_EMAC_CONTROL, JCORE_EMAC_BUSY);
+
+	u64_stats_update_begin(&priv->stats.syncp);
+	priv->stats.tx_packets++;
+	priv->stats.tx_bytes += skb->len;
+	u64_stats_update_end(&priv->stats.syncp);
+
+	consume_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+
+/*
+ * ndo_set_rx_mode: mirror promiscuous/allmulti flags into the control
+ * register and program up to JCORE_EMAC_MCAST_ADDRS multicast masks.
+ */
+static void jcore_emac_set_rx_mode(struct net_device *ndev)
+{
+	unsigned int reg, i, idx = 0, set_mask = 0, clear_mask = 0, addr = 0;
+	struct jcore_emac *priv = netdev_priv(ndev);
+	struct netdev_hw_addr *ha;
+	bool duplicate;
+
+	if (ndev->flags & IFF_PROMISC)
+		set_mask |= JCORE_EMAC_PROMISC;
+	else
+		clear_mask |= JCORE_EMAC_PROMISC;
+
+	if (ndev->flags & IFF_ALLMULTI)
+		set_mask |= JCORE_EMAC_MCAST;
+	else
+		clear_mask |= JCORE_EMAC_MCAST;
+
+	regmap_update_bits(priv->map, JCORE_EMAC_CONTROL, set_mask | clear_mask,
+			   set_mask);
+
+	if (!(ndev->flags & IFF_MULTICAST))
+		return;
+
+	netdev_for_each_mc_addr(ha, ndev) {
+		/* Only the first 3 octets are used in a hardware mcast mask. */
+		memcpy(&addr, ha->addr, 3);
+
+		/* Skip addresses already programmed into the hardware. */
+		duplicate = false;
+		for (i = 0; i < idx; i++) {
+			regmap_read(priv->map, JCORE_EMAC_MCAST_MASK(i), &reg);
+			if (reg == addr) {
+				duplicate = true;
+				break;
+			}
+		}
+		if (duplicate)
+			continue;
+
+		regmap_write(priv->map, JCORE_EMAC_MCAST_MASK(idx), addr);
+		if (++idx >= JCORE_EMAC_MCAST_ADDRS) {
+			netdev_warn(ndev, "Multicast list limit reached\n");
+			break;
+		}
+	}
+
+	/* Clear the remaining mask entries. */
+	for (i = idx; i < JCORE_EMAC_MCAST_ADDRS; i++)
+		regmap_write(priv->map, JCORE_EMAC_MCAST_MASK(i), 0);
+}
+
+/*
+ * Read the station MAC address from hardware. MACH holds octets 0-1 and
+ * MACL holds octets 2-5, most significant octet in the highest bits.
+ */
+static void jcore_emac_read_hw_addr(struct jcore_emac *priv, u8 *addr)
+{
+	unsigned int lo, hi;
+
+	regmap_read(priv->map, JCORE_EMAC_MACL, &lo);
+	regmap_read(priv->map, JCORE_EMAC_MACH, &hi);
+
+	addr[0] = hi >> 8;
+	addr[1] = hi;
+	addr[2] = lo >> 24;
+	addr[3] = lo >> 16;
+	addr[4] = lo >> 8;
+	addr[5] = lo;
+}
+
+/*
+ * Program the station MAC address. Layout mirrors the read path: octets
+ * 0-1 go to MACH, octets 2-5 to MACL, most significant octet highest.
+ */
+static void jcore_emac_write_hw_addr(struct jcore_emac *priv, u8 *addr)
+{
+	regmap_write(priv->map, JCORE_EMAC_MACH,
+		     addr[0] << 8 | addr[1]);
+	regmap_write(priv->map, JCORE_EMAC_MACL,
+		     addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5]);
+}
+
+/* ndo_set_mac_address: validate the new address, then program hw + netdev. */
+static int jcore_emac_set_mac_address(struct net_device *ndev, void *addr)
+{
+	struct jcore_emac *priv = netdev_priv(ndev);
+	struct sockaddr *sa = addr;
+	int ret;
+
+	/* Rejects invalid addresses and changes while that is not allowed. */
+	ret = eth_prepare_mac_addr_change(ndev, addr);
+	if (ret)
+		return ret;
+
+	jcore_emac_write_hw_addr(priv, sa->sa_data);
+	eth_hw_addr_set(ndev, sa->sa_data);
+
+	return 0;
+}
+
+/*
+ * ndo_get_stats64: snapshot the software counters. The fetch loop retries
+ * if the RX interrupt handler updated the counters concurrently.
+ */
+static void jcore_emac_get_stats64(struct net_device *ndev,
+				   struct rtnl_link_stats64 *stats)
+{
+	struct jcore_emac *priv = netdev_priv(ndev);
+	unsigned int start;
+
+	do {
+		start = u64_stats_fetch_begin(&priv->stats.syncp);
+		stats->rx_packets = priv->stats.rx_packets;
+		stats->tx_packets = priv->stats.tx_packets;
+		stats->rx_bytes = priv->stats.rx_bytes;
+		stats->tx_bytes = priv->stats.tx_bytes;
+		stats->rx_dropped = priv->stats.rx_dropped;
+		stats->rx_errors = priv->stats.rx_errors;
+		stats->rx_crc_errors = priv->stats.rx_crc_errors;
+	} while (u64_stats_fetch_retry(&priv->stats.syncp, start));
+}
+
+static const struct net_device_ops jcore_emac_netdev_ops = {
+	.ndo_open		= jcore_emac_open,
+	.ndo_stop		= jcore_emac_close,
+	.ndo_start_xmit		= jcore_emac_start_xmit,
+	.ndo_set_rx_mode	= jcore_emac_set_rx_mode,
+	.ndo_set_mac_address	= jcore_emac_set_mac_address,
+	.ndo_get_stats64	= jcore_emac_get_stats64,
+};
+
+/* All registers are 32-bit, 4-byte aligned, up to the last mcast mask. */
+static const struct regmap_config jcore_emac_regmap_cfg = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+	.max_register = JCORE_EMAC_MCAST_MASK(3),
+	.fast_io = true, /* Force spinlock for JCORE_EMAC_CONTROL ISR access. */
+};
+
+/*
+ * Probe: map registers, wire up the RX interrupt, put the MAC into a
+ * known state, pick a MAC address, and register the netdev. All
+ * resources are devm-managed, so no remove callback is needed.
+ */
+static int jcore_emac_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct net_device *ndev;
+	struct jcore_emac *priv;
+	u8 mac[ETH_ALEN];
+	unsigned int i;
+	int irq, ret;
+
+	ndev = devm_alloc_etherdev(dev, sizeof(*priv));
+	if (!ndev)
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(ndev, dev);
+
+	priv = netdev_priv(ndev);
+	priv->ndev = ndev;
+
+	priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->map = devm_regmap_init_mmio(dev, priv->base,
+					  &jcore_emac_regmap_cfg);
+	if (IS_ERR(priv->map))
+		return PTR_ERR(priv->map);
+
+	platform_set_drvdata(pdev, ndev);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	ret = devm_request_irq(dev, irq, jcore_emac_irq, 0, dev_name(dev),
+			       priv);
+	if (ret < 0)
+		return ret;
+
+	ndev->watchdog_timeo = usecs_to_jiffies(JCORE_EMAC_TX_TIMEOUT);
+	ndev->netdev_ops = &jcore_emac_netdev_ops;
+
+	/* Put hardware into a known state. */
+	jcore_emac_reset(priv);
+	for (i = 0; i < JCORE_EMAC_MCAST_ADDRS; i++)
+		regmap_write(priv->map, JCORE_EMAC_MCAST_MASK(i), 0);
+
+	/*
+	 * Reuse an address the bootloader may have programmed; fall back to
+	 * a random one if the hardware holds no valid unicast address.
+	 */
+	jcore_emac_read_hw_addr(priv, mac);
+	if (!is_valid_ether_addr(mac)) {
+		eth_random_addr(mac);
+		jcore_emac_write_hw_addr(priv, mac);
+	}
+	eth_hw_addr_set(ndev, mac);
+
+	ret = devm_register_netdev(dev, ndev);
+	if (ret)
+		return dev_err_probe(dev, ret, "Unable to register netdev\n");
+
+	return 0;
+}
+
+static const struct of_device_id jcore_emac_of_match[] = {
+	{ .compatible = "jcore,emac", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, jcore_emac_of_match);
+
+static struct platform_driver jcore_emac_driver = {
+	.driver = {
+		.name = "jcore-emac",
+		.of_match_table = jcore_emac_of_match,
+	},
+	.probe = jcore_emac_probe,
+	/* No .remove: every resource in probe() is devm-managed. */
+};
+
+module_platform_driver(jcore_emac_driver);
+MODULE_DESCRIPTION("Ethernet MAC driver for the J-Core family of SoCs");
+MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
+MODULE_LICENSE("GPL");
--
2.50.1
Hi Artur, kernel test robot noticed the following build errors: [auto build test ERROR on robh/for-next] [also build test ERROR on linus/master v6.17-rc2 next-20250819] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch#_base_tree_information] url: https://github.com/intel-lab-lkp/linux/commits/Artur-Rojek/dt-bindings-vendor-prefixes-Document-J-Core/20250816-042354 base: https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next patch link: https://lore.kernel.org/r/20250815194806.1202589-4-contact%40artur-rojek.eu patch subject: [PATCH 3/3] net: j2: Introduce J-Core EMAC config: m68k-randconfig-r113-20250819 (https://download.01.org/0day-ci/archive/20250820/202508200456.GIhKD5qv-lkp@intel.com/config) compiler: m68k-linux-gcc (GCC) 8.5.0 reproduce: (https://download.01.org/0day-ci/archive/20250820/202508200456.GIhKD5qv-lkp@intel.com/reproduce) If you fix the issue in a separate patch/commit (i.e. 
not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot <lkp@intel.com> | Closes: https://lore.kernel.org/oe-kbuild-all/202508200456.GIhKD5qv-lkp@intel.com/ All errors (new ones prefixed by >>): drivers/net/ethernet/jcore_emac.c: In function 'jcore_emac_set_rx_mode': >> drivers/net/ethernet/jcore_emac.c:230:1: error: label at end of compound statement next_ha: ^~~~~~~ vim +230 drivers/net/ethernet/jcore_emac.c 192 193 static void jcore_emac_set_rx_mode(struct net_device *ndev) 194 { 195 struct jcore_emac *priv = netdev_priv(ndev); 196 struct netdev_hw_addr *ha; 197 unsigned int reg, i, idx = 0, set_mask = 0, clear_mask = 0, addr = 0; 198 199 if (ndev->flags & IFF_PROMISC) 200 set_mask |= JCORE_EMAC_PROMISC; 201 else 202 clear_mask |= JCORE_EMAC_PROMISC; 203 204 if (ndev->flags & IFF_ALLMULTI) 205 set_mask |= JCORE_EMAC_MCAST; 206 else 207 clear_mask |= JCORE_EMAC_MCAST; 208 209 regmap_update_bits(priv->map, JCORE_EMAC_CONTROL, set_mask | clear_mask, 210 set_mask); 211 212 if (!(ndev->flags & IFF_MULTICAST)) 213 return; 214 215 netdev_for_each_mc_addr(ha, ndev) { 216 /* Only the first 3 octets are used in a hardware mcast mask. */ 217 memcpy(&addr, ha->addr, 3); 218 219 for (i = 0; i < idx; i++) { 220 regmap_read(priv->map, JCORE_EMAC_MCAST_MASK(i), ®); 221 if (reg == addr) 222 goto next_ha; 223 } 224 225 regmap_write(priv->map, JCORE_EMAC_MCAST_MASK(idx), addr); 226 if (++idx >= JCORE_EMAC_MCAST_ADDRS) { 227 netdev_warn(ndev, "Multicast list limit reached\n"); 228 break; 229 } > 230 next_ha: 231 } 232 233 /* Clear the remaining mask entries. */ 234 for (i = idx; i < JCORE_EMAC_MCAST_ADDRS; i++) 235 regmap_write(priv->map, JCORE_EMAC_MCAST_MASK(i), 0); 236 } 237 -- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
Hi Artur, kernel test robot noticed the following build warnings: [auto build test WARNING on robh/for-next] [also build test WARNING on linus/master v6.17-rc1 next-20250815] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch#_base_tree_information] url: https://github.com/intel-lab-lkp/linux/commits/Artur-Rojek/dt-bindings-vendor-prefixes-Document-J-Core/20250816-042354 base: https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next patch link: https://lore.kernel.org/r/20250815194806.1202589-4-contact%40artur-rojek.eu patch subject: [PATCH 3/3] net: j2: Introduce J-Core EMAC config: hexagon-allmodconfig (https://download.01.org/0day-ci/archive/20250816/202508161930.ergOga3z-lkp@intel.com/config) compiler: clang version 17.0.6 (https://github.com/llvm/llvm-project 6009708b4367171ccdbf4b5905cb6a803753fe18) reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250816/202508161930.ergOga3z-lkp@intel.com/reproduce) If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot <lkp@intel.com> | Closes: https://lore.kernel.org/oe-kbuild-all/202508161930.ergOga3z-lkp@intel.com/ All warnings (new ones prefixed by >>): >> drivers/net/ethernet/jcore_emac.c:231:2: warning: label at end of compound statement is a C2x extension [-Wc2x-extensions] 231 | } | ^ 1 warning generated. 
vim +231 drivers/net/ethernet/jcore_emac.c 192 193 static void jcore_emac_set_rx_mode(struct net_device *ndev) 194 { 195 struct jcore_emac *priv = netdev_priv(ndev); 196 struct netdev_hw_addr *ha; 197 unsigned int reg, i, idx = 0, set_mask = 0, clear_mask = 0, addr = 0; 198 199 if (ndev->flags & IFF_PROMISC) 200 set_mask |= JCORE_EMAC_PROMISC; 201 else 202 clear_mask |= JCORE_EMAC_PROMISC; 203 204 if (ndev->flags & IFF_ALLMULTI) 205 set_mask |= JCORE_EMAC_MCAST; 206 else 207 clear_mask |= JCORE_EMAC_MCAST; 208 209 regmap_update_bits(priv->map, JCORE_EMAC_CONTROL, set_mask | clear_mask, 210 set_mask); 211 212 if (!(ndev->flags & IFF_MULTICAST)) 213 return; 214 215 netdev_for_each_mc_addr(ha, ndev) { 216 /* Only the first 3 octets are used in a hardware mcast mask. */ 217 memcpy(&addr, ha->addr, 3); 218 219 for (i = 0; i < idx; i++) { 220 regmap_read(priv->map, JCORE_EMAC_MCAST_MASK(i), ®); 221 if (reg == addr) 222 goto next_ha; 223 } 224 225 regmap_write(priv->map, JCORE_EMAC_MCAST_MASK(idx), addr); 226 if (++idx >= JCORE_EMAC_MCAST_ADDRS) { 227 netdev_warn(ndev, "Multicast list limit reached\n"); 228 break; 229 } 230 next_ha: > 231 } 232 233 /* Clear the remaining mask entries. */ 234 for (i = idx; i < JCORE_EMAC_MCAST_ADDRS; i++) 235 regmap_write(priv->map, JCORE_EMAC_MCAST_MASK(i), 0); 236 } 237 -- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
On Fri, 15 Aug 2025 21:48:06 +0200 Artur Rojek <contact@artur-rojek.eu> wrote: > + struct { > + int packets; > + int bytes; > + int dropped; > + int crc_errors; > + } stats = {}; You don't want signed integer here. Probably u32 or u64.
> +static irqreturn_t jcore_emac_irq(int irq, void *data) > +{ > + struct jcore_emac *priv = data; > + struct net_device *ndev = priv->ndev; > + struct sk_buff *skb; > + struct { > + int packets; > + int bytes; > + int dropped; > + int crc_errors; > + } stats = {}; > + unsigned int status, pkt_len, i; netdev uses 'reverse christmas tree' for local variables. They should be sorted longest to shortest. This sometimes means you need to move assignments into the body of the function, in this case, ndev. > + jcore_emac_read_hw_addr(priv, mac); > + if (is_zero_ether_addr(mac)) { It would be more normal to use !is_valid_ether_addr() What support is there for MDIO? Normally the MAC driver would not be setting the carrier status, phylink or phylib would do that. Andrew --- pw-bot: cr
On 2025-08-15 22:16, Andrew Lunn wrote: Hi Andrew, thanks for the review! >> +static irqreturn_t jcore_emac_irq(int irq, void *data) >> +{ >> + struct jcore_emac *priv = data; >> + struct net_device *ndev = priv->ndev; >> + struct sk_buff *skb; >> + struct { >> + int packets; >> + int bytes; >> + int dropped; >> + int crc_errors; >> + } stats = {}; >> + unsigned int status, pkt_len, i; > > netdev uses 'reverse christmas tree' for local variables. They should > be sorted longest to shortest. This sometimes means you need to move > assignments into the body of the function, in this case, ndev. > >> + jcore_emac_read_hw_addr(priv, mac); >> + if (is_zero_ether_addr(mac)) { > > It would be more normal to use !is_valid_ether_addr() > > What support is there for MDIO? Normally the MAC driver would not be > setting the carrier status, phylink or phylib would do that. From what I can tell, none. This is a very simple FPGA RTL implementation of a MAC, and looking at the VHDL, I don't see any MDIO registers. Moreover, the MDIO pin on the PHY IC on my dev board also appears unconnected. Perhaps Rob L. or Jeff can shine more light on this design wise. Cheers, Artur > > Andrew > > --- > pw-bot: cr
On 2025-08-15 22:52, Artur Rojek wrote: > On 2025-08-15 22:16, Andrew Lunn wrote: > > Hi Andrew, > thanks for the review! > >>> +static irqreturn_t jcore_emac_irq(int irq, void *data) >>> +{ >>> + struct jcore_emac *priv = data; >>> + struct net_device *ndev = priv->ndev; >>> + struct sk_buff *skb; >>> + struct { >>> + int packets; >>> + int bytes; >>> + int dropped; >>> + int crc_errors; >>> + } stats = {}; >>> + unsigned int status, pkt_len, i; >> >> netdev uses 'reverse christmas tree' for local variables. They should >> be sorted longest to shortest. This sometimes means you need to move >> assignments into the body of the function, in this case, ndev. Should I move the struct stats members into stand alone variables as well? Or is below sorting acceptable with regards to stats vs skb: > unsigned int status, pkt_len, i; > struct jcore_emac *priv = data; > struct net_device *ndev; > struct { > int crc_errors; > int dropped; > int packets; > int bytes; > } stats = {}; > struct sk_buff *skb; >> >>> + jcore_emac_read_hw_addr(priv, mac); >>> + if (is_zero_ether_addr(mac)) { >> >> It would be more normal to use !is_valid_ether_addr() >> >> What support is there for MDIO? Normally the MAC driver would not be >> setting the carrier status, phylink or phylib would do that. > > From what I can tell, none. This is a very simple FPGA RTL > implementation of a MAC, and looking at the VHDL, I don't see any MDIO > registers. > Moreover, the MDIO pin on the PHY IC on my dev board also > appears unconnected. I spoke too soon on that one. It appears to be connected through a trace that goes under the IC. Nevertheless, I don't think MDIO support is in the IP core design. > Perhaps Rob L. or Jeff can shine more light on this > design wise. > > Cheers, > Artur > >> >> Andrew >> >> --- >> pw-bot: cr
On Fri, Aug 15, 2025 at 11:14:08PM +0200, Artur Rojek wrote: > On 2025-08-15 22:52, Artur Rojek wrote: > > On 2025-08-15 22:16, Andrew Lunn wrote: > > > > Hi Andrew, > > thanks for the review! > > > > > > +static irqreturn_t jcore_emac_irq(int irq, void *data) > > > > +{ > > > > + struct jcore_emac *priv = data; > > > > + struct net_device *ndev = priv->ndev; > > > > + struct sk_buff *skb; > > > > + struct { > > > > + int packets; > > > > + int bytes; > > > > + int dropped; > > > > + int crc_errors; > > > > + } stats = {}; > > > > + unsigned int status, pkt_len, i; > > > > > > netdev uses 'reverse christmas tree' for local variables. They should > > > be sorted longest to shortest. This sometimes means you need to move > > > assignments into the body of the function, in this case, ndev. > > Should I move the struct stats members into stand alone variables as > well? Or is below sorting acceptable with regards to stats vs skb: I would pull the structure definition out of the function. Then just create one instance of the structure on the stack. > > > What support is there for MDIO? Normally the MAC driver would not be > > > setting the carrier status, phylink or phylib would do that. > > > > From what I can tell, none. This is a very simple FPGA RTL > > implementation of a MAC, and looking at the VHDL, I don't see any MDIO > > registers. > > > Moreover, the MDIO pin on the PHY IC on my dev board also > > appears unconnected. > > I spoke too soon on that one. It appears to be connected through a trace > that goes under the IC. Nevertheless, I don't think MDIO support is in > the IP core design. MDIO is actually two pins. MDC and MDIO. It might be there is a second IP core which implements MDIO. There is no reason it needs to be tightly integrated into the MAC. But it does make the MAC driver slightly more complex. You then need a Linux MDIO bus driver for it, and the DT for the MAC would include a phy-handle property pointing to the PHY on the MDIO bus. 
Is there an Ethernet PHY on your board? Andrew
On 8/15/25 17:38, Andrew Lunn wrote: >>>> What support is there for MDIO? Normally the MAC driver would not be >>>> setting the carrier status, phylink or phylib would do that. >>> >>> From what I can tell, none. This is a very simple FPGA RTL >>> implementation of a MAC, and looking at the VHDL, I don't see any MDIO >>> registers. >> >>> Moreover, the MDIO pin on the PHY IC on my dev board also >>> appears unconnected. >> >> I spoke too soon on that one. It appears to be connected through a trace >> that goes under the IC. Nevertheless, I don't think MDIO support is in >> the IP core design. > > MDIO is actually two pins. MDC and MDIO. I asked Jeff and he pointed me at https://github.com/j-core/jcore-soc/blob/master/targets/boards/turtle_1v1/pad_ring.vhd#L732 and https://github.com/j-core/jcore-soc/blob/master/targets/pins/turtle_1v0.pins and said those two pins are "wired to zero". He also said: "It would only take a few hrs to add MDIO." but there basically hasn't been a use case yet. > It might be there is a second IP core which implements MDIO. There is > no reason it needs to be tightly integrated into the MAC. But it does > make the MAC driver slightly more complex. You then need a Linux MDIO > bus driver for it, and the DT for the MAC would include a phy-handle > property pointing to the PHY on the MDIO bus. > > Is there an Ethernet PHY on your board? According to https://github.com/j-core/jcore-jx/blob/master/schematic.pdf it's a https://www.micros.com.pl/mediaserver/info-uiip101a.pdf > Andrew Rob
On Sun, Aug 17, 2025 at 11:04:36AM -0500, Rob Landley wrote: > On 8/15/25 17:38, Andrew Lunn wrote: > > > > > What support is there for MDIO? Normally the MAC driver would not be > > > > > setting the carrier status, phylink or phylib would do that. > > > > > > > > From what I can tell, none. This is a very simple FPGA RTL > > > > implementation of a MAC, and looking at the VHDL, I don't see any MDIO > > > > registers. > > > > > > > Moreover, the MDIO pin on the PHY IC on my dev board also > > > > appears unconnected. > > > > > > I spoke too soon on that one. It appears to be connected through a trace > > > that goes under the IC. Nevertheless, I don't think MDIO support is in > > > the IP core design. > > > > MDIO is actually two pins. MDC and MDIO. > > I asked Jeff and he pointed me at https://github.com/j-core/jcore-soc/blob/master/targets/boards/turtle_1v1/pad_ring.vhd#L732 > and > https://github.com/j-core/jcore-soc/blob/master/targets/pins/turtle_1v0.pins > and said those two pins are "wired to zero". > > He also said: "It would only take a few hrs to add MDIO." but there > basically hasn't been a use case yet. Has anybody tried a link peer which only does 10Mbps? Or one which negotiates 100Mbs half duplex? Does the MAC still work, or is the link dead? That would be one use case, making the system more robust to such conditions. Andrew
On 2025-08-16 00:38, Andrew Lunn wrote: > On Fri, Aug 15, 2025 at 11:14:08PM +0200, Artur Rojek wrote: >> On 2025-08-15 22:52, Artur Rojek wrote: >> > On 2025-08-15 22:16, Andrew Lunn wrote: >> > >> > Hi Andrew, >> > thanks for the review! >> > >> > > > +static irqreturn_t jcore_emac_irq(int irq, void *data) >> > > > +{ >> > > > + struct jcore_emac *priv = data; >> > > > + struct net_device *ndev = priv->ndev; >> > > > + struct sk_buff *skb; >> > > > + struct { >> > > > + int packets; >> > > > + int bytes; >> > > > + int dropped; >> > > > + int crc_errors; >> > > > + } stats = {}; >> > > > + unsigned int status, pkt_len, i; >> > > >> > > netdev uses 'reverse christmas tree' for local variables. They should >> > > be sorted longest to shortest. This sometimes means you need to move >> > > assignments into the body of the function, in this case, ndev. >> >> Should I move the struct stats members into stand alone variables as >> well? Or is below sorting acceptable with regards to stats vs skb: > > I would pull the structure definition out of the function. Then just > create one instance of the structure on the stack. Makes sense, thanks. > >> > > What support is there for MDIO? Normally the MAC driver would not be >> > > setting the carrier status, phylink or phylib would do that. >> > >> > From what I can tell, none. This is a very simple FPGA RTL >> > implementation of a MAC, and looking at the VHDL, I don't see any MDIO >> > registers. >> >> > Moreover, the MDIO pin on the PHY IC on my dev board also >> > appears unconnected. >> >> I spoke too soon on that one. It appears to be connected through a >> trace >> that goes under the IC. Nevertheless, I don't think MDIO support is in >> the IP core design. > > MDIO is actually two pins. MDC and MDIO. > > It might be there is a second IP core which implements MDIO. There is > no reason it needs to be tightly integrated into the MAC. But it does > make the MAC driver slightly more complex. 
You then need a Linux MDIO > bus driver for it, and the DT for the MAC would include a phy-handle > property pointing to the PHY on the MDIO bus. > > Is there an Ethernet PHY on your board? Yes, it's an IC+ IP101ALF 10/100 Ethernet PHY [1]. It does have both MDC and MDIO pins connected, however I suspect that nothing really configures it, and it simply runs on default register values (which allow for valid operation in 100Mb/s mode, it seems). I doubt there is another IP core to handle MDIO, as this SoC design is optimized for minimal utilization of FPGA blocks. Does it make sense to you that a MAC could run without any access to an MDIO bus? If neither Rob L. or Jeff clarify on this topic, I'll hook up a logic analyzer to the MDIO bus and see if anything (e.g. loader firmware) touches it at any point. Cheers, Artur [1] https://www.micros.com.pl/mediaserver/info-uiip101a.pdf > > Andrew
> Yes, it's an IC+ IP101ALF 10/100 Ethernet PHY [1]. It does have both MDC > and MDIO pins connected, however I suspect that nothing really > configures it, and it simply runs on default register values (which > allow for valid operation in 100Mb/s mode, it seems). I doubt there is > another IP core to handle MDIO, as this SoC design is optimized for > minimal utilization of FPGA blocks. Does it make sense to you that a MAC > could run without any access to an MDIO bus? It can work like that. You will likely have problems if the link ever negotiates 10Mbps or 100Mbps half duplex. You generally need to change something in the MAC to support different speeds and duplex. Without being able to talk to the PHY over MDIO you have no idea what it has negotiated with the link peer. Andrew
On 2025-08-16 02:18, Andrew Lunn wrote: >> Yes, it's an IC+ IP101ALF 10/100 Ethernet PHY [1]. It does have both >> MDC >> and MDIO pins connected, however I suspect that nothing really >> configures it, and it simply runs on default register values (which >> allow for valid operation in 100Mb/s mode, it seems). I doubt there is >> another IP core to handle MDIO, as this SoC design is optimized for >> minimal utilization of FPGA blocks. Does it make sense to you that a >> MAC >> could run without any access to an MDIO bus? > > It can work like that. You will likely have problems if the link ever > negotiates 10Mbps or 100Mbps half duplex. You generally need to change > something in the MAC to support different speeds and duplex. Without > being able to talk to the PHY over MDIO you have no idea what it has > negotiated with the link peer. Thanks for the explanation. I just confirmed that there is no activity on the MDIO bus from board power on, up to the jcore_emac driver start (and past it), so most likely this SoC design does not provide any management interface between MAC and PHY. I guess once/if MDIO is implemented, we can distinguish between IP core revision compatibles, and properly switch between netif_carrier_*()/phylink logic. Cheers, Artur > > Andrew
On Aug 16, 2025, at 22:40, Artur Rojek <contact@artur-rojek.eu> wrote: The MDIO isn’t implemented yet. There is a pin driver for it, but it relies on pin strapping the Phy. Probably because all the designs that SoC base is in (IIRC 10 or so customer and prototype designs, plus Turtle and a few derivatives), the SoC was designed in conjunction with board. A bit lazy. But they all have the MDIO connected, so we should add it (it’s very simple). Cheers, J. > On 2025-08-16 02:18, Andrew Lunn wrote: >>> Yes, it's an IC+ IP101ALF 10/100 Ethernet PHY [1]. It does have both MDC >>> and MDIO pins connected, however I suspect that nothing really >>> configures it, and it simply runs on default register values (which >>> allow for valid operation in 100Mb/s mode, it seems). I doubt there is >>> another IP core to handle MDIO, as this SoC design is optimized for >>> minimal utilization of FPGA blocks. Does it make sense to you that a MAC >>> could run without any access to an MDIO bus? >> It can work like that. You will likely have problems if the link ever >> negotiates 10Mbps or 100Mbps half duplex. You generally need to change >> something in the MAC to support different speeds and duplex. Without >> being able to talk to the PHY over MDIO you have no idea what it has >> negotiated with the link peer. > > Thanks for the explanation. I just confirmed that there is no activity > on the MDIO bus from board power on, up to the jcore_emac driver start > (and past it), so most likely this SoC design does not provide any > management interface between MAC and PHY. I guess once/if MDIO is > implemented, we can distinguish between IP core revision compatibles, > and properly switch between netif_carrier_*()/phylink logic. > > Cheers, > Artur > >> Andrew
On 2025-08-17 06:29, D. Jeff Dionne wrote: > On Aug 16, 2025, at 22:40, Artur Rojek <contact@artur-rojek.eu> wrote: > > The MDIO isn’t implemented yet. There is a pin driver for it, but it > relies on > pin strapping the Phy. Probably because all the designs that SoC base > is in > (IIRC 10 or so customer and prototype designs, plus Turtle and a few > derivatives), the SoC was designed in conjunction with board. A bit > lazy. > > But they all have the MDIO connected, so we should add it (it’s very > simple). Hi Jeff, thanks for the elaboration. It sounds to me then that I should wait with the driver upstream until the MDIO interface is implemented. At least I gave you guys a little bit of a nudge :-) Cheers, Artur > > Cheers, > J. > >> On 2025-08-16 02:18, Andrew Lunn wrote: >>>> Yes, it's an IC+ IP101ALF 10/100 Ethernet PHY [1]. It does have both >>>> MDC >>>> and MDIO pins connected, however I suspect that nothing really >>>> configures it, and it simply runs on default register values (which >>>> allow for valid operation in 100Mb/s mode, it seems). I doubt there >>>> is >>>> another IP core to handle MDIO, as this SoC design is optimized for >>>> minimal utilization of FPGA blocks. Does it make sense to you that a >>>> MAC >>>> could run without any access to an MDIO bus? >>> It can work like that. You will likely have problems if the link ever >>> negotiates 10Mbps or 100Mbps half duplex. You generally need to >>> change >>> something in the MAC to support different speeds and duplex. Without >>> being able to talk to the PHY over MDIO you have no idea what it has >>> negotiated with the link peer. >> >> Thanks for the explanation. I just confirmed that there is no activity >> on the MDIO bus from board power on, up to the jcore_emac driver start >> (and past it), so most likely this SoC design does not provide any >> management interface between MAC and PHY. 
I guess once/if MDIO is >> implemented, we can distinguish between IP core revision compatibles, >> and properly switch between netif_carrier_*()/phylink logic. >> >> Cheers, >> Artur >> >>> Andrew
On Sat, Aug 16, 2025 at 03:40:57PM +0200, Artur Rojek wrote: > On 2025-08-16 02:18, Andrew Lunn wrote: > > > Yes, it's an IC+ IP101ALF 10/100 Ethernet PHY [1]. It does have both > > > MDC > > > and MDIO pins connected, however I suspect that nothing really > > > configures it, and it simply runs on default register values (which > > > allow for valid operation in 100Mb/s mode, it seems). I doubt there is > > > another IP core to handle MDIO, as this SoC design is optimized for > > > minimal utilization of FPGA blocks. Does it make sense to you that a > > > MAC > > > could run without any access to an MDIO bus? > > > > It can work like that. You will likely have problems if the link ever > > negotiates 10Mbps or 100Mbps half duplex. You generally need to change > > something in the MAC to support different speeds and duplex. Without > > being able to talk to the PHY over MDIO you have no idea what it has > > negotiated with the link peer. > > Thanks for the explanation. I just confirmed that there is no activity > on the MDIO bus from board power on, up to the jcore_emac driver start > (and past it), so most likely this SoC design does not provide any > management interface between MAC and PHY. I guess once/if MDIO is > implemented, we can distinguish between IP core revision compatibles, > and properly switch between netif_carrier_*()/phylink logic. How cut down of a SoC design is it? Is there pinmux and each pin can also be used for GPIO? Linux has software bit-banging MDIO, if you can make the two pins be standard Linux GPIOs, and can configure them correctly, i _think_ open drain on MDIO. It will be slow, but it works, and it is pretty much for free. MDIO itself is simple, just a big shift register: https://opencores.org/websvn/filedetails?repname=ethmac10g&path=%2Fethmac10g%2Ftrunk%2Frtl%2Fverilog%2Fmgmt%2Fmdio.v Andrew
On 8/16/25 10:04, Andrew Lunn wrote: > On Sat, Aug 16, 2025 at 03:40:57PM +0200, Artur Rojek wrote: >> On 2025-08-16 02:18, Andrew Lunn wrote: >>>> Yes, it's an IC+ IP101ALF 10/100 Ethernet PHY [1]. It does have both >>>> MDC >>>> and MDIO pins connected, however I suspect that nothing really >>>> configures it, and it simply runs on default register values (which >>>> allow for valid operation in 100Mb/s mode, it seems). I doubt there is >>>> another IP core to handle MDIO, as this SoC design is optimized for >>>> minimal utilization of FPGA blocks. Does it make sense to you that a >>>> MAC >>>> could run without any access to an MDIO bus? >>> >>> It can work like that. You will likely have problems if the link ever >>> negotiates 10Mbps or 100Mbps half duplex. You generally need to change >>> something in the MAC to support different speeds and duplex. Without >>> being able to talk to the PHY over MDIO you have no idea what it has >>> negotiated with the link peer. >> >> Thanks for the explanation. I just confirmed that there is no activity >> on the MDIO bus from board power on, up to the jcore_emac driver start >> (and past it), so most likely this SoC design does not provide any >> management interface between MAC and PHY. I guess once/if MDIO is >> implemented, we can distinguish between IP core revision compatibles, >> and properly switch between netif_carrier_*()/phylink logic. > > How cut down of a SoC design is it? The engineers focused on getting projects done for customers implemented what they actually needed, and had a todo list for potential future development that mostly hasn't come up yet. (Most of the boards I worked on aren't actually using ethernet, but doing their own derived protocol that's electrically isolated and includes timing information in each packet. Same RJ45 jack, I think it even uses the same transceiver chip, but different protocol and signaling.) > Is there pinmux and each pin can > also be used for GPIO? 
Linux has software bit-banging MDIO, if you can > make the two pins be standard Linux GPIOs, and can configure them > correctly, i _think_ open drain on MDIO. It will be slow, but it > works, and it is pretty much for free. > > MDIO itself is simple, just a big shift register: My vague recollection is this SOC only implemented full duplex 100baseT because they didn't have any hardware lying around that _couldn't_ talk to that. So it never needed to downshift to talk to anything they tested it against, and there were plenty of desktop switches if we wound up needing an adapter in future for some reason. (1995 was a while ago even back then.) The negotiation stuff was slated to be part of implementing gigabit ethernet, but 11 megabytes/second is actually pretty decent throughput for an individual endpoint so nothing's really needed it yet. Turtle's FPGA couldn't easily do gigabit anyway: a spartan 6 can handle a 50mhz phy interface but 125mhz for gigabit is pushing it. That really wants something like Kintex (WAY more expensive, and runs quite hot). Even with the SOC in ASIC, a gigabit phy chip is still more expensive and consumes more power so needs a reason to use it. There's been more interest in wifi and bluetooth, which you either get as its own chip or in a micro-sdio card because rolling your own implementation is a regulatory hellscape of spectrum compliance certifications in a zillion different jurisdictions. (Implementing it isn't that hard, getting permission to deploy it in a city is hard.) USB dongles are also available but consume WAY too much power. Oh, a few years ago we did a USB 2.0 implementation in a Turtle HAT: https://www.raspberrypi.com/news/introducing-raspberry-pi-hats/ And implemented a VHDL CDC-ECM ethernet device in the turtle SOC bitstream to test it out with, which did 40 megabytes/second sustained throughput no problem. 
(With not so much a "driver" as a quick and dirty userspace realtime program marshalling packets in and out of the hardware buffers to a TAP device. Both the USB and Ethernet PHY are basically parallel to serial converters so their outsides can clock way slower than the line speed of the protocol they transceive. The USB 2.0 one we used ran at 60mhz=3*5*2*2 and the ethernet needs 50mhz=5*5*2 so the voltage controlled oscillator could run at 3*5*5*2*2=300mhz and then get divided evenly down to drive both, and yes we had it run as a little gateway for testing.) Alas, while CDC-ECM using the reference vendor:device IDs out of the USB standards document works fine out of the box on both Linux and MacOS, Windows refuses to recognize it because Microsoft demanded a five figure license fee from each hardware vendor to get a driver signed. (Lina Khan didn't get around to them before the plutocrats plugged that hole in the pressure cooker.) In an attempt to bypass the bastardry we changed the VHDL to do RNDIS instead (microsoft's own admittedly inferior version of CDC-ECM that they let you implement using an existing driver), and Greg KH went "my turn": https://fosstodon.org/@kernellogger/109397395514594409 Which of course broke Android USB tethering. *shrug* The 100baseT one works for us. Rob P.S. Way back when, we did slap quick wifi on an existing design once as a proof of concept using a three inch cat5 cable going to a $15 tiny single port wifi router from a store down the street in Akihabara. The 100baseT connection worked fine. Alas the tiny router couldn't _not_ NAT, and writing software to automatically navigate its built-in web page to associate with an access point was a rathole we didn't go down. P.P.S. Oh, and we've connected boards over optical fiber using this transceiver chip. Those didn't call for mdio either, I'm not sure the VHDL changed at all, just board-level schematic changes. Technically that's 100baseF I believe.
> My vague recollection is this SOC only implemented full duplex 100baseT > because they didn't have any hardware lying around that _couldn't_ talk to > that. It is pretty unusual to find hardware, nowadays, which only does 10Mbps. So it is a somewhat theoretical use case. And as you say, 100Mbps is plenty fast for lots of applications. What we need to think about is the path forwards, how MDIO and PHY support can be added later, without breaking DT backwards compatibility. What you would normally do if there is no access to the PHY is use fixed-link. It emulates a PHY, one that is always up and at a fixed speed. See fixed-link in Documentation/devicetree/bindings/net/ethernet-controller.yaml That allows the MAC driver to use the phylib API. The MAC does not hard-code the carrier up, phylib tells the MAC link is up, and phylib manages the carrier. What this means is, if sometime in the future MDIO is added, and phylib gets access to the PHY, there are no MAC driver changes. Old DT blobs, using fixed-link still work, and new DT blobs with MDIO, a PHY node, and a phy-handle have working PHY. If you don't do this now, adding support later will be messy, if you don't want to break backwards compatibility with old DT blobs. Andrew
On 2025-08-16 17:04, Andrew Lunn wrote: > On Sat, Aug 16, 2025 at 03:40:57PM +0200, Artur Rojek wrote: >> On 2025-08-16 02:18, Andrew Lunn wrote: >> > > Yes, it's an IC+ IP101ALF 10/100 Ethernet PHY [1]. It does have both >> > > MDC >> > > and MDIO pins connected, however I suspect that nothing really >> > > configures it, and it simply runs on default register values (which >> > > allow for valid operation in 100Mb/s mode, it seems). I doubt there is >> > > another IP core to handle MDIO, as this SoC design is optimized for >> > > minimal utilization of FPGA blocks. Does it make sense to you that a >> > > MAC >> > > could run without any access to an MDIO bus? >> > >> > It can work like that. You will likely have problems if the link ever >> > negotiates 10Mbps or 100Mbps half duplex. You generally need to change >> > something in the MAC to support different speeds and duplex. Without >> > being able to talk to the PHY over MDIO you have no idea what it has >> > negotiated with the link peer. >> >> Thanks for the explanation. I just confirmed that there is no activity >> on the MDIO bus from board power on, up to the jcore_emac driver start >> (and past it), so most likely this SoC design does not provide any >> management interface between MAC and PHY. I guess once/if MDIO is >> implemented, we can distinguish between IP core revision compatibles, >> and properly switch between netif_carrier_*()/phylink logic. > > How cut down of a SoC design is it? Is there pinmux and each pin can > also be used for GPIO? It's pretty limited - there is no MMU or DMA, for example. There does appear to be a GPIO controller, however I'm not sure if it is of pinmux variety (whether pins used by an IP core can be multiplexed to PIO), or if it has its own pool of general purpose pins, that don't overlap with PHY. In any case, there is no Linux driver for it (I have interest to eventually write one), and I don't know if the design for it is even included in the bitstream on my board. 
> Linux has software bit-banging MDIO, if you can > make the two pins be standard Linux GPIOs, and can configure them > correctly, i _think_ open drain on MDIO. It will be slow, but it > works, and it is pretty much for free. This is a clever idea! It does however sound like Jeff & co. will eventually add a proper MDIO interface to this MAC. I will halt upstream of this driver for now and see what happens first (that, or me experimenting with GPIO). Thanks for the review of this series thus far! Cheers, Artur > > MDIO itself is simple, just a big shift register: > > https://opencores.org/websvn/filedetails?repname=ethmac10g&path=%2Fethmac10g%2Ftrunk%2Frtl%2Fverilog%2Fmgmt%2Fmdio.v > > Andrew
© 2016 - 2025 Red Hat, Inc.