[PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device

Posted by Yojana Mallik 1 year, 8 months ago
Register the RPMsg driver as a network device and add support for
basic Ethernet functionality by using the shared memory for the data
plane.

The shared memory layout is as below, with the region between
PKT_1_LEN and PKT_N modelled as a circular buffer.

-------------------------
|          HEAD         |
-------------------------
|          TAIL         |
-------------------------
|       PKT_1_LEN       |
|         PKT_1         |
-------------------------
|       PKT_2_LEN       |
|         PKT_2         |
-------------------------
|           .           |
|           .           |
-------------------------
|       PKT_N_LEN       |
|         PKT_N         |
-------------------------

The offset between the HEAD and TAIL is polled to process the Rx packets.
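
The number of pending Rx packets is derived from the two indices, with
wraparound, along these lines (an illustrative helper, not part of this
patch):

static u32 icve_ring_occupancy(u32 head, u32 tail, u32 ring_size)
{
	s32 num_pkts = head - tail;

	/* The producer index may have wrapped around before the consumer */
	return num_pkts >= 0 ? num_pkts : num_pkts + ring_size;
}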

Signed-off-by: Yojana Mallik <y-mallik@ti.com>
---
 drivers/net/ethernet/ti/icve_rpmsg_common.h   |  86 ++++
 drivers/net/ethernet/ti/inter_core_virt_eth.c | 453 +++++++++++++++++-
 drivers/net/ethernet/ti/inter_core_virt_eth.h |  35 +-
 3 files changed, 570 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/ti/icve_rpmsg_common.h b/drivers/net/ethernet/ti/icve_rpmsg_common.h
index 7cd157479d4d..2e3833de14bd 100644
--- a/drivers/net/ethernet/ti/icve_rpmsg_common.h
+++ b/drivers/net/ethernet/ti/icve_rpmsg_common.h
@@ -15,14 +15,58 @@ enum icve_msg_type {
 	ICVE_NOTIFY_MSG,
 };
 
+enum icve_rpmsg_type {
+	/* Request types */
+	ICVE_REQ_SHM_INFO = 0,
+	ICVE_REQ_SET_MAC_ADDR,
+
+	/* Response types */
+	ICVE_RESP_SHM_INFO,
+	ICVE_RESP_SET_MAC_ADDR,
+
+	/* Notification types */
+	ICVE_NOTIFY_PORT_UP,
+	ICVE_NOTIFY_PORT_DOWN,
+	ICVE_NOTIFY_PORT_READY,
+	ICVE_NOTIFY_REMOTE_READY,
+};
+
+struct icve_shm_info {
+	/* Total shared memory size */
+	u32 total_shm_size;
+	/* Total number of buffers */
+	u32 num_pkt_bufs;
+	/* Per buff slot size i.e MTU Size + 4 bytes for magic number + 4 bytes
+	 * for Pkt len
+	 */
+	u32 buff_slot_size;
+	/* Base Address for Tx or Rx shared memory */
+	u32 base_addr;
+} __packed;
+
+struct icve_shm {
+	struct icve_shm_info shm_info_tx;
+	struct icve_shm_info shm_info_rx;
+} __packed;
+
+struct icve_mac_addr {
+	char addr[ETH_ALEN];
+} __packed;
+
 struct request_message {
 	u32 type; /* Request Type */
 	u32 id;	  /* Request ID */
+	union {
+		struct icve_mac_addr mac_addr;
+	};
 } __packed;
 
 struct response_message {
 	u32 type;	/* Response Type */
 	u32 id;		/* Response ID */
+	union {
+		struct icve_shm shm_info;
+	};
 } __packed;
 
 struct notify_message {
@@ -44,4 +88,46 @@ struct message {
 	};
 } __packed;
 
+/*      Shared Memory Layout
+ *
+ *	---------------------------	*****************
+ *	|        MAGIC_NUM        |	 icve_shm_head
+ *	|          HEAD           |
+ *	---------------------------	*****************
+ *	|        MAGIC_NUM        |
+ *	|        PKT_1_LEN        |
+ *	|          PKT_1          |
+ *	---------------------------
+ *	|        MAGIC_NUM        |
+ *	|        PKT_2_LEN        |	 icve_shm_buf
+ *	|          PKT_2          |
+ *	---------------------------
+ *	|           .             |
+ *	|           .             |
+ *	---------------------------
+ *	|        MAGIC_NUM        |
+ *	|        PKT_N_LEN        |
+ *	|          PKT_N          |
+ *	---------------------------	****************
+ *	|        MAGIC_NUM        |      icve_shm_tail
+ *	|          TAIL           |
+ *	---------------------------	****************
+ */
+
+struct icve_shm_index {
+	u32 magic_num;
+	u32 index;
+}  __packed;
+
+struct icve_shm_buf {
+	char __iomem *base_addr;	/* start addr of first buffer */
+	u32 magic_num;
+} __packed;
+
+struct icve_shared_mem {
+	struct icve_shm_index __iomem *head;
+	struct icve_shm_buf __iomem *buf;
+	struct icve_shm_index __iomem *tail;
+} __packed;
+
 #endif /* __ICVE_RPMSG_COMMON_H__ */
diff --git a/drivers/net/ethernet/ti/inter_core_virt_eth.c b/drivers/net/ethernet/ti/inter_core_virt_eth.c
index bea822d2373a..d96547d317fe 100644
--- a/drivers/net/ethernet/ti/inter_core_virt_eth.c
+++ b/drivers/net/ethernet/ti/inter_core_virt_eth.c
@@ -6,11 +6,145 @@
 
 #include "inter_core_virt_eth.h"
 
+#define ICVE_MIN_PACKET_SIZE ETH_ZLEN
+#define ICVE_MAX_PACKET_SIZE 1540 //(ETH_FRAME_LEN + ETH_FCS_LEN)
+#define ICVE_MAX_TX_QUEUES 1
+#define ICVE_MAX_RX_QUEUES 1
+
+#define PKT_LEN_SIZE_TYPE sizeof(u32)
+#define MAGIC_NUM_SIZE_TYPE sizeof(u32)
+
+/* 4 bytes for the magic number, 4 bytes for the packet length, and
+ * ICVE_MAX_PACKET_SIZE to hold the packet
+ */
+#define ICVE_BUFFER_SIZE \
+	(ICVE_MAX_PACKET_SIZE + PKT_LEN_SIZE_TYPE + MAGIC_NUM_SIZE_TYPE)
+
+#define RX_POLL_TIMEOUT 1000 /* 1000usec */
+#define RX_POLL_JIFFIES (jiffies + usecs_to_jiffies(RX_POLL_TIMEOUT))
+
+#define STATE_MACHINE_TIME msecs_to_jiffies(100)
+#define ICVE_REQ_TIMEOUT msecs_to_jiffies(100)
+
+#define icve_ndev_to_priv(ndev) ((struct icve_ndev_priv *)netdev_priv(ndev))
+#define icve_ndev_to_port(ndev) (icve_ndev_to_priv(ndev)->port)
+#define icve_ndev_to_common(ndev) (icve_ndev_to_port(ndev)->common)
+
+static int create_request(struct icve_common *common,
+			  enum icve_rpmsg_type rpmsg_type)
+{
+	struct message *msg = &common->send_msg;
+	int ret = 0;
+
+	msg->msg_hdr.src_id = common->port->port_id;
+	msg->req_msg.type = rpmsg_type;
+
+	switch (rpmsg_type) {
+	case ICVE_REQ_SHM_INFO:
+		msg->msg_hdr.msg_type = ICVE_REQUEST_MSG;
+		break;
+	case ICVE_REQ_SET_MAC_ADDR:
+		msg->msg_hdr.msg_type = ICVE_REQUEST_MSG;
+		ether_addr_copy(msg->req_msg.mac_addr.addr,
+				common->port->ndev->dev_addr);
+		break;
+	case ICVE_NOTIFY_PORT_UP:
+	case ICVE_NOTIFY_PORT_DOWN:
+		msg->msg_hdr.msg_type = ICVE_NOTIFY_MSG;
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(common->dev, "Invalid RPMSG request\n");
+	}
+	return ret;
+}
+
+static int icve_create_send_request(struct icve_common *common,
+				    enum icve_rpmsg_type rpmsg_type,
+				    bool wait)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	if (wait)
+		reinit_completion(&common->sync_msg);
+
+	spin_lock_irqsave(&common->send_msg_lock, flags);
+	create_request(common, rpmsg_type);
+	rpmsg_send(common->rpdev->ept, (void *)(&common->send_msg),
+		   sizeof(common->send_msg));
+	spin_unlock_irqrestore(&common->send_msg_lock, flags);
+
+	if (wait) {
+		ret = wait_for_completion_timeout(&common->sync_msg,
+						  ICVE_REQ_TIMEOUT);
+
+		if (!ret) {
+			dev_err(common->dev, "Failed to receive response within %ld jiffies\n",
+				ICVE_REQ_TIMEOUT);
+			ret = -ETIMEDOUT;
+			return ret;
+		}
+	}
+	return ret;
+}
+
+static void icve_state_machine(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct icve_common *common;
+	struct icve_port *port;
+
+	common = container_of(dwork, struct icve_common, state_work);
+	port = common->port;
+
+	mutex_lock(&common->state_lock);
+
+	switch (common->state) {
+	case ICVE_STATE_PROBE:
+		break;
+	case ICVE_STATE_OPEN:
+		icve_create_send_request(common, ICVE_REQ_SHM_INFO, false);
+		break;
+	case ICVE_STATE_CLOSE:
+		break;
+	case ICVE_STATE_READY:
+		icve_create_send_request(common, ICVE_REQ_SET_MAC_ADDR, false);
+		napi_enable(&port->rx_napi);
+		netif_carrier_on(port->ndev);
+		mod_timer(&port->rx_timer, RX_POLL_JIFFIES);
+		break;
+	case ICVE_STATE_RUNNING:
+		break;
+	}
+	mutex_unlock(&common->state_lock);
+}
+
+static void icve_rx_timer(struct timer_list *timer)
+{
+	struct icve_port *port = from_timer(port, timer, rx_timer);
+	struct napi_struct *napi;
+	int num_pkts = 0;
+	u32 head, tail;
+
+	head = port->rx_buffer->head->index;
+	tail = port->rx_buffer->tail->index;
+
+	num_pkts = tail - head;
+	num_pkts = num_pkts >= 0 ? num_pkts :
+				   (num_pkts + port->icve_rx_max_buffers);
+
+	napi = &port->rx_napi;
+	if (num_pkts && likely(napi_schedule_prep(napi)))
+		__napi_schedule(napi);
+	else
+		mod_timer(&port->rx_timer, RX_POLL_JIFFIES);
+}
+
 static int icve_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
 			 void *priv, u32 src)
 {
 	struct icve_common *common = dev_get_drvdata(&rpdev->dev);
 	struct message *msg = (struct message *)data;
+	struct icve_port *port = common->port;
 	u32 msg_type = msg->msg_hdr.msg_type;
 	u32 rpmsg_type;
 
@@ -24,11 +158,79 @@ static int icve_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
 		rpmsg_type = msg->resp_msg.type;
 		dev_dbg(common->dev, "Msg type = %d; RPMsg type = %d\n",
 			msg_type, rpmsg_type);
+		switch (rpmsg_type) {
+		case ICVE_RESP_SHM_INFO:
+			/* Retrieve Tx and Rx shared memory info from msg */
+			port->tx_buffer->head =
+				ioremap(msg->resp_msg.shm_info.shm_info_tx.base_addr,
+					sizeof(*port->tx_buffer->head));
+
+			port->tx_buffer->buf->base_addr =
+				ioremap((msg->resp_msg.shm_info.shm_info_tx.base_addr +
+					sizeof(*port->tx_buffer->head)),
+					(msg->resp_msg.shm_info.shm_info_tx.num_pkt_bufs *
+					 msg->resp_msg.shm_info.shm_info_tx.buff_slot_size));
+
+			port->tx_buffer->tail =
+				ioremap(msg->resp_msg.shm_info.shm_info_tx.base_addr +
+					sizeof(*port->tx_buffer->head) +
+					(msg->resp_msg.shm_info.shm_info_tx.num_pkt_bufs *
+					msg->resp_msg.shm_info.shm_info_tx.buff_slot_size),
+					sizeof(*port->tx_buffer->tail));
+
+			port->icve_tx_max_buffers = msg->resp_msg.shm_info.shm_info_tx.num_pkt_bufs;
+
+			port->rx_buffer->head =
+				ioremap(msg->resp_msg.shm_info.shm_info_rx.base_addr,
+					sizeof(*port->rx_buffer->head));
+
+			port->rx_buffer->buf->base_addr =
+				ioremap(msg->resp_msg.shm_info.shm_info_rx.base_addr +
+					sizeof(*port->rx_buffer->head),
+					(msg->resp_msg.shm_info.shm_info_rx.num_pkt_bufs *
+					 msg->resp_msg.shm_info.shm_info_rx.buff_slot_size));
+
+			port->rx_buffer->tail =
+				ioremap(msg->resp_msg.shm_info.shm_info_rx.base_addr +
+					sizeof(*port->rx_buffer->head) +
+					(msg->resp_msg.shm_info.shm_info_rx.num_pkt_bufs *
+					msg->resp_msg.shm_info.shm_info_rx.buff_slot_size),
+					sizeof(*port->rx_buffer->tail));
+
+			port->icve_rx_max_buffers =
+				msg->resp_msg.shm_info.shm_info_rx.num_pkt_bufs;
+
+			mutex_lock(&common->state_lock);
+			common->state = ICVE_STATE_READY;
+			mutex_unlock(&common->state_lock);
+
+			mod_delayed_work(system_wq,
+					 &common->state_work,
+					 STATE_MACHINE_TIME);
+
+			break;
+		case ICVE_RESP_SET_MAC_ADDR:
+			break;
+		}
+
 		break;
+
 	case ICVE_NOTIFY_MSG:
 		rpmsg_type = msg->notify_msg.type;
-		dev_dbg(common->dev, "Msg type = %d; RPMsg type = %d\n",
-			msg_type, rpmsg_type);
+		switch (rpmsg_type) {
+		case ICVE_NOTIFY_REMOTE_READY:
+			mutex_lock(&common->state_lock);
+			common->state = ICVE_STATE_RUNNING;
+			mutex_unlock(&common->state_lock);
+
+			mod_delayed_work(system_wq,
+					 &common->state_work,
+					 STATE_MACHINE_TIME);
+			break;
+		case ICVE_NOTIFY_PORT_UP:
+		case ICVE_NOTIFY_PORT_DOWN:
+			break;
+		}
 		break;
 	default:
 		dev_err(common->dev, "Invalid msg type\n");
@@ -37,10 +239,242 @@ static int icve_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
 	return 0;
 }
 
+static int icve_rx_packets(struct napi_struct *napi, int budget)
+{
+	struct icve_port *port = container_of(napi, struct icve_port, rx_napi);
+	u32 count, process_pkts;
+	struct sk_buff *skb;
+	u32 head, tail;
+	int num_pkts;
+	u32 pkt_len;
+
+	head = port->rx_buffer->head->index;
+	tail = port->rx_buffer->tail->index;
+
+	num_pkts = head - tail;
+
+	num_pkts = num_pkts >= 0 ? num_pkts :
+				   (num_pkts + port->icve_rx_max_buffers);
+	process_pkts = min(num_pkts, budget);
+	count = 0;
+	while (count < process_pkts) {
+		memcpy_fromio((void *)&pkt_len,
+			      (void *)(port->rx_buffer->buf->base_addr +
+			      MAGIC_NUM_SIZE_TYPE +
+			      (((tail + count) % port->icve_rx_max_buffers) *
+			      ICVE_BUFFER_SIZE)),
+			      PKT_LEN_SIZE_TYPE);
+		/* Start building the skb */
+		skb = napi_alloc_skb(napi, pkt_len);
+		if (!skb) {
+			port->ndev->stats.rx_dropped++;
+			goto rx_dropped;
+		}
+
+		skb->dev = port->ndev;
+		skb_put(skb, pkt_len);
+		memcpy_fromio((void *)skb->data,
+			      (void *)(port->rx_buffer->buf->base_addr +
+			      PKT_LEN_SIZE_TYPE + MAGIC_NUM_SIZE_TYPE +
+			      (((tail + count) % port->icve_rx_max_buffers) *
+			      ICVE_BUFFER_SIZE)),
+			      pkt_len);
+
+		skb->protocol = eth_type_trans(skb, port->ndev);
+
+		count++;
+		port->ndev->stats.rx_packets++;
+		port->ndev->stats.rx_bytes += skb->len;
+
+		/* Push skb into network stack; skb must not be touched after this */
+		napi_gro_receive(napi, skb);
+	}
+
+rx_dropped:
+
+	if (num_pkts) {
+		port->rx_buffer->tail->index =
+			(port->rx_buffer->tail->index + count) %
+			port->icve_rx_max_buffers;
+
+		if (num_pkts < budget && napi_complete_done(napi, count))
+			mod_timer(&port->rx_timer, RX_POLL_JIFFIES);
+	}
+
+	return count;
+}
+
+static int icve_ndo_open(struct net_device *ndev)
+{
+	struct icve_common *common = icve_ndev_to_common(ndev);
+
+	mutex_lock(&common->state_lock);
+	common->state = ICVE_STATE_OPEN;
+	mutex_unlock(&common->state_lock);
+	mod_delayed_work(system_wq, &common->state_work, msecs_to_jiffies(100));
+
+	return 0;
+}
+
+static int icve_ndo_stop(struct net_device *ndev)
+{
+	struct icve_common *common = icve_ndev_to_common(ndev);
+	struct icve_port *port = icve_ndev_to_port(ndev);
+
+	mutex_lock(&common->state_lock);
+	common->state = ICVE_STATE_CLOSE;
+	mutex_unlock(&common->state_lock);
+
+	netif_carrier_off(port->ndev);
+
+	cancel_delayed_work_sync(&common->state_work);
+	del_timer_sync(&port->rx_timer);
+	napi_disable(&port->rx_napi);
+
+	return 0;
+}
+
+static netdev_tx_t icve_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct icve_port *port = icve_ndev_to_port(ndev);
+	u32 head, tail;
+	int num_pkts;
+	u32 len;
+
+	len = skb_headlen(skb);
+	head = port->tx_buffer->head->index;
+	tail = port->tx_buffer->tail->index;
+
+	/* If the buffer queue is full, then drop packet */
+	num_pkts = head - tail;
+	num_pkts = num_pkts >= 0 ? num_pkts :
+				   (num_pkts + port->icve_tx_max_buffers);
+
+	if ((num_pkts + 1) == port->icve_tx_max_buffers) {
+		netdev_warn(ndev, "Tx buffer full %d\n", num_pkts);
+		goto ring_full;
+	}
+	/* Copy length */
+	memcpy_toio((void *)port->tx_buffer->buf->base_addr +
+			    MAGIC_NUM_SIZE_TYPE +
+			    (port->tx_buffer->head->index * ICVE_BUFFER_SIZE),
+		    (void *)&len, PKT_LEN_SIZE_TYPE);
+	/* Copy data to shared mem */
+	memcpy_toio((void *)(port->tx_buffer->buf->base_addr +
+			     MAGIC_NUM_SIZE_TYPE + PKT_LEN_SIZE_TYPE +
+			     (port->tx_buffer->head->index * ICVE_BUFFER_SIZE)),
+		    (void *)skb->data, len);
+	port->tx_buffer->head->index =
+		(port->tx_buffer->head->index + 1) % port->icve_tx_max_buffers;
+
+	ndev->stats.tx_packets++;
+	ndev->stats.tx_bytes += skb->len;
+
+	dev_consume_skb_any(skb);
+	return NETDEV_TX_OK;
+
+ring_full:
+	return NETDEV_TX_BUSY;
+}
+
+static int icve_set_mac_address(struct net_device *ndev, void *addr)
+{
+	struct icve_common *common = icve_ndev_to_common(ndev);
+	int ret;
+
+	ret = eth_mac_addr(ndev, addr);
+
+	if (ret < 0)
+		return ret;
+	icve_create_send_request(common, ICVE_REQ_SET_MAC_ADDR, false);
+	return ret;
+}
+
+static const struct net_device_ops icve_netdev_ops = {
+	.ndo_open = icve_ndo_open,
+	.ndo_stop = icve_ndo_stop,
+	.ndo_start_xmit = icve_start_xmit,
+	.ndo_set_mac_address = icve_set_mac_address,
+};
+
+static int icve_init_ndev(struct icve_common *common)
+{
+	struct device *dev = &common->rpdev->dev;
+	struct icve_ndev_priv *ndev_priv;
+	struct icve_port *port;
+	static u32 port_id;
+	int err;
+
+	port = common->port;
+	port->common = common;
+	port->port_id = port_id++;
+
+	port->ndev = devm_alloc_etherdev_mqs(common->dev, sizeof(*ndev_priv),
+					     ICVE_MAX_TX_QUEUES,
+					     ICVE_MAX_RX_QUEUES);
+
+	if (!port->ndev) {
+		dev_err(dev, "error allocating net_device\n");
+		return -ENOMEM;
+	}
+
+	ndev_priv = netdev_priv(port->ndev);
+	ndev_priv->port = port;
+	SET_NETDEV_DEV(port->ndev, dev);
+
+	port->ndev->min_mtu = ICVE_MIN_PACKET_SIZE;
+	port->ndev->max_mtu = ICVE_MAX_PACKET_SIZE;
+	port->ndev->netdev_ops = &icve_netdev_ops;
+
+	/* Allocate memory to test without actual RPMsg handshaking */
+	port->tx_buffer =
+		devm_kzalloc(dev, sizeof(*port->tx_buffer), GFP_KERNEL);
+	if (!port->tx_buffer) {
+		dev_err(dev, "Memory not available\n");
+		return -ENOMEM;
+	}
+
+	port->tx_buffer->buf =
+		devm_kzalloc(dev, sizeof(*port->tx_buffer->buf), GFP_KERNEL);
+	if (!port->tx_buffer->buf) {
+		dev_err(dev, "Memory not available\n");
+		return -ENOMEM;
+	}
+
+	port->rx_buffer =
+		devm_kzalloc(dev, sizeof(*port->rx_buffer), GFP_KERNEL);
+	if (!port->rx_buffer) {
+		dev_err(dev, "Memory not available\n");
+		return -ENOMEM;
+	}
+
+	port->rx_buffer->buf =
+		devm_kzalloc(dev, sizeof(*port->rx_buffer->buf), GFP_KERNEL);
+	if (!port->rx_buffer->buf) {
+		dev_err(dev, "Memory not available\n");
+		return -ENOMEM;
+	}
+	netif_carrier_off(port->ndev);
+
+	netif_napi_add(port->ndev, &port->rx_napi, icve_rx_packets);
+	timer_setup(&port->rx_timer, icve_rx_timer, 0);
+	err = register_netdev(port->ndev);
+	if (err) {
+		dev_err(dev, "error registering icve net device %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
 static int icve_rpmsg_probe(struct rpmsg_device *rpdev)
 {
 	struct device *dev = &rpdev->dev;
 	struct icve_common *common;
+	int ret = 0;
 
 	common = devm_kzalloc(&rpdev->dev, sizeof(*common), GFP_KERNEL);
 	if (!common)
@@ -51,12 +485,27 @@ static int icve_rpmsg_probe(struct rpmsg_device *rpdev)
 	common->port = devm_kzalloc(dev, sizeof(*common->port), GFP_KERNEL);
 	common->dev = dev;
 	common->rpdev = rpdev;
+	common->state = ICVE_STATE_PROBE;
+	spin_lock_init(&common->send_msg_lock);
+	spin_lock_init(&common->recv_msg_lock);
+	mutex_init(&common->state_lock);
+	INIT_DELAYED_WORK(&common->state_work, icve_state_machine);
+	init_completion(&common->sync_msg);
 
+	/* Register the network device */
+	ret = icve_init_ndev(common);
+	if (ret)
+		return ret;
 	return 0;
 }
 
 static void icve_rpmsg_remove(struct rpmsg_device *rpdev)
 {
+	struct icve_common *common = dev_get_drvdata(&rpdev->dev);
+	struct icve_port *port = common->port;
+
+	unregister_netdev(port->ndev);
+	netif_napi_del(&port->rx_napi);
+	del_timer_sync(&port->rx_timer);
 	dev_info(&rpdev->dev, "icve rpmsg client driver is removed\n");
 }
 
diff --git a/drivers/net/ethernet/ti/inter_core_virt_eth.h b/drivers/net/ethernet/ti/inter_core_virt_eth.h
index 91a3aba96996..4fc420cb9eab 100644
--- a/drivers/net/ethernet/ti/inter_core_virt_eth.h
+++ b/drivers/net/ethernet/ti/inter_core_virt_eth.h
@@ -14,14 +14,45 @@
 #include <linux/rpmsg.h>
 #include "icve_rpmsg_common.h"
 
+enum icve_state {
+	ICVE_STATE_PROBE,
+	ICVE_STATE_OPEN,
+	ICVE_STATE_CLOSE,
+	ICVE_STATE_READY,
+	ICVE_STATE_RUNNING,
+
+};
+
 struct icve_port {
+	struct icve_shared_mem *tx_buffer; /* Write buffer for data to be consumed remote side */
+	struct icve_shared_mem *rx_buffer; /* Read buffer for data to be consumed by this driver */
+	struct timer_list rx_timer;
 	struct icve_common *common;
-} __packed;
+	struct napi_struct rx_napi;
+	u8 local_mac_addr[ETH_ALEN];
+	struct net_device *ndev;
+	u32 icve_tx_max_buffers;
+	u32 icve_rx_max_buffers;
+	u32 port_id;
+};
 
 struct icve_common {
 	struct rpmsg_device *rpdev;
+	spinlock_t send_msg_lock; /* Acquire this lock while sending RPMsg */
+	spinlock_t recv_msg_lock; /* Acquire this lock while processing received RPMsg */
+	struct message send_msg;
+	struct message recv_msg;
 	struct icve_port *port;
 	struct device *dev;
-} __packed;
+	enum icve_state	state;
+	struct mutex state_lock; /* Lock to be used while changing the interface state */
+	struct delayed_work state_work;
+	struct completion sync_msg;
+};
+
+struct icve_ndev_priv {
+	struct icve_port *port;
+};
+
 
 #endif /* __INTER_CORE_VIRT_ETH_H__ */
-- 
2.40.1
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Andrew Lunn 1 year, 8 months ago
> +enum icve_rpmsg_type {
> +	/* Request types */
> +	ICVE_REQ_SHM_INFO = 0,
> +	ICVE_REQ_SET_MAC_ADDR,
> +
> +	/* Response types */
> +	ICVE_RESP_SHM_INFO,
> +	ICVE_RESP_SET_MAC_ADDR,
> +
> +	/* Notification types */
> +	ICVE_NOTIFY_PORT_UP,
> +	ICVE_NOTIFY_PORT_DOWN,
> +	ICVE_NOTIFY_PORT_READY,
> +	ICVE_NOTIFY_REMOTE_READY,
> +};

+struct message_header {
+       u32 src_id;
+       u32 msg_type; /* Do not use enum type, as enum size is compiler dependent */
+} __packed;


Given how you have defined icve_rpmsg_type, what is the point of
message_header.msg_type?

It seems like this would make more sense:

enum icve_rpmsg_request_type {
	ICVE_REQ_SHM_INFO = 0,
	ICVE_REQ_SET_MAC_ADDR,
}

enum icve_rpmsg_response_type {
	ICVE_RESP_SHM_INFO,
	ICVE_RESP_SET_MAC_ADDR,
}
enum icve_rpmsg_notify_type {
	ICVE_NOTIFY_PORT_UP,
	ICVE_NOTIFY_PORT_DOWN,
	ICVE_NOTIFY_PORT_READY,
	ICVE_NOTIFY_REMOTE_READY,
};

Also, why SET_MAC_ADDR? It would be good to document where the MAC
addresses are coming from. And what address this is setting.

In fact, please put all the protocol documentation into a .rst
file. That will help us discuss the protocol independent of the
implementation. The protocol is an ABI, so needs to be reviewed well.

> +struct icve_shm_info {
> +	/* Total shared memory size */
> +	u32 total_shm_size;
> +	/* Total number of buffers */
> +	u32 num_pkt_bufs;
> +	/* Per buff slot size i.e MTU Size + 4 bytes for magic number + 4 bytes
> +	 * for Pkt len
> +	 */

What is your definition of MTU?

enp2s0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc fq_codel state DOWN mode DEFAULT group default qlen 1000

Typically, MTU does not include the Ethernet header or checksum. Is
that what you mean here?

> +	u32 buff_slot_size;
> +	/* Base Address for Tx or Rx shared memory */
> +	u32 base_addr;
> +} __packed;

What do you mean by address here? Virtual address, physical address,
DMA address? And whose address is this? You have two CPUs here, with no
guarantee the shared memory is mapped to the same address in both
address spaces.

	Andrew
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Yojana Mallik 1 year, 8 months ago
Hi Andrew,

On 6/2/24 22:15, Andrew Lunn wrote:
>> +enum icve_rpmsg_type {
>> +	/* Request types */
>> +	ICVE_REQ_SHM_INFO = 0,
>> +	ICVE_REQ_SET_MAC_ADDR,
>> +
>> +	/* Response types */
>> +	ICVE_RESP_SHM_INFO,
>> +	ICVE_RESP_SET_MAC_ADDR,
>> +
>> +	/* Notification types */
>> +	ICVE_NOTIFY_PORT_UP,
>> +	ICVE_NOTIFY_PORT_DOWN,
>> +	ICVE_NOTIFY_PORT_READY,
>> +	ICVE_NOTIFY_REMOTE_READY,
>> +};
> 
> +struct message_header {
> +       u32 src_id;
> +       u32 msg_type; /* Do not use enum type, as enum size is compiler dependent */
> +} __packed;
> 
> 
> Given how you have defined icve_rpmsg_type, what is the point of
> message_header.msg_type?
> 


+	rpmsg_send(common->rpdev->ept, (void *)(&common->send_msg),
+		   sizeof(common->send_msg));

rpmsg_send in icve_create_send_request sends message_header.msg_type to the R5
core as part of (void *)(&common->send_msg).
In the icve_rpmsg_cb function, the outer switch statement dispatches on
msg_type, and its inner cases come from enum icve_rpmsg_type.

> It seems like this would make more sense:
> 
> enum icve_rpmsg_request_type {
> 	ICVE_REQ_SHM_INFO = 0,
> 	ICVE_REQ_SET_MAC_ADDR,
> }
> 
> enum icve_rpmsg_response_type {
> 	ICVE_RESP_SHM_INFO,
> 	ICVE_RESP_SET_MAC_ADDR,
> }
> enum icve_rpmsg_notify_type {
> 	ICVE_NOTIFY_PORT_UP,
> 	ICVE_NOTIFY_PORT_DOWN,
> 	ICVE_NOTIFY_PORT_READY,
> 	ICVE_NOTIFY_REMOTE_READY,
> };
> 

Since msg_hdr.msg_type, which takes its value from enum icve_msg_type, is used
in rpmsg_send and icve_rpmsg_cb, I would prefer to continue with the two
separate enums, that is enum icve_msg_type and enum icve_rpmsg_type. However, I
am always open to making improvements and would be happy to discuss this
further if you have additional insights.

> Also, why SET_MAC_ADDR? It would be good to document where the MAC
> addresses are coming from. And what address this is setting.
> 

The interface which is controlled by Linux and interacts with the R5 core is
assigned the MAC address 00:00:00:00:00:00 by default. To ensure reliable
communication and compliance with network standards, a different MAC address is
set for this interface using icve_set_mac_address.

> In fact, please put all the protocol documentation into a .rst
> file. That will help us discuss the protocol independent of the
> implementation. The protocol is an ABI, so needs to be reviewed well.
> 

I will do it.

>> +struct icve_shm_info {
>> +	/* Total shared memory size */
>> +	u32 total_shm_size;
>> +	/* Total number of buffers */
>> +	u32 num_pkt_bufs;
>> +	/* Per buff slot size i.e MTU Size + 4 bytes for magic number + 4 bytes
>> +	 * for Pkt len
>> +	 */
> 
> What is your definition of MTU?
> 
> enp2s0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc fq_codel state DOWN mode DEFAULT group default qlen 1000
> 
> Typically, MTU does not include the Ethernet header or checksum. Is
> that what you mean here?
> 

The MTU here is only the payload. I have realized the macro names are
misleading and I will change them to:
#define ICVE_PACKET_BUFFER_SIZE   1540
#define MAX_MTU   (ICVE_PACKET_BUFFER_SIZE - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN))
Hence the value of max MTU is 1518 bytes.

>> +	u32 buff_slot_size;
>> +	/* Base Address for Tx or Rx shared memory */
>> +	u32 base_addr;
>> +} __packed;
> 
> What do you mean by address here? Virtual address, physical address,
> DMA address? And whose address is this? You have two CPUs here, with no
> guarantee the shared memory is mapped to the same address in both
> address spaces.
> 
> 	Andrew

The address referred to above is a physical address. It is the address of the
Tx and Rx buffers under the control of Linux running on the A53 core. Whether
the shared memory is mapped to the same address in both address spaces is
checked by the R5 core.

Thanks for the feedback.
Regards
Yojana Mallik
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Andrew Lunn 1 year, 8 months ago
> > Also, why SET_MAC_ADDR? It would be good to document where the MAC
> > addresses are coming from. And what address this is setting.
> > 
> 
> The interface which is controlled by Linux and interacts with the R5 core is
> assigned the MAC address 00:00:00:00:00:00 by default. To ensure reliable
> communication and compliance with network standards, a different MAC address is
> set for this interface using icve_set_mac_address.

So this is the peer telling the Linux machine what MAC address to use?
As i said, it is not clear what direction this message is flowing. Or
even if it can be used the other way around. Can Linux tell the peer
what address it should use?

Also, what is the big picture here. Is this purely a point to point
link? There is no intention that one or both ends could bridge packets
to another network? Does this link always carrier IP? If so, why do
you need the Ethernet header? Why not just do the same as SLIP, PPP,
other point to point network protocols.

	Andrew
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Yojana Mallik 1 year, 8 months ago

On 6/4/24 18:30, Andrew Lunn wrote:
>>> Also, why SET_MAC_ADDR? It would be good to document where the MAC
>>> addresses are coming from. And what address this is setting.
>>>
>>
>> The interface which is controlled by Linux and interacts with the R5 core is
>> assigned the MAC address 00:00:00:00:00:00 by default. To ensure reliable
>> communication and compliance with network standards, a different MAC address is
>> set for this interface using icve_set_mac_address.
> 
> So this is the peer telling the Linux machine what MAC address to use?
> As i said, it is not clear what direction this message is flowing. Or
> even if it can be used the other way around. Can Linux tell the peer
> what address it should use?
> 

No, the RTOS peer is not telling the Linux machine what MAC address to use. The
user can add a unicast MAC address to the virtual network interface using the
following steps:

Steps to add MAC Address
# Bring down the virtual port interface
$ ifconfig eth1 down
# Set MAC address for the virtual port interface, ex 01:02:03:04:05:06
$ ifconfig eth1 hw ether 01:02:03:04:05:06
# Bring the interface up
$ ifconfig eth1 up

These commands will call the net_device_ops
.ndo_stop = icve_ndo_stop
.ndo_set_mac_address = icve_set_mac_address
.ndo_open = icve_ndo_open,

While adding the MAC address, the ndo ops will call set_mac_address, which will
call create_send_request, which will create a request with the add-MAC-address
command and send an RPMsg to the R5 core.


> Also, what is the big picture here. Is this purely a point to point
> link? There is no intention that one or both ends could bridge packets
> to another network? Does this link always carrier IP? If so, why do
> you need the Ethernet header? Why not just do the same as SLIP, PPP,
> other point to point network protocols.
> 
> 	Andrew

We want the shared memory to be accessed by two cores only, so the inter-core
virtual ethernet driver is indeed a point-to-point link. There is a bridging
application on the R5 core. If there is a cluster of cores, like multiple A53
cores and R5 cores, and one A53 core has to send packets to another A53 core,
then the first A53 core can send packets to the R5 core using one icve virtual
driver, and the R5 core can transmit these packets to the other A53 core using
another instance of the icve virtual driver. Multiple instances of the virtual
driver are used by the bridging application, but the virtual driver by itself
is a point-to-point link.

The goal of the inter-core virtual ethernet driver is network traffic
tunneling between heterogeneous processors. The R5 core will send Ethernet
packets to Linux because it wants Linux to process the Ethernet packets
further. It is intended that the R5 core should use its resources for other
processes and the A53 core should use its resources to take action on the
Ethernet packets. How the A53 core handles the Ethernet packets depends on the
Ethernet header, hence it is necessary for the A53 core to receive the
Ethernet header.

Regards,
Yojana Mallik
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Andrew Lunn 1 year, 8 months ago
> No, the RTOS peer is not telling the Linux machine what MAC address to use. The
> user can add a unicast MAC address to the virtual network interface using the
> following steps:
> 
> Steps to add MAC Address
> # Bring down the virtual port interface
> $ ifconfig eth1 down
> # Set MAC address for the virtual port interface, ex 01:02:03:04:05:06
> $ ifconfig eth1 hw ether 01:02:03:04:05:06
> # Bring the interface up
> $ ifconfig eth1 up

ifconfig is long deprecated, replaced by iproute2. Please don't use it
for anything modern.

> While adding the MAC address, the ndo ops will call set_mac_address, which will
> call create_send_request, which will create a request with the add-MAC-address
> command and send an RPMsg to the R5 core.

The protocol documentation should be at a level that you can give it
to somebody else and they can implement it, e.g. for a different
OS. So please make the documentation clearer about what direction these
messages flow in. And make the protocol description OS agnostic.

	Andrew
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Andrew Lunn 1 year, 8 months ago
> >> +	u32 buff_slot_size;
> >> +	/* Base Address for Tx or Rx shared memory */
> >> +	u32 base_addr;
> >> +} __packed;
> > 
> > What do you mean by address here? Virtual address, physical address,
> > DMA address? And whose address is this? You have two CPUs here, with no
> > guarantee the shared memory is mapped to the same address in both
> > address spaces.
> > 
> > 	Andrew
> 
> The address referred to above is a physical address. It is the address of the
> Tx and Rx buffers under the control of Linux running on the A53 core. Whether
> the shared memory is mapped to the same address in both address spaces is
> checked by the R5 core.

u32 is too small for a physical address. I'm sure there are systems
with more than 4G of address space. Also, I would not assume both CPUs
map the memory to the same physical address.

	Andrew
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Yojana Mallik 1 year, 8 months ago

On 6/4/24 18:24, Andrew Lunn wrote:
>>>> +	u32 buff_slot_size;
>>>> +	/* Base Address for Tx or Rx shared memory */
>>>> +	u32 base_addr;
>>>> +} __packed;
>>>
>>> What do you mean by address here? Virtual address, physical address,
>>> DMA address? And whose address is this? You have two CPUs here, with no
>>> guarantee the shared memory is mapped to the same address in both
>>> address spaces.
>>>
>>> 	Andrew
>>
>> The address referred to above is a physical address. It is the address of the
>> Tx and Rx buffers under the control of Linux running on the A53 core. Whether
>> the shared memory is mapped to the same address in both address spaces is
>> checked by the R5 core.
> 
> u32 is too small for a physical address. I'm sure there are systems
> with more than 4G of address space. Also, I would not assume both CPUs
> map the memory to the same physical address.
> 
> 	Andrew

The shared memory address space on the AM64x board is 2G, and the u32 data
type for the address is sufficient for this address space. In order to make
the driver generic, to work with systems that have more than 4G of address
space, we can change the base addr data type to u64 in the virtual driver code,
and the corresponding necessary changes have to be made in the firmware.
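
For example, the descriptor could become (a sketch of the proposed change
only; the current firmware still advertises a u32 address):

struct icve_shm_info {
	/* Total shared memory size */
	u32 total_shm_size;
	/* Total number of buffers */
	u32 num_pkt_bufs;
	/* Per buffer slot size */
	u32 buff_slot_size;
	/* Physical base address of the Tx or Rx region, widened to u64 so
	 * that systems with more than 4G of address space can be described
	 */
	u64 base_addr;
} __packed;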

During handshake between Linux and remote core, the remote core advertises Tx
and Rx shared memory info to Linux using rpmsg framework. Linux retrieves the
info related to shared memory from the response received using icve_rpmsg_cb
function.

+		case ICVE_RESP_SHM_INFO:
+			/* Retrieve Tx and Rx shared memory info from msg */
+			port->tx_buffer->head =
+				ioremap(msg->resp_msg.shm_info.shm_info_tx.base_addr,
+					sizeof(*port->tx_buffer->head));
+
+			port->tx_buffer->buf->base_addr =
+				ioremap((msg->resp_msg.shm_info.shm_info_tx.base_addr +
+					sizeof(*port->tx_buffer->head)),
+					(msg->resp_msg.shm_info.shm_info_tx.num_pkt_bufs *
+					 msg->resp_msg.shm_info.shm_info_tx.buff_slot_size));
+
+			port->tx_buffer->tail =
+				ioremap(msg->resp_msg.shm_info.shm_info_tx.base_addr +
+					sizeof(*port->tx_buffer->head) +
+					(msg->resp_msg.shm_info.shm_info_tx.num_pkt_bufs *
+					msg->resp_msg.shm_info.shm_info_tx.buff_slot_size),
+					sizeof(*port->tx_buffer->tail));
+
+

	
The shared memory layout is modeled as circular buffer.
/*      Shared Memory Layout
 *
 *	---------------------------	*****************
 *	|        MAGIC_NUM        |	 icve_shm_head
 *	|          HEAD           |
 *	---------------------------	*****************
 *	|        MAGIC_NUM        |
 *	|        PKT_1_LEN        |
 *	|          PKT_1          |
 *	---------------------------
 *	|        MAGIC_NUM        |
 *	|        PKT_2_LEN        |	 icve_shm_buf
 *	|          PKT_2          |
 *	---------------------------
 *	|           .             |
 *	|           .             |
 *	---------------------------
 *	|        MAGIC_NUM        |
 *	|        PKT_N_LEN        |
 *	|          PKT_N          |
 *	---------------------------	****************
 *	|        MAGIC_NUM        |      icve_shm_tail
 *	|          TAIL           |
 *	---------------------------	****************
 */

Linux retrieves the following info provided in response by R5 core:

Tx buffer head address which is stored in port->tx_buffer->head

Tx buffer buffer's base address which is stored in port->tx_buffer->buf->base_addr

Tx buffer tail address which is stored in port->tx_buffer->tail

The number of packets that can be put into Tx buffer which is stored in
port->icve_tx_max_buffers

Rx buffer head address which is stored in port->rx_buffer->head

Rx buffer buffer's base address which is stored in port->rx_buffer->buf->base_addr

Rx buffer tail address which is stored in port->rx_buffer->tail

The number of packets that are put into Rx buffer which is stored in
port->icve_rx_max_buffers

Linux trusts these addresses sent by the R5 core to send or receive Ethernet
packets. In this way both CPUs map to the same physical address.
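
In other words, for each direction the driver computes the physical
addresses from the advertised values as below (a sketch; head_pa, buf_pa
and tail_pa are only illustrative names for what is passed to ioremap):

	head_pa = base_addr;
	buf_pa  = base_addr + sizeof(struct icve_shm_index);
	tail_pa = buf_pa + num_pkt_bufs * buff_slot_size;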

Regards,
Yojana Mallik
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Andrew Lunn 1 year, 8 months ago
> The shared memory address space on the AM64x board is 2G, and the u32 data
> type for the address is sufficient for this address space. In order to make
> the driver generic, to work with systems that have more than 4G of address
> space, we can change the base addr data type to u64 in the virtual driver code,
> and the corresponding necessary changes have to be made in the firmware.

You probably need to think about this concept in a more generic
way. You have a block of memory which is physically shared between two
CPUs. Does each have its own MMU involved in accessing this memory? Why
would each see the memory at the same physical address? Why does one
CPU actually know anything about the memory layout of another CPU, and
can tell it how to use its own memory? Do not think about your AM64x
board when answering these questions. Think about an abstract system,
two CPUs with a block of shared memory. Maybe it is even a CPU and a
GPU with shared memory, etc. 

> The shared memory layout is modeled as circular buffer.
> /*      Shared Memory Layout
>  *
>  *	---------------------------	*****************
>  *	|        MAGIC_NUM        |	 icve_shm_head
>  *	|          HEAD           |
>  *	---------------------------	*****************
>  *	|        MAGIC_NUM        |
>  *	|        PKT_1_LEN        |
>  *	|          PKT_1          |
>  *	---------------------------
>  *	|        MAGIC_NUM        |
>  *	|        PKT_2_LEN        |	 icve_shm_buf
>  *	|          PKT_2          |
>  *	---------------------------
>  *	|           .             |
>  *	|           .             |
>  *	---------------------------
>  *	|        MAGIC_NUM        |
>  *	|        PKT_N_LEN        |
>  *	|          PKT_N          |
>  *	---------------------------	****************
>  *	|        MAGIC_NUM        |      icve_shm_tail
>  *	|          TAIL           |
>  *	---------------------------	****************
>  */
> 
> Linux retrieves the following info provided in response by R5 core:
> 
> Tx buffer head address which is stored in port->tx_buffer->head
> 
> Tx buffer buffer's base address which is stored in port->tx_buffer->buf->base_addr
> 
> Tx buffer tail address which is stored in port->tx_buffer->tail
> 
> The number of packets that can be put into Tx buffer which is stored in
> port->icve_tx_max_buffers
> 
> Rx buffer head address which is stored in port->rx_buffer->head
> 
> Rx buffer buffer's base address which is stored in port->rx_buffer->buf->base_addr
> 
> Rx buffer tail address which is stored in port->rx_buffer->tail
> 
> The number of packets that are put into Rx buffer which is stored in
> port->icve_rx_max_buffers

I think most of these should not be pointers, but offsets from the
base of the shared memory. It then does not matter if they are mapped
at different physical addresses on each CPU.

> Linux trusts these addresses sent by the R5 core to send or receive Ethernet
> packets. In this way both CPUs map to the same physical address.

I'm not sure Linux should trust the R5. For a generic implementation,
the trust should be held to a minimum. There needs to be an agreement
about how the shared memory is partitioned, but each end needs to
verify that the memory is in fact valid, that none of the data
structures point outside of the shared memory etc. Otherwise one
system can cause memory corruption on the other, and that sort of bug
is going to be very hard to debug.
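
Something like this, as a sketch, reusing your icve_shm_index for the
head/tail blocks; all other names are invented:

struct eth_shm_desc {
	/* All offsets are from the start of the shared memory window, so
	 * the layout stays valid however each CPU maps the window.
	 */
	u32 head_offset;
	u32 buf_offset;
	u32 tail_offset;
	u32 num_pkt_bufs;
	u32 buff_slot_size;
} __packed;

static bool eth_shm_desc_valid(const struct eth_shm_desc *d, size_t shm_size)
{
	u64 buf_end = (u64)d->buf_offset +
		      (u64)d->num_pkt_bufs * d->buff_slot_size;

	/* Every region must lie entirely inside the shared memory */
	return (u64)d->head_offset + sizeof(struct icve_shm_index) <= shm_size &&
	       (u64)d->tail_offset + sizeof(struct icve_shm_index) <= shm_size &&
	       buf_end <= shm_size;
}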

	Andrew
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Yojana Mallik 1 year, 8 months ago

On 6/12/24 20:29, Andrew Lunn wrote:
>> The shared memory address space on the AM64x board is 2G, and the u32 data
>> type for the address is sufficient for this address space. In order to make
>> the driver generic, to work with systems that have more than 4G of address
>> space, we can change the base addr data type to u64 in the virtual driver code,
>> and the corresponding necessary changes have to be made in the firmware.
> 
> You probably need to think about this concept in a more generic
> way. You have a block of memory which is physically shared between two
> CPUs. Does each have its own MMU involved in accessing this memory? Why
> would each see the memory at the same physical address? Why does one
> CPU actually know anything about the memory layout of another CPU, and
> can tell it how to use its own memory? Do not think about your AM64x
> board when answering these questions. Think about an abstract system,
> two CPUs with a block of shared memory. Maybe it is even a CPU and a
> GPU with shared memory, etc. 
> 
>> The shared memory layout is modeled as circular buffer.
>> /*      Shared Memory Layout
>>  *
>>  *	---------------------------	*****************
>>  *	|        MAGIC_NUM        |	 icve_shm_head
>>  *	|          HEAD           |
>>  *	---------------------------	*****************
>>  *	|        MAGIC_NUM        |
>>  *	|        PKT_1_LEN        |
>>  *	|          PKT_1          |
>>  *	---------------------------
>>  *	|        MAGIC_NUM        |
>>  *	|        PKT_2_LEN        |	 icve_shm_buf
>>  *	|          PKT_2          |
>>  *	---------------------------
>>  *	|           .             |
>>  *	|           .             |
>>  *	---------------------------
>>  *	|        MAGIC_NUM        |
>>  *	|        PKT_N_LEN        |
>>  *	|          PKT_N          |
>>  *	---------------------------	****************
>>  *	|        MAGIC_NUM        |      icve_shm_tail
>>  *	|          TAIL           |
>>  *	---------------------------	****************
>>  */
>>
>> Linux retrieves the following info provided in response by R5 core:
>>
>> Tx buffer head address which is stored in port->tx_buffer->head
>>
>> Tx buffer buffer's base address which is stored in port->tx_buffer->buf->base_addr
>>
>> Tx buffer tail address which is stored in port->tx_buffer->tail
>>
>> The number of packets that can be put into Tx buffer which is stored in
>> port->icve_tx_max_buffers
>>
>> Rx buffer head address which is stored in port->rx_buffer->head
>>
>> Rx buffer buffer's base address which is stored in port->rx_buffer->buf->base_addr
>>
>> Rx buffer tail address which is stored in port->rx_buffer->tail
>>
>> The number of packets that are put into Rx buffer which is stored in
>> port->icve_rx_max_buffers
> 
> I think most of these should not be pointers, but offsets from the
> base of the shared memory. It then does not matter if they are mapped
> at different physical addresses on each CPU.
> 
>> Linux trusts these addresses sent by the R5 core to send or receive Ethernet
>> packets. In this way both CPUs map to the same physical address.
> 
> I'm not sure Linux should trust the R5. For a generic implementation,
> the trust should be held to a minimum. There needs to be an agreement
> about how the shared memory is partitioned, but each end needs to
> verify that the memory is in fact valid, that none of the data
> structures point outside of the shared memory etc. Otherwise one
> system can cause memory corruption on the other, and that sort of bug
> is going to be very hard to debug.
> 
> 	Andrew
> 

The Linux remoteproc driver, which initializes remote processor cores, carves
out a section of DDR memory as reserved memory for each remote processor on
the SoC. This memory region has been reserved in the Linux device tree file as
reserved-memory. Out of this memory reserved for the R5 core, some is set
aside as shared memory.

The shared memory is divided into two distinct regions:
one for the A53 -> R5 data path (Tx buffer for Linux), and the other for the
R5 -> A53 data path (Rx buffer for Linux).

Four entities have been hardcoded into the firmware code for both the Tx and
Rx buffers: total shared memory size, number of packets, buffer slot size, and
base address of the buffer. These four entities are advertised by the R5 core,
and Linux retrieves this info from the message received in icve_rpmsg_cb.

Using the received base address of the Tx or Rx shared memory, together with
the received number of packets and buffer slot size, the driver calculates the
buffer's head address, the packet buffer's base address, and the tail address.

The Linux driver uses the ioremap function to map these calculated physical
addresses to virtual addresses. Linux uses these virtual addresses to send
packets to the remote core in the icve_start_xmit function.

It has been agreed upon by design that the remote core will use a particular
start address for the buffer and Linux will also use it, and this address has
been hardcoded in the firmware on the remote core. Since this address has been
hardcoded in the firmware, it could also be hardcoded in the Linux driver code,
and then a check could be made that the address received from the remote core
matches the address hardcoded in the Linux driver.

But viewing the driver from a generic perspective, the driver can interact with
different firmware whose start address for the shared memory region will not
match the one hardcoded into the Linux driver.

This is why it has been decided to hardcode the start address only in the
firmware; it is sent by the remote core to Linux, and Linux uses it.

Kindly suggest in what other ways the driver can get to know the start address
of the shared memory if it is not informed by the remote core, and also how to
check whether the address is valid.

Thanks and regards,
Yojana Mallik
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Andrew Lunn 1 year, 8 months ago
> The Linux remoteproc driver, which initializes remote processor cores, carves
> out a section of DDR memory as reserved memory for each remote processor on
> the SoC. This memory region has been reserved in the Linux device tree file as
> reserved-memory. Out of this memory reserved for the R5 core, some is set
> aside as shared memory.

I don't know much about rpmsg, so I read the documentation:

https://docs.kernel.org/staging/rpmsg.html

There is no mention of carving out a region of DDR memory. It says
nothing about memory reserved in DT.

The API is pretty much: Please send these bytes to the peer. Call this
callback when bytes have been received from a peer.

> The shared memory is divided into two distinct regions:
> one for the A53 -> R5 data path (Tx buffer for Linux), and the other for the
> R5 -> A53 data path (Rx buffer for Linux).

As I said in my previous message, forget about the A53->R5. Think
about a generic architecture. You have an RPMSG facility using the API
described in the documentation. You have a block of shared
memory. That memory could be VRAM, it could be a dual port SRAM, a PCI
BAR region, or some DDR which both CPUs have mapped into their address
space. Design a protocol around that. This might mean you need to
modify your firmware. It might mean you need to throw away your
firmware and start again. But for mainline, we want something generic
which any vendor can use with a wide range of hardware, with as few
assumptions as possible.

	Andrew
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Andrew Lunn 1 year, 8 months ago
> As I said in my previous message, forget about the A53->R5. Think
> about a generic architecture. You have an RPMSG facility using the API
> described in the documentation. You have a block of shared
> memory. That memory could be VRAM, it could be a dual port SRAM, a PCI
> BAR region, or some DDR which both CPUs have mapped into their address
> space. Design a protocol around that. This might mean you need to
> modify your firmware. It might mean you need to throw away your
> firmware and start again. But for mainline, we want something generic
> which any vendor can use with a wide range of hardware, with as few
> assumptions as possible.

Just adding to my own email. You might want to split the overall
problem into two. It could be, you have 95% of the code in a generic
driver framework. You instantiate it by calling something like:

void *rpmsg_ethernet_shared_mem_create(struct rpmsg_endpoint *ept,
				       void __iomem *shared_memory,
				       size_t size);

The cookie is then used with

int rpmsg_ethernet_cb(cookie, void *data, int len);

which gets called from the rpmsg .callback function.

You then have a vendor specific part which is responsible for creating
the rpmsg endpoint, finding the shared memory, and mapping it into the
address space ready for use.

All data structures inside the shared memory need to be position
independent, since there is no guaranteed the CPUs have it mapped to
the same address, or even have the same size addresses. The easy way
to do that is specify everything as offsets to the base of the memory,
making it easy to validate the offset is actually within the shared
memory.

As I said, I've not used rpmsg, so this might in practice need to be
different. But the basic idea should be OK: a vendor specific chunk
of code to do the basic setup, and then hand over the memory and RPMSG
endpoint to generic code which does all the real work.

	 Andrew
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Siddharth Vadapalli 1 year, 8 months ago
On Fri, May 31, 2024 at 12:10:05PM +0530, Yojana Mallik wrote:
> Register the RPMsg driver as a network device and add support for
> basic Ethernet functionality by using the shared memory for the data
> plane.
> 
> The shared memory layout is as below, with the region between
> PKT_1_LEN and PKT_N modelled as a circular buffer.
> 
> -------------------------
> |          HEAD         |
> -------------------------
> |          TAIL         |
> -------------------------
> |       PKT_1_LEN       |
> |         PKT_1         |
> -------------------------
> |       PKT_2_LEN       |
> |         PKT_2         |
> -------------------------
> |           .           |
> |           .           |
> -------------------------
> |       PKT_N_LEN       |
> |         PKT_N         |
> -------------------------
> 
> The offset between the HEAD and TAIL is polled to process the Rx packets.
> 
> Signed-off-by: Yojana Mallik <y-mallik@ti.com>
> ---
>  drivers/net/ethernet/ti/icve_rpmsg_common.h   |  86 ++++
>  drivers/net/ethernet/ti/inter_core_virt_eth.c | 453 +++++++++++++++++-
>  drivers/net/ethernet/ti/inter_core_virt_eth.h |  35 +-
>  3 files changed, 570 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/ethernet/ti/icve_rpmsg_common.h b/drivers/net/ethernet/ti/icve_rpmsg_common.h
> index 7cd157479d4d..2e3833de14bd 100644
> --- a/drivers/net/ethernet/ti/icve_rpmsg_common.h
> +++ b/drivers/net/ethernet/ti/icve_rpmsg_common.h
> @@ -15,14 +15,58 @@ enum icve_msg_type {
>  	ICVE_NOTIFY_MSG,
>  };

[...]

>  
>  #endif /* __ICVE_RPMSG_COMMON_H__ */
> diff --git a/drivers/net/ethernet/ti/inter_core_virt_eth.c b/drivers/net/ethernet/ti/inter_core_virt_eth.c
> index bea822d2373a..d96547d317fe 100644
> --- a/drivers/net/ethernet/ti/inter_core_virt_eth.c
> +++ b/drivers/net/ethernet/ti/inter_core_virt_eth.c
> @@ -6,11 +6,145 @@
>  
>  #include "inter_core_virt_eth.h"

[...]

>  
> +static int create_request(struct icve_common *common,
> +			  enum icve_rpmsg_type rpmsg_type)
> +{
> +	struct message *msg = &common->send_msg;
> +	int ret = 0;
> +
> +	msg->msg_hdr.src_id = common->port->port_id;
> +	msg->req_msg.type = rpmsg_type;
> +
> +	switch (rpmsg_type) {
> +	case ICVE_REQ_SHM_INFO:
> +		msg->msg_hdr.msg_type = ICVE_REQUEST_MSG;
> +		break;
> +	case ICVE_REQ_SET_MAC_ADDR:
> +		msg->msg_hdr.msg_type = ICVE_REQUEST_MSG;
> +		ether_addr_copy(msg->req_msg.mac_addr.addr,
> +				common->port->ndev->dev_addr);
> +		break;
> +	case ICVE_NOTIFY_PORT_UP:
> +	case ICVE_NOTIFY_PORT_DOWN:
> +		msg->msg_hdr.msg_type = ICVE_NOTIFY_MSG;
> +		break;
> +	default:
> +		ret = -EINVAL;
> +		dev_err(common->dev, "Invalid RPMSG request\n");
> +	}
> +	return ret;
> +}
> +
> +static int icve_create_send_request(struct icve_common *common,
> +				    enum icve_rpmsg_type rpmsg_type,
> +				    bool wait)
> +{
> +	unsigned long flags;
> +	int ret = 0;
> +
> +	if (wait)
> +		reinit_completion(&common->sync_msg);
> +
> +	spin_lock_irqsave(&common->send_msg_lock, flags);
> +	create_request(common, rpmsg_type);

Why isn't the return value of create_request() being checked?
If it is guaranteed to always return 0 based on the design, convert it
to a void function.
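
If it can fail, the error needs to be propagated, something along these
lines (untested; note that rpmsg_send() can fail as well):

	spin_lock_irqsave(&common->send_msg_lock, flags);
	ret = create_request(common, rpmsg_type);
	if (ret) {
		spin_unlock_irqrestore(&common->send_msg_lock, flags);
		return ret;
	}
	ret = rpmsg_send(common->rpdev->ept, (void *)(&common->send_msg),
			 sizeof(common->send_msg));
	spin_unlock_irqrestore(&common->send_msg_lock, flags);
	if (ret)
		return ret;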

> +	rpmsg_send(common->rpdev->ept, (void *)(&common->send_msg),
> +		   sizeof(common->send_msg));
> +	spin_unlock_irqrestore(&common->send_msg_lock, flags);
> +
> +	if (wait) {
> +		ret = wait_for_completion_timeout(&common->sync_msg,
> +						  ICVE_REQ_TIMEOUT);
> +
> +		if (!ret) {
> +			dev_err(common->dev, "Failed to receive response within %ld jiffies\n",
> +				ICVE_REQ_TIMEOUT);
> +			ret = -ETIMEDOUT;
> +			return ret;
> +		}
> +	}
> +	return ret;
> +}
> +
> +static void icve_state_machine(struct work_struct *work)
> +{
> +	struct delayed_work *dwork = to_delayed_work(work);
> +	struct icve_common *common;
> +	struct icve_port *port;
> +
> +	common = container_of(dwork, struct icve_common, state_work);
> +	port = common->port;
> +
> +	mutex_lock(&common->state_lock);
> +
> +	switch (common->state) {
> +	case ICVE_STATE_PROBE:
> +		break;
> +	case ICVE_STATE_OPEN:
> +		icve_create_send_request(common, ICVE_REQ_SHM_INFO, false);

The return value of icve_create_send_request() is not being checked. Is
it guaranteed to succeed? Where is the error handling path if
icve_create_send_request() fails?

> +		break;
> +	case ICVE_STATE_CLOSE:
> +		break;
> +	case ICVE_STATE_READY:
> +		icve_create_send_request(common, ICVE_REQ_SET_MAC_ADDR, false);

Same here and at all other places where icve_create_send_request() is
being invoked. The icve_create_send_request() seems to be newly added in
this version of the series and wasn't there in the RFC patch. This should
be mentioned in the Changelog.

[...]

Regards,
Siddharth.
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Siddharth Vadapalli 1 year, 8 months ago
On Fri, May 31, 2024 at 12:10:05PM +0530, Yojana Mallik wrote:
> Register the RPMsg driver as a network device and add support for
> basic Ethernet functionality by using the shared memory for the data
> plane.
> 
> The shared memory layout is as below, with the region between
> PKT_1_LEN and PKT_N modelled as a circular buffer.
> 
> -------------------------
> |          HEAD         |
> -------------------------
> |          TAIL         |
> -------------------------
> |       PKT_1_LEN       |
> |         PKT_1         |
> -------------------------
> |       PKT_2_LEN       |
> |         PKT_2         |
> -------------------------
> |           .           |
> |           .           |
> -------------------------
> |       PKT_N_LEN       |
> |         PKT_N         |
> -------------------------
> 
> The offset between the HEAD and TAIL is polled to process the Rx packets.

The author of this patch from the RFC series is:
Ravi Gunasekaran <r-gunasekaran@ti.com>
The authorship should be preserved across versions unless the
implementation has changed drastically requiring major rework
(which doesn't seem to be the case for this patch based on the
Changelog).

> 
> Signed-off-by: Yojana Mallik <y-mallik@ti.com>
> ---
>  drivers/net/ethernet/ti/icve_rpmsg_common.h   |  86 ++++
>  drivers/net/ethernet/ti/inter_core_virt_eth.c | 453 +++++++++++++++++-
>  drivers/net/ethernet/ti/inter_core_virt_eth.h |  35 +-
>  3 files changed, 570 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/ethernet/ti/icve_rpmsg_common.h b/drivers/net/ethernet/ti/icve_rpmsg_common.h
> index 7cd157479d4d..2e3833de14bd 100644
> --- a/drivers/net/ethernet/ti/icve_rpmsg_common.h
> +++ b/drivers/net/ethernet/ti/icve_rpmsg_common.h
> @@ -15,14 +15,58 @@ enum icve_msg_type {
>  	ICVE_NOTIFY_MSG,
>  };

[...]

>  
> +
>  #endif /* __ICVE_RPMSG_COMMON_H__ */
> diff --git a/drivers/net/ethernet/ti/inter_core_virt_eth.c b/drivers/net/ethernet/ti/inter_core_virt_eth.c
> index bea822d2373a..d96547d317fe 100644
> --- a/drivers/net/ethernet/ti/inter_core_virt_eth.c
> +++ b/drivers/net/ethernet/ti/inter_core_virt_eth.c
> @@ -6,11 +6,145 @@
>  
>  #include "inter_core_virt_eth.h"
>  
> +#define ICVE_MIN_PACKET_SIZE ETH_ZLEN
> +#define ICVE_MAX_PACKET_SIZE 1540 //(ETH_FRAME_LEN + ETH_FCS_LEN)

Is the commented portion above required?

> +#define ICVE_MAX_TX_QUEUES 1
> +#define ICVE_MAX_RX_QUEUES 1
> +
> +#define PKT_LEN_SIZE_TYPE sizeof(u32)
> +#define MAGIC_NUM_SIZE_TYPE sizeof(u32)
> +
> +/* 4 bytes to hold packet length and ICVE_MAX_PACKET_SIZE to hold packet */
> +#define ICVE_BUFFER_SIZE \
> +	(ICVE_MAX_PACKET_SIZE + PKT_LEN_SIZE_TYPE + MAGIC_NUM_SIZE_TYPE)
> +
> +#define RX_POLL_TIMEOUT 1000 /* 1000usec */

The macro name could be updated to include "USEC" to make it clear that
the unit is microseconds. The same comment applies to the other macros
below, which could likewise be named to include their units.
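
For example (an illustrative renaming only):

#define RX_POLL_TIMEOUT_USEC	1000
#define RX_POLL_JIFFIES		(jiffies + usecs_to_jiffies(RX_POLL_TIMEOUT_USEC))
#define STATE_MACHINE_TIME_MSEC	100
#define STATE_MACHINE_TIME	msecs_to_jiffies(STATE_MACHINE_TIME_MSEC)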

> +#define RX_POLL_JIFFIES (jiffies + usecs_to_jiffies(RX_POLL_TIMEOUT))
> +
> +#define STATE_MACHINE_TIME msecs_to_jiffies(100)
> +#define ICVE_REQ_TIMEOUT msecs_to_jiffies(100)
> +
> +#define icve_ndev_to_priv(ndev) ((struct icve_ndev_priv *)netdev_priv(ndev))
> +#define icve_ndev_to_port(ndev) (icve_ndev_to_priv(ndev)->port)

[...]

>  	u32 msg_type = msg->msg_hdr.msg_type;
>  	u32 rpmsg_type;
>  
> @@ -24,11 +158,79 @@ static int icve_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
>  		rpmsg_type = msg->resp_msg.type;
>  		dev_dbg(common->dev, "Msg type = %d; RPMsg type = %d\n",
>  			msg_type, rpmsg_type);
> +		switch (rpmsg_type) {
> +		case ICVE_RESP_SHM_INFO:
> +			/* Retrieve Tx and Rx shared memory info from msg */
> +			port->tx_buffer->head =

[...]

> +					sizeof(*port->rx_buffer->tail));
> +
> +			port->icve_rx_max_buffers =
> +				msg->resp_msg.shm_info.shm_info_rx.num_pkt_bufs;
> +
> +			mutex_lock(&common->state_lock);
> +			common->state = ICVE_STATE_READY;
> +			mutex_unlock(&common->state_lock);
> +
> +			mod_delayed_work(system_wq,
> +					 &common->state_work,
> +					 STATE_MACHINE_TIME);
> +
> +			break;
> +		case ICVE_RESP_SET_MAC_ADDR:
> +			break;
> +		}
> +
>  		break;
> +
>  	case ICVE_NOTIFY_MSG:
>  		rpmsg_type = msg->notify_msg.type;
> -		dev_dbg(common->dev, "Msg type = %d; RPMsg type = %d\n",
> -			msg_type, rpmsg_type);

Why does the debug message above have to be deleted? If it was not
required, it should have been omitted from the previous patch in the
first place, rather than being added there and removed here.

> +		switch (rpmsg_type) {
> +		case ICVE_NOTIFY_REMOTE_READY:
> +			mutex_lock(&common->state_lock);
> +			common->state = ICVE_STATE_RUNNING;
> +			mutex_unlock(&common->state_lock);
> +
> +			mod_delayed_work(system_wq,
> +					 &common->state_work,
> +					 STATE_MACHINE_TIME);
> +			break;
> +		case ICVE_NOTIFY_PORT_UP:
> +		case ICVE_NOTIFY_PORT_DOWN:
> +			break;
> +		}
>  		break;
>  	default:

[...]

>  }
>  
> diff --git a/drivers/net/ethernet/ti/inter_core_virt_eth.h b/drivers/net/ethernet/ti/inter_core_virt_eth.h
> index 91a3aba96996..4fc420cb9eab 100644
> --- a/drivers/net/ethernet/ti/inter_core_virt_eth.h
> +++ b/drivers/net/ethernet/ti/inter_core_virt_eth.h
> @@ -14,14 +14,45 @@
>  #include <linux/rpmsg.h>
>  #include "icve_rpmsg_common.h"
>  
> +enum icve_state {
> +	ICVE_STATE_PROBE,
> +	ICVE_STATE_OPEN,
> +	ICVE_STATE_CLOSE,
> +	ICVE_STATE_READY,
> +	ICVE_STATE_RUNNING,
> +
> +};
> +
>  struct icve_port {
> +	struct icve_shared_mem *tx_buffer; /* Write buffer for data to be consumed remote side */
> +	struct icve_shared_mem *rx_buffer; /* Read buffer for data to be consumed by this driver */
> +	struct timer_list rx_timer;
>  	struct icve_common *common;
> -} __packed;

Is the "__packed" attribute no longer required, or was it overlooked?

> +	struct napi_struct rx_napi;
> +	u8 local_mac_addr[ETH_ALEN];
> +	struct net_device *ndev;
> +	u32 icve_tx_max_buffers;
> +	u32 icve_rx_max_buffers;
> +	u32 port_id;
> +};
>  
>  struct icve_common {
>  	struct rpmsg_device *rpdev;
> +	spinlock_t send_msg_lock; /* Acquire this lock while sending RPMsg */
> +	spinlock_t recv_msg_lock; /* Acquire this lock while processing received RPMsg */
> +	struct message send_msg;
> +	struct message recv_msg;
>  	struct icve_port *port;
>  	struct device *dev;
> -} __packed;

Same comment here as well.

[...]

There seem to be a lot of changes compared to the RFC patch which
haven't been mentioned in the Changelog. Please mention all the changes
when posting new versions.

Regards,
Siddharth.
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Andrew Lunn 1 year, 8 months ago
> > +#define ICVE_MIN_PACKET_SIZE ETH_ZLEN
> > +#define ICVE_MAX_PACKET_SIZE 1540 //(ETH_FRAME_LEN + ETH_FCS_LEN)
> 
> Is the commented portion above required?

I would actually say the commented part is better, since it shows where
the number comes from. However, ETH_FRAME_LEN + ETH_FCS_LEN is
1514 + 4 = 1518, not 1540, so something is missing here.
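
If 1518 is what was actually intended, a self-documenting define would
be (assuming frame + FCS really is the right maximum here, which the
author should confirm):

#define ICVE_MAX_PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)	/* 1518 */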

> >  struct icve_port {
> > +	struct icve_shared_mem *tx_buffer; /* Write buffer for data to be consumed remote side */
> > +	struct icve_shared_mem *rx_buffer; /* Read buffer for data to be consumed by this driver */
> > +	struct timer_list rx_timer;
> >  	struct icve_common *common;
> > -} __packed;
> 
> Is the "__packed" attribute no longer required, or was it overlooked?

Why is __packed even needed here? This is not a message structure to be
passed over RPMsg, is it?
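
As a rule of thumb, __packed belongs only on structures whose exact
byte layout is an ABI, such as the RPMsg payloads; CPU-local driver
state should be left naturally aligned. A hypothetical illustration:

/* Shared with the remote core: every byte of layout matters, pack it */
struct icve_wire_hdr {		/* hypothetical name */
	__le32 type;
	__le32 len;
} __packed;

/* Driver-private bookkeeping: let the compiler align the fields */
struct icve_port_state {	/* hypothetical name */
	struct net_device *ndev;
	u32 port_id;
};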

I think this is the second time code has been added in one patch and
then removed in the next. That is bad practice and suggests the overall
code quality is not good. Please do some internal reviews.

	Andrew
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by kernel test robot 1 year, 8 months ago
Hi Yojana,

kernel test robot noticed the following build warnings:

[auto build test WARNING on net-next/main]

url:    https://github.com/intel-lab-lkp/linux/commits/Yojana-Mallik/net-ethernet-ti-RPMsg-based-shared-memory-ethernet-driver/20240531-144258
base:   net-next/main
patch link:    https://lore.kernel.org/r/20240531064006.1223417-3-y-mallik%40ti.com
patch subject: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
config: x86_64-allyesconfig (https://download.01.org/0day-ci/archive/20240601/202406011038.AwLZhQpy-lkp@intel.com/config)
compiler: clang version 18.1.5 (https://github.com/llvm/llvm-project 617a15a9eac96088ae5e9134248d8236e34b91b1)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240601/202406011038.AwLZhQpy-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202406011038.AwLZhQpy-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> drivers/net/ethernet/ti/inter_core_virt_eth.c:76:6: warning: variable 'ret' is used uninitialized whenever 'if' condition is false [-Wsometimes-uninitialized]
      76 |         if (wait) {
         |             ^~~~
   drivers/net/ethernet/ti/inter_core_virt_eth.c:87:9: note: uninitialized use occurs here
      87 |         return ret;
         |                ^~~
   drivers/net/ethernet/ti/inter_core_virt_eth.c:76:2: note: remove the 'if' if its condition is always true
      76 |         if (wait) {
         |         ^~~~~~~~~
   drivers/net/ethernet/ti/inter_core_virt_eth.c:65:9: note: initialize the variable 'ret' to silence this warning
      65 |         int ret;
         |                ^
         |                 = 0
   drivers/net/ethernet/ti/inter_core_virt_eth.c:330:24: error: use of undeclared identifier 'icve_del_mc_addr'
     330 |         __dev_mc_unsync(ndev, icve_del_mc_addr);
         |                               ^
   drivers/net/ethernet/ti/inter_core_virt_eth.c:331:26: error: no member named 'mc_list' in 'struct icve_common'
     331 |         __hw_addr_init(&common->mc_list);
         |                         ~~~~~~  ^
   drivers/net/ethernet/ti/inter_core_virt_eth.c:337:28: error: no member named 'rx_mode_work' in 'struct icve_common'
     337 |         cancel_work_sync(&common->rx_mode_work);
         |                           ~~~~~~  ^
   1 warning and 3 errors generated.


vim +76 drivers/net/ethernet/ti/inter_core_virt_eth.c

    59	
    60	static int icve_create_send_request(struct icve_common *common,
    61					    enum icve_rpmsg_type rpmsg_type,
    62					    bool wait)
    63	{
    64		unsigned long flags;
    65		int ret;
    66	
    67		if (wait)
    68			reinit_completion(&common->sync_msg);
    69	
    70		spin_lock_irqsave(&common->send_msg_lock, flags);
    71		create_request(common, rpmsg_type);
    72		rpmsg_send(common->rpdev->ept, (void *)(&common->send_msg),
    73			   sizeof(common->send_msg));
    74		spin_unlock_irqrestore(&common->send_msg_lock, flags);
    75	
  > 76		if (wait) {
    77			ret = wait_for_completion_timeout(&common->sync_msg,
    78							  ICVE_REQ_TIMEOUT);
    79	
    80			if (!ret) {
    81				dev_err(common->dev, "Failed to receive response within %ld jiffies\n",
    82					ICVE_REQ_TIMEOUT);
    83				ret = -ETIMEDOUT;
    84				return ret;
    85			}
    86		}
    87		return ret;
    88	}
    89	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Re: [PATCH net-next v2 2/3] net: ethernet: ti: Register the RPMsg driver as network device
Posted by Yojana Mallik 1 year, 8 months ago

On 6/1/24 08:43, kernel test robot wrote:
> Hi Yojana,
> 
> kernel test robot noticed the following build warnings:
> 
> [...]
> 
>>> drivers/net/ethernet/ti/inter_core_virt_eth.c:76:6: warning: variable 'ret' is used uninitialized whenever 'if' condition is false [-Wsometimes-uninitialized]
> 
> [...]

I will fix all these issues in v3.
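
For the uninitialized-'ret' warning, the fix will probably take a shape
like the sketch below (which also propagates the return value of
rpmsg_send() instead of ignoring it; the actual v3 may differ):

static int icve_create_send_request(struct icve_common *common,
				    enum icve_rpmsg_type rpmsg_type,
				    bool wait)
{
	unsigned long flags;
	int ret;

	if (wait)
		reinit_completion(&common->sync_msg);

	spin_lock_irqsave(&common->send_msg_lock, flags);
	create_request(common, rpmsg_type);
	ret = rpmsg_send(common->rpdev->ept, (void *)(&common->send_msg),
			 sizeof(common->send_msg));
	spin_unlock_irqrestore(&common->send_msg_lock, flags);
	if (ret)
		return ret;

	if (wait && !wait_for_completion_timeout(&common->sync_msg,
						 ICVE_REQ_TIMEOUT)) {
		dev_err(common->dev, "Timed out waiting for response\n");
		return -ETIMEDOUT;
	}

	return 0;
}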

Regards,
Yojana Mallik