Wire the capture-side AF_PACKET socket into filter-redirector.
When the redirector owns an AF_PACKET capture socket, install a read
handler that drains PACKET_OUTGOING frames from the TAP device and forwards
them into the existing redirector chardev path. Reuse the normal chardev
packet framing so downstream code sees the same transport format as it
already handles today, and keep redirector statistics updated through the
same helper.
AF_PACKET delivers a raw Ethernet frame while the redirector chardev path
can carry packets with an empty vnet header wrapper. When the backend has a
vnet header length, prepend an empty legacy virtio-net header before the
frame so the captured packet can move through the same chardev transport as
regular redirector traffic.
Hook up the fd handler during setup, status changes, and VM state changes so
that capture is active only while the redirector is enabled and allowed to
run in the current VM state.
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
net/filter-mirror.c | 121 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 121 insertions(+)
diff --git a/net/filter-mirror.c b/net/filter-mirror.c
index d9f6a11d6b..e114ddb7d1 100644
--- a/net/filter-mirror.c
+++ b/net/filter-mirror.c
@@ -43,6 +43,7 @@ DECLARE_INSTANCE_CHECKER(MirrorState, FILTER_REDIRECTOR,
TYPE_FILTER_REDIRECTOR)
#define REDIRECTOR_MAX_LEN NET_BUFSIZE
+#define REDIRECTOR_AF_PACKET_WRAP_LEN sizeof(struct virtio_net_hdr)
struct MirrorState {
NetFilterState parent_obj;
@@ -181,6 +182,17 @@ static int redirector_chr_can_read(void *opaque)
return REDIRECTOR_MAX_LEN;
}
+/*
+ * Decide whether the AF_PACKET capture fd handler should be active.
+ *
+ * @nf:     the filter whose capture socket is being (de)activated
+ * @enable: current on/off state of the filter (callers pass nf->on)
+ *
+ * Returns true only when the filter is enabled AND either the VM is
+ * running or the user asked for capture while stopped.
+ */
+static bool filter_redirector_input_active(NetFilterState *nf, bool enable)
+{
+    MirrorState *s = FILTER_REDIRECTOR(nf);
+
+    /* A disabled filter never captures, regardless of VM state. */
+    if (!enable) {
+        return false;
+    }
+
+    /*
+     * Normally capture follows the VM run state; enable_when_stopped
+     * lets it keep running across a stopped VM.
+     */
+    return runstate_is_running() || s->enable_when_stopped;
+}
+
static void redirector_chr_read(void *opaque, const uint8_t *buf, int size)
{
NetFilterState *nf = opaque;
@@ -217,6 +229,91 @@ static void redirector_chr_event(void *opaque, QEMUChrEvent event)
}
}
+/*
+ * Forward an iovec through the redirector's outgoing chardev.
+ *
+ * @s:      redirector state owning the out chardev and the statistics
+ * @iov:    scatter/gather list describing the packet payload
+ * @iovcnt: number of entries in @iov
+ *
+ * Returns 0 when no chardev backend is connected (the packet is silently
+ * dropped), otherwise the return value of filter_send() — positive on
+ * success (presumably the payload length, given it feeds outdev_bytes;
+ * TODO confirm against filter_send()), negative errno-style on failure.
+ */
+static int filter_redirector_send_chardev_iov(MirrorState *s,
+                                              const struct iovec *iov,
+                                              int iovcnt)
+{
+    int ret;
+
+    /* Nothing to forward into if the out chardev has no backend yet. */
+    if (!qemu_chr_fe_backend_connected(&s->chr_out)) {
+        return 0;
+    }
+
+    ret = filter_send(s, iov, iovcnt);
+    if (ret > 0) {
+        /* Keep the redirector statistics in step with forwarded traffic. */
+        s->outdev_packets++;
+        s->outdev_bytes += ret;
+    }
+
+    return ret;
+}
+
+/*
+ * Read handler for the redirector's AF_PACKET capture socket.
+ *
+ * Drains the (assumed non-blocking — TODO confirm at socket setup) socket
+ * until recvfrom() stops returning data, keeping only PACKET_OUTGOING
+ * frames, i.e. traffic leaving through the TAP device.  Each kept frame is
+ * forwarded through the normal redirector chardev path; when the backend
+ * has a vnet header length, an all-zero legacy virtio-net header is
+ * prepended first so the payload matches the framing downstream code
+ * already handles.
+ *
+ * @opaque: the NetFilterState registered with qemu_set_fd_handler().
+ */
+static void filter_redirector_capture_netdev_read(void *opaque)
+{
+    NetFilterState *nf = opaque;
+    MirrorState *s = FILTER_REDIRECTOR(nf);
+    /* Zero-filled legacy virtio-net header used as the optional wrapper. */
+    char vnet_hdr[REDIRECTOR_AF_PACKET_WRAP_LEN] = { 0 };
+    struct iovec iov[2];
+    struct sockaddr_ll sll;
+    socklen_t sll_len;
+    ssize_t len;
+    size_t wrap_vnet_hdr_len;
+    int iovcnt;
+    int ret;
+
+    /* Capture not (fully) set up: no buffer or no socket to read from. */
+    if (!s->in_netbuf || s->in_netfd < 0) {
+        return;
+    }
+
+    for (;;) {
+        sll_len = sizeof(sll);
+        len = recvfrom(s->in_netfd, s->in_netbuf, REDIRECTOR_MAX_LEN, 0,
+                       (struct sockaddr *)&sll, &sll_len);
+        if (len <= 0) {
+            /* errno from this recvfrom() is inspected after the loop. */
+            break;
+        }
+
+        /* Only frames leaving the device are interesting; skip the rest. */
+        if (sll.sll_pkttype != PACKET_OUTGOING) {
+            continue;
+        }
+
+        /*
+         * AF_PACKET gives us a raw Ethernet frame. Wrap it as a regular
+         * redirector payload by prepending an empty legacy virtio-net header,
+         * so the downstream chardev path can treat it like a normal packet.
+         *
+         * NOTE(review): the wrapper is always sizeof(struct virtio_net_hdr)
+         * even if qemu_get_vnet_hdr_len() reports a larger (mrg_rxbuf)
+         * header — confirm downstream consumers expect the legacy size.
+         */
+        wrap_vnet_hdr_len = qemu_get_vnet_hdr_len(nf->netdev) ?
+                            REDIRECTOR_AF_PACKET_WRAP_LEN : 0;
+        if (len + wrap_vnet_hdr_len > REDIRECTOR_MAX_LEN) {
+            /* Wrapped packet would exceed the framing limit; drop it. */
+            error_report("filter redirector packet too large after wrap(%zd)",
+                         len);
+            continue;
+        }
+
+        /* Default: send the raw frame alone ... */
+        iov[0].iov_base = s->in_netbuf;
+        iov[0].iov_len = len;
+        iovcnt = 1;
+        /* ... or prefix it with the empty vnet header when required. */
+        if (wrap_vnet_hdr_len) {
+            iov[0].iov_base = vnet_hdr;
+            iov[0].iov_len = wrap_vnet_hdr_len;
+            iov[1].iov_base = s->in_netbuf;
+            iov[1].iov_len = len;
+            iovcnt = 2;
+        }
+
+        ret = filter_redirector_send_chardev_iov(s, iov, iovcnt);
+        if (ret < 0) {
+            error_report("filter redirector send failed(%s)", strerror(-ret));
+        }
+    }
+
+    /*
+     * Report real socket errors; EAGAIN/EWOULDBLOCK just mean the socket
+     * is drained, EINTR is a benign interruption.
+     */
+    if (len < 0 && errno != EAGAIN && errno != EWOULDBLOCK &&
+        errno != EINTR) {
+        error_report("filter redirector read netdev failed(%s)",
+                     strerror(errno));
+    }
+}
+
static ssize_t filter_mirror_receive_iov(NetFilterState *nf,
NetClientState *sender,
unsigned flags,
@@ -353,6 +450,14 @@ static void filter_redirector_vm_state_change(void *opaque, bool running,
NetFilterState *nf = opaque;
MirrorState *s = FILTER_REDIRECTOR(nf);
NetClientState *nc = nf->netdev;
+ bool active = filter_redirector_input_active(nf, nf->on);
+
+ if (s->in_netfd >= 0) {
+ qemu_set_fd_handler(s->in_netfd,
+ active ?
+ filter_redirector_capture_netdev_read : NULL,
+ NULL, active ? nf : NULL);
+ }
if (!running && s->enable_when_stopped && nc->info->read_poll) {
nc->info->read_poll(nc, true);
@@ -448,6 +553,7 @@ static void filter_redirector_setup(NetFilterState *nf, Error **errp)
{
MirrorState *s = FILTER_REDIRECTOR(nf);
Chardev *chr;
+ bool active = filter_redirector_input_active(nf, nf->on);
if (!s->indev && !s->outdev) {
error_setg(errp, "filter redirector needs 'indev' or "
@@ -501,6 +607,13 @@ static void filter_redirector_setup(NetFilterState *nf, Error **errp)
s->vmsentry = qemu_add_vm_change_state_handler(
filter_redirector_vm_state_change, nf);
+ if (s->in_netfd >= 0) {
+ qemu_set_fd_handler(s->in_netfd,
+ active ?
+ filter_redirector_capture_netdev_read : NULL,
+ NULL, active ? nf : NULL);
+ }
+
filter_redirector_maybe_enable_read_poll(nf);
filter_redirector_refresh_allow_send_when_stopped(nf);
@@ -509,6 +622,7 @@ static void filter_redirector_setup(NetFilterState *nf, Error **errp)
static void filter_redirector_status_changed(NetFilterState *nf, Error **errp)
{
MirrorState *s = FILTER_REDIRECTOR(nf);
+ bool active = filter_redirector_input_active(nf, nf->on);
if (s->indev) {
if (nf->on) {
@@ -521,6 +635,13 @@ static void filter_redirector_status_changed(NetFilterState *nf, Error **errp)
}
}
+ if (s->in_netfd >= 0) {
+ qemu_set_fd_handler(s->in_netfd,
+ active ?
+ filter_redirector_capture_netdev_read : NULL,
+ NULL, active ? nf : NULL);
+ }
+
if (nf->on) {
filter_redirector_maybe_enable_read_poll(nf);
}
--
2.52.0
© 2016 - 2026 Red Hat, Inc.