The following changes since commit d9a4282c4b690e45d25c2b933f318bb41eeb271d:

  Merge tag 'pull-tcg-20250308' of https://gitlab.com/rth7680/qemu into staging (2025-03-09 11:45:00 +0800)

are available in the Git repository at:

  https://github.com/jasowang/qemu.git tags/net-pull-request

for you to fetch changes up to ac2ff9b840ce82cc7d5fd9ce4fd3019a434d7dc9:

  tap-linux: Open ipvtap and macvtap (2025-03-10 17:07:16 +0800)

----------------------------------------------------------------
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEIV1G9IJGaJ7HfzVi7wSWWzmNYhEFAmfO1zkACgkQ7wSWWzmN
YhET+wf+PkaGeFTNUrOtWpl35fSMKlmOVbb1fkPfuhVBmeY2Vh1EIN3OjqnzdV0F
wxpuk+wwmFiuV1n6RNuMHQ0nz1mhgsSlZh93N5rArC/PUr3iViaT0cb82RjwxhaI
RODBhhy7V9WxEhT9hR8sCP2ky2mrKgcYbjiIEw+IvFZOVQa58rMr2h/cbAb/iH4l
7T9Wba03JBqOS6qgzSFZOMxvqnYdVjhqXN8M6W9ngRJOjPEAkTB6Evwep6anRjcM
mCUOgkf2sgQwKve8pYAeTMkzXFctvTc/qCU4ZbN8XcoKVVxe2jllGQqdOpMskPEf
slOuINeW5M0K5gyjsb/huqcOTfDI2A==
=/Y0+
-----END PGP SIGNATURE-----

----------------------------------------------------------------
Akihiko Odaki (3):
      util/iov: Do not assert offset is in iov
      Revert "hw/net/net_tx_pkt: Fix overrun in update_sctp_checksum()"
      tap-linux: Open ipvtap and macvtap

Eugenio Pérez (2):
      net: parameterize the removing client from nc list
      net: move backend cleanup to NIC cleanup

 hw/net/net_tx_pkt.c |  4 ----
 include/qemu/iov.h  |  5 +++--
 net/net.c           | 44 ++++++++++++++++++++++++++++++++++----------
 net/tap-linux.c     | 17 ++++++++++++++---
 net/vhost-vdpa.c    |  8 --------
 util/iov.c          |  5 -----
 6 files changed, 51 insertions(+), 32 deletions(-)

From: Eugenio Pérez <eperezma@redhat.com>

This change is used in later commits so that we can avoid removing the
netclient from the net_clients list when its removal is delayed.

No functional change intended.

Reviewed-by: Si-Wei Liu <si-wei.liu@oracle.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 net/net.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/net/net.c b/net/net.c
index XXXXXXX..XXXXXXX 100644
--- a/net/net.c
+++ b/net/net.c
@@ -XXX,XX +XXX,XX @@ NetClientState *qemu_get_peer(NetClientState *nc, int queue_index)
     return ncs->peer;
 }
 
-static void qemu_cleanup_net_client(NetClientState *nc)
+static void qemu_cleanup_net_client(NetClientState *nc,
+                                    bool remove_from_net_clients)
 {
-    QTAILQ_REMOVE(&net_clients, nc, next);
+    if (remove_from_net_clients) {
+        QTAILQ_REMOVE(&net_clients, nc, next);
+    }
 
     if (nc->info->cleanup) {
         nc->info->cleanup(nc);
@@ -XXX,XX +XXX,XX @@ void qemu_del_net_client(NetClientState *nc)
         }
 
         for (i = 0; i < queues; i++) {
-            qemu_cleanup_net_client(ncs[i]);
+            qemu_cleanup_net_client(ncs[i], true);
         }
 
         return;
     }
 
     for (i = 0; i < queues; i++) {
-        qemu_cleanup_net_client(ncs[i]);
+        qemu_cleanup_net_client(ncs[i], true);
         qemu_free_net_client(ncs[i]);
     }
 }
@@ -XXX,XX +XXX,XX @@ void qemu_del_nic(NICState *nic)
     for (i = queues - 1; i >= 0; i--) {
         NetClientState *nc = qemu_get_subqueue(nic, i);
 
-        qemu_cleanup_net_client(nc);
+        qemu_cleanup_net_client(nc, true);
         qemu_free_net_client(nc);
     }
 
-- 
2.42.0

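The sketch below is a self-contained illustration of the pattern this patch introduces, written against plain <sys/queue.h> rather than QEMU's data structures: the teardown helper takes a flag so that unlinking an entry from the global list can be separated from, and happen earlier than, the backend-specific cleanup.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

struct client {
    char name[16];
    TAILQ_ENTRY(client) next;
};

static TAILQ_HEAD(clienthead, client) clients = TAILQ_HEAD_INITIALIZER(clients);

/* Analogous to qemu_cleanup_net_client(nc, remove_from_net_clients):
 * unlinking from the global list is optional, so a caller may do it at
 * a different point in time than the backend-specific teardown. */
static void cleanup_client(struct client *c, bool remove_from_list)
{
    if (remove_from_list) {
        TAILQ_REMOVE(&clients, c, next);
    }
    printf("cleaning up %s\n", c->name);   /* backend teardown would go here */
}

int main(void)
{
    struct client *c = calloc(1, sizeof(*c));

    strcpy(c->name, "net0");
    TAILQ_INSERT_TAIL(&clients, c, next);

    /* A caller that still owns the entry unlinks and cleans up together. */
    cleanup_client(c, true);
    free(c);
    return 0;
}

In the patch above, every existing call site passes true, so behaviour is unchanged until a later patch in the series starts passing false.
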
From: Eugenio Pérez <eperezma@redhat.com>

Commit a0d7215e33 ("vhost-vdpa: do not cleanup the vdpa/vhost-net
structures if peer nic is present") effectively delayed the backend
cleanup, allowing the frontend or the guest to access its resources as
long as the frontend is still visible to the guest.

However, it does not clean up the resources until the QEMU process
exits. This causes an effective leak if the device is deleted with
device_del, as there is no way to close the vdpa device. This makes it
impossible to re-add that device to this or another QEMU instance until
the first instance of QEMU has finished.

Move the cleanup from qemu_cleanup to the NIC deletion and to
net_cleanup.

Fixes: a0d7215e33 ("vhost-vdpa: do not cleanup the vdpa/vhost-net structures if peer nic is present")
Reported-by: Lei Yang <leiyang@redhat.com>
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Signed-off-by: Jonah Palmer <jonah.palmer@oracle.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 net/net.c        | 33 +++++++++++++++++++++++++++------
 net/vhost-vdpa.c |  8 --------
 2 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/net/net.c b/net/net.c
index XXXXXXX..XXXXXXX 100644
--- a/net/net.c
+++ b/net/net.c
@@ -XXX,XX +XXX,XX @@ void qemu_del_net_client(NetClientState *nc)
         object_unparent(OBJECT(nf));
     }
 
-    /* If there is a peer NIC, delete and cleanup client, but do not free. */
+    /*
+     * If there is a peer NIC, transfer ownership to it. Delete the client
+     * from net_client list but do not cleanup nor free. This way NIC can
+     * still access to members of the backend.
+     *
+     * The cleanup and free will be done when the NIC is free.
+     */
     if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
         NICState *nic = qemu_get_nic(nc->peer);
         if (nic->peer_deleted) {
@@ -XXX,XX +XXX,XX @@ void qemu_del_net_client(NetClientState *nc)
 
         for (i = 0; i < queues; i++) {
             ncs[i]->peer->link_down = true;
+            QTAILQ_REMOVE(&net_clients, ncs[i], next);
         }
 
         if (nc->peer->info->link_status_changed) {
             nc->peer->info->link_status_changed(nc->peer);
         }
 
-        for (i = 0; i < queues; i++) {
-            qemu_cleanup_net_client(ncs[i], true);
-        }
-
         return;
     }
 
@@ -XXX,XX +XXX,XX @@ void qemu_del_nic(NICState *nic)
 
     for (i = 0; i < queues; i++) {
         NetClientState *nc = qemu_get_subqueue(nic, i);
-        /* If this is a peer NIC and peer has already been deleted, free it now. */
+        /*
+         * If this is a peer NIC and peer has already been deleted, clean it up
+         * and free it now.
+         */
         if (nic->peer_deleted) {
+            qemu_cleanup_net_client(nc->peer, false);
             qemu_free_net_client(nc->peer);
         } else if (nc->peer) {
             /* if there are RX packets pending, complete them */
@@ -XXX,XX +XXX,XX @@ void net_cleanup(void)
      * of the latest NET_CLIENT_DRIVER_NIC, and operate on *p as we walk
      * the list.
      *
+     * However, the NIC may have peers that trust to be clean beyond this
+     * point. For example, if they have been removed with device_del.
+     *
      * The 'nc' variable isn't part of the list traversal; it's purely
      * for convenience as too much '(*p)->' has a tendency to make the
      * readers' eyes bleed.
@@ -XXX,XX +XXX,XX @@ void net_cleanup(void)
     while (*p) {
         nc = *p;
         if (nc->info->type == NET_CLIENT_DRIVER_NIC) {
+            NICState *nic = qemu_get_nic(nc);
+
+            if (nic->peer_deleted) {
+                int queues = MAX(nic->conf->peers.queues, 1);
+
+                for (int i = 0; i < queues; i++) {
+                    nc = qemu_get_subqueue(nic, i);
+                    qemu_cleanup_net_client(nc->peer, false);
+                }
+            }
+
             /* Skip NET_CLIENT_DRIVER_NIC entries */
             p = &QTAILQ_NEXT(nc, next);
         } else {
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index XXXXXXX..XXXXXXX 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -XXX,XX +XXX,XX @@ static void vhost_vdpa_cleanup(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
 
-    /*
-     * If a peer NIC is attached, do not cleanup anything.
-     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
-     * when the guest is shutting down.
-     */
-    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
-        return;
-    }
     munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
     munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
     if (s->vhost_net) {
-- 
2.42.0

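As a rough, self-contained sketch of the ownership rule described in the commit message (plain C, not the QEMU implementation): deleting a backend whose peer NIC is still visible only marks it, and the real cleanup runs when the NIC itself is deleted, so the backend's resources are released at device_del time instead of lingering until the process exits.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct backend;

struct nic {
    struct backend *peer;
    bool peer_deleted;
};

struct backend {
    const char *name;
    struct nic *peer;
};

static void backend_cleanup(struct backend *b)
{
    /* In the real series this is where e.g. the vdpa fd would be closed. */
    printf("closing backend %s\n", b->name);
}

static void delete_backend(struct backend *b)
{
    if (b->peer) {
        /* The NIC is still visible to the guest: defer, just mark it. */
        b->peer->peer_deleted = true;
        return;
    }
    backend_cleanup(b);
    free(b);
}

static void delete_nic(struct nic *n)
{
    if (n->peer_deleted) {
        /* The backend was deleted earlier; clean it up and free it now. */
        backend_cleanup(n->peer);
        free(n->peer);
    }
    free(n);
}

int main(void)
{
    struct nic *n = calloc(1, sizeof(*n));
    struct backend *b = calloc(1, sizeof(*b));

    b->name = "vdpa0";
    b->peer = n;
    n->peer = b;

    delete_backend(b);   /* backend deleted first: cleanup is deferred */
    delete_nic(n);       /* NIC goes away: backend finally released */
    return 0;
}
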
From: Akihiko Odaki <akihiko.odaki@daynix.com>

iov_from_buf(), iov_to_buf(), iov_memset(), and iov_copy() assert that
the given offset fits in the iov, while tolerating a number of bytes to
operate on that is greater than the size of the iov. This is
inconsistent, so remove the assertions.

Asserting that the offset fits in the iov makes sense if it is expected
that other operations process the content before the offset and that
the content is processed in order. Under this expectation, the offset
should point to the end of the previously processed bytes and therefore
fit in the iov. However, this expectation depends on the details of the
caller and did not hold true in at least one case, which required code
to check iov_size(); that check was added with commit 83ddb3dbba2e
("hw/net/net_tx_pkt: Fix overrun in update_sctp_checksum()").

Adding such a check is inefficient and error-prone. These functions
already tolerate an oversized byte count to avoid such checks, so
remove the assertions and tolerate an invalid offset as well. The
functions return the number of bytes they operated on, so their callers
can still check the returned value to ensure there is sufficient space
at the given offset.

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 include/qemu/iov.h | 5 +++--
 util/iov.c         | 5 -----
 2 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/include/qemu/iov.h b/include/qemu/iov.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/iov.h
+++ b/include/qemu/iov.h
@@ -XXX,XX +XXX,XX @@ size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt);
  * only part of data will be copied, up to the end of the iovec.
  * Number of bytes actually copied will be returned, which is
  *     min(bytes, iov_size(iov)-offset)
- * `Offset' must point to the inside of iovec.
+ * Returns 0 when `offset' points to the outside of iovec.
  */
 size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt,
                          size_t offset, const void *buf, size_t bytes);
@@ -XXX,XX +XXX,XX @@ iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt,
 /**
  * Set data bytes pointed out by iovec `iov' of size `iov_cnt' elements,
  * starting at byte offset `start', to value `fillc', repeating it
- * `bytes' number of times. `Offset' must point to the inside of iovec.
+ * `bytes' number of times.
  * If `bytes' is large enough, only last bytes portion of iovec,
  * up to the end of it, will be filled with the specified value.
  * Function return actual number of bytes processed, which is
  *     min(size, iov_size(iov) - offset).
+ * Returns 0 when `offset' points to the outside of iovec.
  */
 size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt,
                   size_t offset, int fillc, size_t bytes);
diff --git a/util/iov.c b/util/iov.c
index XXXXXXX..XXXXXXX 100644
--- a/util/iov.c
+++ b/util/iov.c
@@ -XXX,XX +XXX,XX @@ size_t iov_from_buf_full(const struct iovec *iov, unsigned int iov_cnt,
             offset -= iov[i].iov_len;
         }
     }
-    assert(offset == 0);
     return done;
 }
 
@@ -XXX,XX +XXX,XX @@ size_t iov_to_buf_full(const struct iovec *iov, const unsigned int iov_cnt,
             offset -= iov[i].iov_len;
         }
     }
-    assert(offset == 0);
     return done;
 }
 
@@ -XXX,XX +XXX,XX @@ size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt,
             offset -= iov[i].iov_len;
         }
     }
-    assert(offset == 0);
     return done;
 }
 
@@ -XXX,XX +XXX,XX @@ unsigned iov_copy(struct iovec *dst_iov, unsigned int dst_iov_cnt,
         bytes -= len;
         offset = 0;
     }
-    assert(offset == 0);
     return j;
 }
 
@@ -XXX,XX +XXX,XX @@ size_t qemu_iovec_concat_iov(QEMUIOVector *dst,
             soffset -= src_iov[i].iov_len;
         }
     }
-    assert(soffset == 0); /* offset beyond end of src */
 
     return done;
 }
-- 
2.42.0

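The standalone example below illustrates the calling convention the commit message relies on. copy_into_iov() is a hypothetical stand-in that mimics the described iov_from_buf() behaviour rather than QEMU's actual implementation: it returns the number of bytes it copied, and returns 0 when the offset lies past the end of the iovec, so the caller detects the problem from the return value instead of an assertion firing.

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Copy `bytes` from `buf` into the iovec starting at `offset`; return the
 * number of bytes actually copied (0 if `offset` is beyond the iovec). */
static size_t copy_into_iov(const struct iovec *iov, unsigned int iov_cnt,
                            size_t offset, const void *buf, size_t bytes)
{
    size_t done = 0;
    unsigned int i;

    for (i = 0; (offset || done < bytes) && i < iov_cnt; i++) {
        if (offset < iov[i].iov_len) {
            size_t len = iov[i].iov_len - offset;

            if (len > bytes - done) {
                len = bytes - done;
            }
            memcpy((char *)iov[i].iov_base + offset,
                   (const char *)buf + done, len);
            done += len;
            offset = 0;
        } else {
            offset -= iov[i].iov_len;
        }
    }
    return done;   /* no assert(offset == 0): the caller checks the result */
}

int main(void)
{
    char a[4], b[4];
    struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
    unsigned int csum = 0;

    /* Offset 16 is past the 8 bytes available, so nothing is copied and
     * the caller can bail out from the return value alone. */
    if (copy_into_iov(iov, 2, 16, &csum, sizeof(csum)) < sizeof(csum)) {
        printf("offset outside the iov, nothing written\n");
    }
    return 0;
}

This is the kind of return-value check that the next patch in the series keeps in net_tx_pkt_update_sctp_checksum() while dropping the now-redundant iov_size() test.
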
From: Akihiko Odaki <akihiko.odaki@daynix.com>

This reverts commit 83ddb3dbba2ee0f1767442ae6ee665058aeb1093.

The added check is no longer necessary due to a change of
iov_from_buf().

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/net/net_tx_pkt.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/hw/net/net_tx_pkt.c b/hw/net/net_tx_pkt.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/net/net_tx_pkt.c
+++ b/hw/net/net_tx_pkt.c
@@ -XXX,XX +XXX,XX @@ bool net_tx_pkt_update_sctp_checksum(struct NetTxPkt *pkt)
     uint32_t csum = 0;
     struct iovec *pl_start_frag = pkt->vec + NET_TX_PKT_PL_START_FRAG;
 
-    if (iov_size(pl_start_frag, pkt->payload_frags) < 8 + sizeof(csum)) {
-        return false;
-    }
-
     if (iov_from_buf(pl_start_frag, pkt->payload_frags, 8, &csum, sizeof(csum)) < sizeof(csum)) {
         return false;
     }
-- 
2.42.0

From: Akihiko Odaki <akihiko.odaki@daynix.com>

Unlike tuntap, which creates one device file shared by all interfaces,
ipvtap and macvtap create a device file for each interface. For ipvtap
and macvtap, try to open the file dedicated to the interface first.

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 net/tap-linux.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/net/tap-linux.c b/net/tap-linux.c
index XXXXXXX..XXXXXXX 100644
--- a/net/tap-linux.c
+++ b/net/tap-linux.c
@@ -XXX,XX +XXX,XX @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr,
     int len = sizeof(struct virtio_net_hdr);
     unsigned int features;
 
-    fd = RETRY_ON_EINTR(open(PATH_NET_TUN, O_RDWR));
+    ret = if_nametoindex(ifname);
+    if (ret) {
+        g_autofree char *file = g_strdup_printf("/dev/tap%d", ret);
+        fd = open(file, O_RDWR);
+    } else {
+        fd = -1;
+    }
+
     if (fd < 0) {
-        error_setg_errno(errp, errno, "could not open %s", PATH_NET_TUN);
-        return -1;
+        fd = RETRY_ON_EINTR(open(PATH_NET_TUN, O_RDWR));
+        if (fd < 0) {
+            error_setg_errno(errp, errno, "could not open %s", PATH_NET_TUN);
+            return -1;
+        }
     }
+
     memset(&ifr, 0, sizeof(ifr));
     ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
-- 
2.42.0

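A minimal standalone sketch of the probing order this patch implements (illustrative only; the interface name macvtap0 below is just an example): macvtap and ipvtap interfaces expose a per-interface character device named /dev/tap<ifindex>, so the interface name is resolved with if_nametoindex() and that device is tried before falling back to the shared /dev/net/tun.

#include <fcntl.h>
#include <net/if.h>
#include <stdio.h>
#include <unistd.h>

/* Try the per-interface macvtap/ipvtap device first, then fall back to
 * the shared tun/tap device, mirroring the order described above. */
static int open_tap_like(const char *ifname)
{
    unsigned int idx = if_nametoindex(ifname);
    int fd = -1;

    if (idx != 0) {
        char path[64];

        snprintf(path, sizeof(path), "/dev/tap%u", idx);   /* macvtap/ipvtap */
        fd = open(path, O_RDWR);
    }
    if (fd < 0) {
        fd = open("/dev/net/tun", O_RDWR);   /* plain tuntap */
    }
    return fd;
}

int main(int argc, char **argv)
{
    int fd = open_tap_like(argc > 1 ? argv[1] : "macvtap0");

    if (fd < 0) {
        perror("open");
        return 1;
    }
    printf("opened fd %d\n", fd);
    close(fd);
    return 0;
}
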