src/network/bridge_driver_linux.c | 64 +++------- .../nat-default-linux.args | 116 ++++++++++++++++++ 2 files changed, 131 insertions(+), 49 deletions(-)
Automated performance tests found that network-centric workloads suffered
a 20 percent decrease when the host libvirt was updated from 5.0.0 to
5.1.0. On the test hosts libvirtd is enabled to start at boot and the
"default" network is defined, but it is not set to autostart.
libvirt 5.1.0 introduced private firewall chains with commit 5f1e6a7d.
The chains are created at libvirtd start, which triggers loading the
conntrack kernel module. Subsequent tracking and processing of
connections resulted in the performance hit. Prior to commit 5f1e6a7d
the conntrack module would not be loaded until starting a network,
when libvirt added rules to the builtin chains.
Restore the behavior of previous libvirt versions by delaying
the creation of private chains until the first network is started.
Signed-off-by: Jim Fehlig <jfehlig@suse.com>
---
I briefly discussed this issue with Daniel on IRC and just now finding
time to bring it to the list for broader discussion. The adjustment to
the test file feels hacky. The whole approach might be hacky, hence the
RFC.
src/network/bridge_driver_linux.c | 64 +++-------
.../nat-default-linux.args | 116 ++++++++++++++++++
2 files changed, 131 insertions(+), 49 deletions(-)
diff --git a/src/network/bridge_driver_linux.c b/src/network/bridge_driver_linux.c
index f2827543ca..a3a09caeba 100644
--- a/src/network/bridge_driver_linux.c
+++ b/src/network/bridge_driver_linux.c
@@ -35,44 +35,10 @@ VIR_LOG_INIT("network.bridge_driver_linux");
#define PROC_NET_ROUTE "/proc/net/route"
-static virErrorPtr errInitV4;
-static virErrorPtr errInitV6;
+static bool pvtChainsCreated;
void networkPreReloadFirewallRules(bool startup)
{
- bool created = false;
- int rc;
-
- /* We create global rules upfront as we don't want
- * the perf hit of conditionally figuring out whether
- * to create them each time a network is started.
- *
- * Any errors here are saved to be reported at time
- * of starting the network though as that makes them
- * more likely to be seen by a human
- */
- rc = iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV4);
- if (rc < 0) {
- errInitV4 = virSaveLastError();
- virResetLastError();
- } else {
- virFreeError(errInitV4);
- errInitV4 = NULL;
- }
- if (rc)
- created = true;
-
- rc = iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV6);
- if (rc < 0) {
- errInitV6 = virSaveLastError();
- virResetLastError();
- } else {
- virFreeError(errInitV6);
- errInitV6 = NULL;
- }
- if (rc)
- created = true;
-
/*
* If this is initial startup, and we just created the
* top level private chains we either
@@ -86,8 +52,8 @@ void networkPreReloadFirewallRules(bool startup)
* rules will be present. Thus we can safely just tell it
* to always delete from the builin chain
*/
- if (startup && created)
- iptablesSetDeletePrivate(false);
+ if (startup)
+ iptablesSetDeletePrivate(true);
}
@@ -701,19 +667,19 @@ int networkAddFirewallRules(virNetworkDefPtr def)
virFirewallPtr fw = NULL;
int ret = -1;
- if (errInitV4 &&
- (virNetworkDefGetIPByIndex(def, AF_INET, 0) ||
- virNetworkDefGetRouteByIndex(def, AF_INET, 0))) {
- virSetError(errInitV4);
- return -1;
- }
+ if (!pvtChainsCreated) {
+ if (iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV4) < 0 &&
+ (virNetworkDefGetIPByIndex(def, AF_INET, 0) ||
+ virNetworkDefGetRouteByIndex(def, AF_INET, 0)))
+ return -1;
- if (errInitV6 &&
- (virNetworkDefGetIPByIndex(def, AF_INET6, 0) ||
- virNetworkDefGetRouteByIndex(def, AF_INET6, 0) ||
- def->ipv6nogw)) {
- virSetError(errInitV6);
- return -1;
+ if (iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV6) < 0 &&
+ (virNetworkDefGetIPByIndex(def, AF_INET6, 0) ||
+ virNetworkDefGetRouteByIndex(def, AF_INET6, 0) ||
+ def->ipv6nogw))
+ return -1;
+
+ pvtChainsCreated = true;
}
if (def->bridgeZone) {
diff --git a/tests/networkxml2firewalldata/nat-default-linux.args b/tests/networkxml2firewalldata/nat-default-linux.args
index c9d523d043..61d620b592 100644
--- a/tests/networkxml2firewalldata/nat-default-linux.args
+++ b/tests/networkxml2firewalldata/nat-default-linux.args
@@ -1,5 +1,121 @@
iptables \
--table filter \
+--list-rules
+iptables \
+--table nat \
+--list-rules
+iptables \
+--table mangle \
+--list-rules
+iptables \
+--table filter \
+--new-chain LIBVIRT_INP
+iptables \
+--table filter \
+--insert INPUT \
+--jump LIBVIRT_INP
+iptables \
+--table filter \
+--new-chain LIBVIRT_OUT
+iptables \
+--table filter \
+--insert OUTPUT \
+--jump LIBVIRT_OUT
+iptables \
+--table filter \
+--new-chain LIBVIRT_FWO
+iptables \
+--table filter \
+--insert FORWARD \
+--jump LIBVIRT_FWO
+iptables \
+--table filter \
+--new-chain LIBVIRT_FWI
+iptables \
+--table filter \
+--insert FORWARD \
+--jump LIBVIRT_FWI
+iptables \
+--table filter \
+--new-chain LIBVIRT_FWX
+iptables \
+--table filter \
+--insert FORWARD \
+--jump LIBVIRT_FWX
+iptables \
+--table nat \
+--new-chain LIBVIRT_PRT
+iptables \
+--table nat \
+--insert POSTROUTING \
+--jump LIBVIRT_PRT
+iptables \
+--table mangle \
+--new-chain LIBVIRT_PRT
+iptables \
+--table mangle \
+--insert POSTROUTING \
+--jump LIBVIRT_PRT
+ip6tables \
+--table filter \
+--list-rules
+ip6tables \
+--table nat \
+--list-rules
+ip6tables \
+--table mangle \
+--list-rules
+ip6tables \
+--table filter \
+--new-chain LIBVIRT_INP
+ip6tables \
+--table filter \
+--insert INPUT \
+--jump LIBVIRT_INP
+ip6tables \
+--table filter \
+--new-chain LIBVIRT_OUT
+ip6tables \
+--table filter \
+--insert OUTPUT \
+--jump LIBVIRT_OUT
+ip6tables \
+--table filter \
+--new-chain LIBVIRT_FWO
+ip6tables \
+--table filter \
+--insert FORWARD \
+--jump LIBVIRT_FWO
+ip6tables \
+--table filter \
+--new-chain LIBVIRT_FWI
+ip6tables \
+--table filter \
+--insert FORWARD \
+--jump LIBVIRT_FWI
+ip6tables \
+--table filter \
+--new-chain LIBVIRT_FWX
+ip6tables \
+--table filter \
+--insert FORWARD \
+--jump LIBVIRT_FWX
+ip6tables \
+--table nat \
+--new-chain LIBVIRT_PRT
+ip6tables \
+--table nat \
+--insert POSTROUTING \
+--jump LIBVIRT_PRT
+ip6tables \
+--table mangle \
+--new-chain LIBVIRT_PRT
+ip6tables \
+--table mangle \
+--insert POSTROUTING \
+--jump LIBVIRT_PRT
+iptables \
+--table filter \
--insert LIBVIRT_INP \
--in-interface virbr0 \
--protocol tcp \
--
2.21.0
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
On 4/30/19 10:34 PM, Jim Fehlig wrote: > Automated performance tests found that network-centric workloads suffered > a 20 percent decrease when the host libvirt was updated from 5.0.0 to > 5.1.0. On the test hosts libvirtd is enabled to start at boot and the > "default" network is defined, but it is not set to autostart. > > libvirt 5.1.0 introduced private firewall chains with commit 5f1e6a7d. > The chains are created at libvirtd start, which triggers loading the > conntrack kernel module. Subsequent tracking and processing of > connections resulted in the performance hit. Prior to commit 5f1e6a7d > the conntrack module would not be loaded until starting a network, > when libvirt added rules to the builtin chains. > > Restore the behavior of previous libvirt versions by delaying > the creation of private chains until the first network is started. > > Signed-off-by: Jim Fehlig <jfehlig@suse.com> > --- > > I briefly discussed this issue with Daniel on IRC and just now finding > time to bring it to the list for broader discussion. The adjustment to > the test file feels hacky. The whole approach might by hacky, hence the > RFC. > > src/network/bridge_driver_linux.c | 64 +++------- > .../nat-default-linux.args | 116 ++++++++++++++++++ > 2 files changed, 131 insertions(+), 49 deletions(-) I like this. I was under impression that these rules are created if and only if there's a NATed network found at startup when libvirt is reading the config files. But that doesn't seem to be the case - we create the rules even if all users have is one network with <forward mode='open'/>. You have my +1. I'm not going give explicit ACK (at least for now) until Dan speaks his mind. He had some opinions and good ideas when I tried to fix a problem in the same area of the code. Michal -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
On Tue, Apr 30, 2019 at 02:34:44PM -0600, Jim Fehlig wrote: > Automated performance tests found that network-centric workloads suffered > a 20 percent decrease when the host libvirt was updated from 5.0.0 to > 5.1.0. On the test hosts libvirtd is enabled to start at boot and the > "default" network is defined, but it is not set to autostart. > > libvirt 5.1.0 introduced private firewall chains with commit 5f1e6a7d. > The chains are created at libvirtd start, which triggers loading the > conntrack kernel module. Subsequent tracking and processing of > connections resulted in the performance hit. Prior to commit 5f1e6a7d > the conntrack module would not be loaded until starting a network, > when libvirt added rules to the builtin chains. > > Restore the behavior of previous libvirt versions by delaying > the creation of private chains until the first network is started. > > Signed-off-by: Jim Fehlig <jfehlig@suse.com> > --- > > I briefly discussed this issue with Daniel on IRC and just now finding > time to bring it to the list for broader discussion. The adjustment to > the test file feels hacky. The whole approach might by hacky, hence the > RFC. The test file hackyness is something we had before, so not a big deal imho. 
> > src/network/bridge_driver_linux.c | 64 +++------- > .../nat-default-linux.args | 116 ++++++++++++++++++ > 2 files changed, 131 insertions(+), 49 deletions(-) > > diff --git a/src/network/bridge_driver_linux.c b/src/network/bridge_driver_linux.c > index f2827543ca..a3a09caeba 100644 > --- a/src/network/bridge_driver_linux.c > +++ b/src/network/bridge_driver_linux.c > @@ -35,44 +35,10 @@ VIR_LOG_INIT("network.bridge_driver_linux"); > > #define PROC_NET_ROUTE "/proc/net/route" > > -static virErrorPtr errInitV4; > -static virErrorPtr errInitV6; > +static bool pvtChainsCreated; > > void networkPreReloadFirewallRules(bool startup) > { > - bool created = false; > - int rc; > - > - /* We create global rules upfront as we don't want > - * the perf hit of conditionally figuring out whether > - * to create them each time a network is started. > - * > - * Any errors here are saved to be reported at time > - * of starting the network though as that makes them > - * more likely to be seen by a human > - */ > - rc = iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV4); > - if (rc < 0) { > - errInitV4 = virSaveLastError(); > - virResetLastError(); > - } else { > - virFreeError(errInitV4); > - errInitV4 = NULL; > - } > - if (rc) > - created = true; > - > - rc = iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV6); > - if (rc < 0) { > - errInitV6 = virSaveLastError(); > - virResetLastError(); > - } else { > - virFreeError(errInitV6); > - errInitV6 = NULL; > - } > - if (rc) > - created = true; > - > /* > * If this is initial startup, and we just created the > * top level private chains we either > @@ -86,8 +52,8 @@ void networkPreReloadFirewallRules(bool startup) > * rules will be present. Thus we can safely just tell it > * to always delete from the builin chain > */ > - if (startup && created) > - iptablesSetDeletePrivate(false); > + if (startup) > + iptablesSetDeletePrivate(true); This is problematic. 
It means that when upgrading libvirt we will never delete the legacy rules from the built-in chains. We needed to create the chains upfront, so that when we recreate rules for existing running networks, we'll upgrade them to use the libvirt chains instead of built-in chains. So I think we'll need to keep the code to create libvirt chains in this networkPreReloadFirewallRules, but *only* run it if we have existing virtual networks that are active. That way when libvirt starts on fresh boot, chains will be crated only when a network is started. If libvirt is upgraded on running host, then we'll still do thoings early. > } > > > @@ -701,19 +667,19 @@ int networkAddFirewallRules(virNetworkDefPtr def) > virFirewallPtr fw = NULL; > int ret = -1; > > - if (errInitV4 && > - (virNetworkDefGetIPByIndex(def, AF_INET, 0) || > - virNetworkDefGetRouteByIndex(def, AF_INET, 0))) { > - virSetError(errInitV4); > - return -1; > - } > + if (!pvtChainsCreated) { > + if (iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV4) < 0 && > + (virNetworkDefGetIPByIndex(def, AF_INET, 0) || > + virNetworkDefGetRouteByIndex(def, AF_INET, 0))) > + return -1; > > - if (errInitV6 && > - (virNetworkDefGetIPByIndex(def, AF_INET6, 0) || > - virNetworkDefGetRouteByIndex(def, AF_INET6, 0) || > - def->ipv6nogw)) { > - virSetError(errInitV6); > - return -1; > + if (iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV6) < 0 && > + (virNetworkDefGetIPByIndex(def, AF_INET6, 0) || > + virNetworkDefGetRouteByIndex(def, AF_INET6, 0) || > + def->ipv6nogw)) > + return -1; > + > + pvtChainsCreated = true; > } > > if (def->bridgeZone) { > diff --git a/tests/networkxml2firewalldata/nat-default-linux.args b/tests/networkxml2firewalldata/nat-default-linux.args > index c9d523d043..61d620b592 100644 > --- a/tests/networkxml2firewalldata/nat-default-linux.args > +++ b/tests/networkxml2firewalldata/nat-default-linux.args > @@ -1,5 +1,121 @@ > iptables \ > --table filter \ > +--list-rules > +iptables \ > +--table nat \ > 
+--list-rules > +iptables \ > +--table mangle \ > +--list-rules > +iptables \ > +--table filter \ > +--new-chain LIBVIRT_INP > +iptables \ > +--table filter \ > +--insert INPUT \ > +--jump LIBVIRT_INP > +iptables \ > +--table filter \ > +--new-chain LIBVIRT_OUT > +iptables \ > +--table filter \ > +--insert OUTPUT \ > +--jump LIBVIRT_OUT > +iptables \ > +--table filter \ > +--new-chain LIBVIRT_FWO > +iptables \ > +--table filter \ > +--insert FORWARD \ > +--jump LIBVIRT_FWO > +iptables \ > +--table filter \ > +--new-chain LIBVIRT_FWI > +iptables \ > +--table filter \ > +--insert FORWARD \ > +--jump LIBVIRT_FWI > +iptables \ > +--table filter \ > +--new-chain LIBVIRT_FWX > +iptables \ > +--table filter \ > +--insert FORWARD \ > +--jump LIBVIRT_FWX > +iptables \ > +--table nat \ > +--new-chain LIBVIRT_PRT > +iptables \ > +--table nat \ > +--insert POSTROUTING \ > +--jump LIBVIRT_PRT > +iptables \ > +--table mangle \ > +--new-chain LIBVIRT_PRT > +iptables \ > +--table mangle \ > +--insert POSTROUTING \ > +--jump LIBVIRT_PRT > +ip6tables \ > +--table filter \ > +--list-rules > +ip6tables \ > +--table nat \ > +--list-rules > +ip6tables \ > +--table mangle \ > +--list-rules > +ip6tables \ > +--table filter \ > +--new-chain LIBVIRT_INP > +ip6tables \ > +--table filter \ > +--insert INPUT \ > +--jump LIBVIRT_INP > +ip6tables \ > +--table filter \ > +--new-chain LIBVIRT_OUT > +ip6tables \ > +--table filter \ > +--insert OUTPUT \ > +--jump LIBVIRT_OUT > +ip6tables \ > +--table filter \ > +--new-chain LIBVIRT_FWO > +ip6tables \ > +--table filter \ > +--insert FORWARD \ > +--jump LIBVIRT_FWO > +ip6tables \ > +--table filter \ > +--new-chain LIBVIRT_FWI > +ip6tables \ > +--table filter \ > +--insert FORWARD \ > +--jump LIBVIRT_FWI > +ip6tables \ > +--table filter \ > +--new-chain LIBVIRT_FWX > +ip6tables \ > +--table filter \ > +--insert FORWARD \ > +--jump LIBVIRT_FWX > +ip6tables \ > +--table nat \ > +--new-chain LIBVIRT_PRT > +ip6tables \ > +--table nat \ > +--insert 
POSTROUTING \ > +--jump LIBVIRT_PRT > +ip6tables \ > +--table mangle \ > +--new-chain LIBVIRT_PRT > +ip6tables \ > +--table mangle \ > +--insert POSTROUTING \ > +--jump LIBVIRT_PRT > +iptables \ > +--table filter \ > --insert LIBVIRT_INP \ > --in-interface virbr0 \ > --protocol tcp \ > -- > 2.21.0 > Regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :| -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
On 5/7/19 4:36 AM, Daniel P. Berrangé wrote: > On Tue, Apr 30, 2019 at 02:34:44PM -0600, Jim Fehlig wrote: >> Automated performance tests found that network-centric workloads suffered >> a 20 percent decrease when the host libvirt was updated from 5.0.0 to >> 5.1.0. On the test hosts libvirtd is enabled to start at boot and the >> "default" network is defined, but it is not set to autostart. >> >> libvirt 5.1.0 introduced private firewall chains with commit 5f1e6a7d. >> The chains are created at libvirtd start, which triggers loading the >> conntrack kernel module. Subsequent tracking and processing of >> connections resulted in the performance hit. Prior to commit 5f1e6a7d >> the conntrack module would not be loaded until starting a network, >> when libvirt added rules to the builtin chains. >> >> Restore the behavior of previous libvirt versions by delaying >> the creation of private chains until the first network is started. >> >> Signed-off-by: Jim Fehlig <jfehlig@suse.com> >> --- >> >> I briefly discussed this issue with Daniel on IRC and just now finding >> time to bring it to the list for broader discussion. The adjustment to >> the test file feels hacky. The whole approach might by hacky, hence the >> RFC. > > The test file hackyness is something we had before, so not a big > deal imho. 
> >> >> src/network/bridge_driver_linux.c | 64 +++------- >> .../nat-default-linux.args | 116 ++++++++++++++++++ >> 2 files changed, 131 insertions(+), 49 deletions(-) >> >> diff --git a/src/network/bridge_driver_linux.c b/src/network/bridge_driver_linux.c >> index f2827543ca..a3a09caeba 100644 >> --- a/src/network/bridge_driver_linux.c >> +++ b/src/network/bridge_driver_linux.c >> @@ -35,44 +35,10 @@ VIR_LOG_INIT("network.bridge_driver_linux"); >> >> #define PROC_NET_ROUTE "/proc/net/route" >> >> -static virErrorPtr errInitV4; >> -static virErrorPtr errInitV6; >> +static bool pvtChainsCreated; >> >> void networkPreReloadFirewallRules(bool startup) >> { >> - bool created = false; >> - int rc; >> - >> - /* We create global rules upfront as we don't want >> - * the perf hit of conditionally figuring out whether >> - * to create them each time a network is started. >> - * >> - * Any errors here are saved to be reported at time >> - * of starting the network though as that makes them >> - * more likely to be seen by a human >> - */ >> - rc = iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV4); >> - if (rc < 0) { >> - errInitV4 = virSaveLastError(); >> - virResetLastError(); >> - } else { >> - virFreeError(errInitV4); >> - errInitV4 = NULL; >> - } >> - if (rc) >> - created = true; >> - >> - rc = iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV6); >> - if (rc < 0) { >> - errInitV6 = virSaveLastError(); >> - virResetLastError(); >> - } else { >> - virFreeError(errInitV6); >> - errInitV6 = NULL; >> - } >> - if (rc) >> - created = true; >> - >> /* >> * If this is initial startup, and we just created the >> * top level private chains we either >> @@ -86,8 +52,8 @@ void networkPreReloadFirewallRules(bool startup) >> * rules will be present. Thus we can safely just tell it >> * to always delete from the builin chain >> */ >> - if (startup && created) >> - iptablesSetDeletePrivate(false); >> + if (startup) >> + iptablesSetDeletePrivate(true); > > This is problematic. 
It means that when upgrading libvirt we will > never delete the legacy rules from the built-in chains. > > We needed to create the chains upfront, so that when we recreate > rules for existing running networks, we'll upgrade them to use the > libvirt chains instead of built-in chains. > > So I think we'll need to keep the code to create libvirt chains > in this networkPreReloadFirewallRules, but *only* run it if we > have existing virtual networks that are active. > > That way when libvirt starts on fresh boot, chains will be crated > only when a network is started. If libvirt is upgraded on running > host, then we'll still do thoings early. Thanks for the comments. I didn't have time to work on a V2 after travel and before a vacation. I'll be gone next week but will pick this up the following week after returning. Regards, Jim > >> } >> >> >> @@ -701,19 +667,19 @@ int networkAddFirewallRules(virNetworkDefPtr def) >> virFirewallPtr fw = NULL; >> int ret = -1; >> >> - if (errInitV4 && >> - (virNetworkDefGetIPByIndex(def, AF_INET, 0) || >> - virNetworkDefGetRouteByIndex(def, AF_INET, 0))) { >> - virSetError(errInitV4); >> - return -1; >> - } >> + if (!pvtChainsCreated) { >> + if (iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV4) < 0 && >> + (virNetworkDefGetIPByIndex(def, AF_INET, 0) || >> + virNetworkDefGetRouteByIndex(def, AF_INET, 0))) >> + return -1; >> >> - if (errInitV6 && >> - (virNetworkDefGetIPByIndex(def, AF_INET6, 0) || >> - virNetworkDefGetRouteByIndex(def, AF_INET6, 0) || >> - def->ipv6nogw)) { >> - virSetError(errInitV6); >> - return -1; >> + if (iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV6) < 0 && >> + (virNetworkDefGetIPByIndex(def, AF_INET6, 0) || >> + virNetworkDefGetRouteByIndex(def, AF_INET6, 0) || >> + def->ipv6nogw)) >> + return -1; >> + >> + pvtChainsCreated = true; >> } >> >> if (def->bridgeZone) { >> diff --git a/tests/networkxml2firewalldata/nat-default-linux.args b/tests/networkxml2firewalldata/nat-default-linux.args >> index 
c9d523d043..61d620b592 100644 >> --- a/tests/networkxml2firewalldata/nat-default-linux.args >> +++ b/tests/networkxml2firewalldata/nat-default-linux.args >> @@ -1,5 +1,121 @@ >> iptables \ >> --table filter \ >> +--list-rules >> +iptables \ >> +--table nat \ >> +--list-rules >> +iptables \ >> +--table mangle \ >> +--list-rules >> +iptables \ >> +--table filter \ >> +--new-chain LIBVIRT_INP >> +iptables \ >> +--table filter \ >> +--insert INPUT \ >> +--jump LIBVIRT_INP >> +iptables \ >> +--table filter \ >> +--new-chain LIBVIRT_OUT >> +iptables \ >> +--table filter \ >> +--insert OUTPUT \ >> +--jump LIBVIRT_OUT >> +iptables \ >> +--table filter \ >> +--new-chain LIBVIRT_FWO >> +iptables \ >> +--table filter \ >> +--insert FORWARD \ >> +--jump LIBVIRT_FWO >> +iptables \ >> +--table filter \ >> +--new-chain LIBVIRT_FWI >> +iptables \ >> +--table filter \ >> +--insert FORWARD \ >> +--jump LIBVIRT_FWI >> +iptables \ >> +--table filter \ >> +--new-chain LIBVIRT_FWX >> +iptables \ >> +--table filter \ >> +--insert FORWARD \ >> +--jump LIBVIRT_FWX >> +iptables \ >> +--table nat \ >> +--new-chain LIBVIRT_PRT >> +iptables \ >> +--table nat \ >> +--insert POSTROUTING \ >> +--jump LIBVIRT_PRT >> +iptables \ >> +--table mangle \ >> +--new-chain LIBVIRT_PRT >> +iptables \ >> +--table mangle \ >> +--insert POSTROUTING \ >> +--jump LIBVIRT_PRT >> +ip6tables \ >> +--table filter \ >> +--list-rules >> +ip6tables \ >> +--table nat \ >> +--list-rules >> +ip6tables \ >> +--table mangle \ >> +--list-rules >> +ip6tables \ >> +--table filter \ >> +--new-chain LIBVIRT_INP >> +ip6tables \ >> +--table filter \ >> +--insert INPUT \ >> +--jump LIBVIRT_INP >> +ip6tables \ >> +--table filter \ >> +--new-chain LIBVIRT_OUT >> +ip6tables \ >> +--table filter \ >> +--insert OUTPUT \ >> +--jump LIBVIRT_OUT >> +ip6tables \ >> +--table filter \ >> +--new-chain LIBVIRT_FWO >> +ip6tables \ >> +--table filter \ >> +--insert FORWARD \ >> +--jump LIBVIRT_FWO >> +ip6tables \ >> +--table filter \ >> 
+--new-chain LIBVIRT_FWI >> +ip6tables \ >> +--table filter \ >> +--insert FORWARD \ >> +--jump LIBVIRT_FWI >> +ip6tables \ >> +--table filter \ >> +--new-chain LIBVIRT_FWX >> +ip6tables \ >> +--table filter \ >> +--insert FORWARD \ >> +--jump LIBVIRT_FWX >> +ip6tables \ >> +--table nat \ >> +--new-chain LIBVIRT_PRT >> +ip6tables \ >> +--table nat \ >> +--insert POSTROUTING \ >> +--jump LIBVIRT_PRT >> +ip6tables \ >> +--table mangle \ >> +--new-chain LIBVIRT_PRT >> +ip6tables \ >> +--table mangle \ >> +--insert POSTROUTING \ >> +--jump LIBVIRT_PRT >> +iptables \ >> +--table filter \ >> --insert LIBVIRT_INP \ >> --in-interface virbr0 \ >> --protocol tcp \ >> -- >> 2.21.0 >> > > Regards, > Daniel > -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
On Fri, May 10, 2019 at 04:02:07PM -0600, Jim Fehlig wrote: > On 5/7/19 4:36 AM, Daniel P. Berrangé wrote: > > On Tue, Apr 30, 2019 at 02:34:44PM -0600, Jim Fehlig wrote: > > > Automated performance tests found that network-centric workloads suffered > > > a 20 percent decrease when the host libvirt was updated from 5.0.0 to > > > 5.1.0. On the test hosts libvirtd is enabled to start at boot and the > > > "default" network is defined, but it is not set to autostart. > > > > > > libvirt 5.1.0 introduced private firewall chains with commit 5f1e6a7d. > > > The chains are created at libvirtd start, which triggers loading the > > > conntrack kernel module. Subsequent tracking and processing of > > > connections resulted in the performance hit. Prior to commit 5f1e6a7d > > > the conntrack module would not be loaded until starting a network, > > > when libvirt added rules to the builtin chains. > > > > > > Restore the behavior of previous libvirt versions by delaying > > > the creation of private chains until the first network is started. > > > > > > Signed-off-by: Jim Fehlig <jfehlig@suse.com> > > > --- > > > > > > I briefly discussed this issue with Daniel on IRC and just now finding > > > time to bring it to the list for broader discussion. The adjustment to > > > the test file feels hacky. The whole approach might by hacky, hence the > > > RFC. > > > > The test file hackyness is something we had before, so not a big > > deal imho. 
> > > > > > > > src/network/bridge_driver_linux.c | 64 +++------- > > > .../nat-default-linux.args | 116 ++++++++++++++++++ > > > 2 files changed, 131 insertions(+), 49 deletions(-) > > > > > > diff --git a/src/network/bridge_driver_linux.c b/src/network/bridge_driver_linux.c > > > index f2827543ca..a3a09caeba 100644 > > > --- a/src/network/bridge_driver_linux.c > > > +++ b/src/network/bridge_driver_linux.c > > > @@ -35,44 +35,10 @@ VIR_LOG_INIT("network.bridge_driver_linux"); > > > #define PROC_NET_ROUTE "/proc/net/route" > > > -static virErrorPtr errInitV4; > > > -static virErrorPtr errInitV6; > > > +static bool pvtChainsCreated; > > > void networkPreReloadFirewallRules(bool startup) > > > { > > > - bool created = false; > > > - int rc; > > > - > > > - /* We create global rules upfront as we don't want > > > - * the perf hit of conditionally figuring out whether > > > - * to create them each time a network is started. > > > - * > > > - * Any errors here are saved to be reported at time > > > - * of starting the network though as that makes them > > > - * more likely to be seen by a human > > > - */ > > > - rc = iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV4); > > > - if (rc < 0) { > > > - errInitV4 = virSaveLastError(); > > > - virResetLastError(); > > > - } else { > > > - virFreeError(errInitV4); > > > - errInitV4 = NULL; > > > - } > > > - if (rc) > > > - created = true; > > > - > > > - rc = iptablesSetupPrivateChains(VIR_FIREWALL_LAYER_IPV6); > > > - if (rc < 0) { > > > - errInitV6 = virSaveLastError(); > > > - virResetLastError(); > > > - } else { > > > - virFreeError(errInitV6); > > > - errInitV6 = NULL; > > > - } > > > - if (rc) > > > - created = true; > > > - > > > /* > > > * If this is initial startup, and we just created the > > > * top level private chains we either > > > @@ -86,8 +52,8 @@ void networkPreReloadFirewallRules(bool startup) > > > * rules will be present. 
Thus we can safely just tell it > > > * to always delete from the builin chain > > > */ > > > - if (startup && created) > > > - iptablesSetDeletePrivate(false); > > > + if (startup) > > > + iptablesSetDeletePrivate(true); > > > > This is problematic. It means that when upgrading libvirt we will > > never delete the legacy rules from the built-in chains. > > > > We needed to create the chains upfront, so that when we recreate > > rules for existing running networks, we'll upgrade them to use the > > libvirt chains instead of built-in chains. > > > > So I think we'll need to keep the code to create libvirt chains > > in this networkPreReloadFirewallRules, but *only* run it if we > > have existing virtual networks that are active. > > > > That way when libvirt starts on fresh boot, chains will be crated > > only when a network is started. If libvirt is upgraded on running > > host, then we'll still do thoings early. > > Thanks for the comments. I didn't have time to work on a V2 after travel and > before a vacation. I'll be gone next week but will pick this up the > following week after returning. Don't worry about it, I'll probably get time to do an updated patch this week. Regards, Daniel -- |: https://berrange.com -o- https://www.flickr.com/photos/dberrange :| |: https://libvirt.org -o- https://fstop138.berrange.com :| |: https://entangle-photo.org -o- https://www.instagram.com/dberrange :| -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
© 2016 - 2024 Red Hat, Inc.