Fortunately, we have qemu wrappers, so it's sufficient to put the
lock/unlock calls only there.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
---
src/qemu/qemu_security.c | 107 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 107 insertions(+)
diff --git a/src/qemu/qemu_security.c b/src/qemu/qemu_security.c
index af3be42854..527563947c 100644
--- a/src/qemu/qemu_security.c
+++ b/src/qemu/qemu_security.c
@@ -26,6 +26,7 @@
#include "qemu_domain.h"
#include "qemu_security.h"
#include "virlog.h"
+#include "locking/domain_lock.h"
#define VIR_FROM_THIS VIR_FROM_QEMU
@@ -39,6 +40,12 @@ qemuSecuritySetAllLabel(virQEMUDriverPtr driver,
{
int ret = -1;
qemuDomainObjPrivatePtr priv = vm->privateData;
+ bool locked = false;
+
+ if (virDomainLockMetadataLock(driver->lockManager, vm) < 0)
+ goto cleanup;
+
+ locked = true;
if (qemuDomainNamespaceEnabled(vm, QEMU_DOMAIN_NS_MOUNT) &&
virSecurityManagerTransactionStart(driver->securityManager) < 0)
@@ -55,9 +62,17 @@ qemuSecuritySetAllLabel(virQEMUDriverPtr driver,
vm->pid) < 0)
goto cleanup;
+ locked = false;
+
+ if (virDomainLockMetadataUnlock(driver->lockManager, vm) < 0)
+ goto cleanup;
+
ret = 0;
cleanup:
virSecurityManagerTransactionAbort(driver->securityManager);
+ if (locked &&
+ virDomainLockMetadataUnlock(driver->lockManager, vm) < 0)
+ VIR_WARN("unable to release metadata lock");
return ret;
}
@@ -68,6 +83,10 @@ qemuSecurityRestoreAllLabel(virQEMUDriverPtr driver,
bool migrated)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
+ bool unlock = true;
+
+ if (virDomainLockMetadataLock(driver->lockManager, vm) < 0)
+ unlock = false;
/* In contrast to qemuSecuritySetAllLabel, do not use
* secdriver transactions here. This function is called from
@@ -79,6 +98,10 @@ qemuSecurityRestoreAllLabel(virQEMUDriverPtr driver,
vm->def,
migrated,
priv->chardevStdioLogd);
+
+ if (unlock &&
+ virDomainLockMetadataUnlock(driver->lockManager, vm) < 0)
+ VIR_WARN("unable to release metadata lock");
}
@@ -88,6 +111,12 @@ qemuSecuritySetDiskLabel(virQEMUDriverPtr driver,
virDomainDiskDefPtr disk)
{
int ret = -1;
+ bool locked = false;
+
+ if (virDomainLockMetadataDiskLock(driver->lockManager, vm, disk) < 0)
+ goto cleanup;
+
+ locked = true;
if (qemuDomainNamespaceEnabled(vm, QEMU_DOMAIN_NS_MOUNT) &&
virSecurityManagerTransactionStart(driver->securityManager) < 0)
@@ -103,9 +132,17 @@ qemuSecuritySetDiskLabel(virQEMUDriverPtr driver,
vm->pid) < 0)
goto cleanup;
+ locked = false;
+
+ if (virDomainLockMetadataDiskUnlock(driver->lockManager, vm, disk) < 0)
+ goto cleanup;
+
ret = 0;
cleanup:
virSecurityManagerTransactionAbort(driver->securityManager);
+ if (locked &&
+ virDomainLockMetadataDiskUnlock(driver->lockManager, vm, disk) < 0)
+ VIR_WARN("unable to release disk metadata lock");
return ret;
}
@@ -116,6 +153,12 @@ qemuSecurityRestoreDiskLabel(virQEMUDriverPtr driver,
virDomainDiskDefPtr disk)
{
int ret = -1;
+ bool locked = false;
+
+ if (virDomainLockMetadataDiskLock(driver->lockManager, vm, disk) < 0)
+ goto cleanup;
+
+ locked = true;
if (qemuDomainNamespaceEnabled(vm, QEMU_DOMAIN_NS_MOUNT) &&
virSecurityManagerTransactionStart(driver->securityManager) < 0)
@@ -131,9 +174,17 @@ qemuSecurityRestoreDiskLabel(virQEMUDriverPtr driver,
vm->pid) < 0)
goto cleanup;
+ locked = false;
+
+ if (virDomainLockMetadataDiskUnlock(driver->lockManager, vm, disk) < 0)
+ goto cleanup;
+
ret = 0;
cleanup:
virSecurityManagerTransactionAbort(driver->securityManager);
+ if (locked &&
+ virDomainLockMetadataDiskUnlock(driver->lockManager, vm, disk) < 0)
+ VIR_WARN("unable to release disk metadata lock");
return ret;
}
@@ -144,6 +195,12 @@ qemuSecuritySetImageLabel(virQEMUDriverPtr driver,
virStorageSourcePtr src)
{
int ret = -1;
+ bool locked = false;
+
+ if (virDomainLockMetadataImageLock(driver->lockManager, vm, src) < 0)
+ goto cleanup;
+
+ locked = true;
if (qemuDomainNamespaceEnabled(vm, QEMU_DOMAIN_NS_MOUNT) &&
virSecurityManagerTransactionStart(driver->securityManager) < 0)
@@ -159,9 +216,17 @@ qemuSecuritySetImageLabel(virQEMUDriverPtr driver,
vm->pid) < 0)
goto cleanup;
+ locked = false;
+
+ if (virDomainLockMetadataImageUnlock(driver->lockManager, vm, src) < 0)
+ goto cleanup;
+
ret = 0;
cleanup:
virSecurityManagerTransactionAbort(driver->securityManager);
+ if (locked &&
+ virDomainLockMetadataImageUnlock(driver->lockManager, vm, src) < 0)
+ VIR_WARN("unable to release image metadata lock");
return ret;
}
@@ -172,6 +237,12 @@ qemuSecurityRestoreImageLabel(virQEMUDriverPtr driver,
virStorageSourcePtr src)
{
int ret = -1;
+ bool locked = false;
+
+ if (virDomainLockMetadataImageLock(driver->lockManager, vm, src) < 0)
+ goto cleanup;
+
+ locked = true;
if (qemuDomainNamespaceEnabled(vm, QEMU_DOMAIN_NS_MOUNT) &&
virSecurityManagerTransactionStart(driver->securityManager) < 0)
@@ -187,9 +258,17 @@ qemuSecurityRestoreImageLabel(virQEMUDriverPtr driver,
vm->pid) < 0)
goto cleanup;
+ locked = false;
+
+ if (virDomainLockMetadataImageUnlock(driver->lockManager, vm, src) < 0)
+ goto cleanup;
+
ret = 0;
cleanup:
virSecurityManagerTransactionAbort(driver->securityManager);
+ if (locked &&
+ virDomainLockMetadataImageUnlock(driver->lockManager, vm, src) < 0)
+ VIR_WARN("unable to release image metadata lock");
return ret;
}
@@ -258,6 +337,12 @@ qemuSecuritySetMemoryLabel(virQEMUDriverPtr driver,
virDomainMemoryDefPtr mem)
{
int ret = -1;
+ bool locked = false;
+
+ if (virDomainLockMetadataMemLock(driver->lockManager, vm, mem) < 0)
+ goto cleanup;
+
+ locked = true;
if (qemuDomainNamespaceEnabled(vm, QEMU_DOMAIN_NS_MOUNT) &&
virSecurityManagerTransactionStart(driver->securityManager) < 0)
@@ -273,9 +358,17 @@ qemuSecuritySetMemoryLabel(virQEMUDriverPtr driver,
vm->pid) < 0)
goto cleanup;
+ locked = false;
+
+ if (virDomainLockMetadataMemUnlock(driver->lockManager, vm, mem) < 0)
+ goto cleanup;
+
ret = 0;
cleanup:
virSecurityManagerTransactionAbort(driver->securityManager);
+ if (locked &&
+ virDomainLockMetadataMemUnlock(driver->lockManager, vm, mem) < 0)
+ VIR_WARN("unable to release memory metadata lock");
return ret;
}
@@ -286,6 +379,12 @@ qemuSecurityRestoreMemoryLabel(virQEMUDriverPtr driver,
virDomainMemoryDefPtr mem)
{
int ret = -1;
+ bool locked = false;
+
+ if (virDomainLockMetadataMemLock(driver->lockManager, vm, mem) < 0)
+ goto cleanup;
+
+ locked = true;
if (qemuDomainNamespaceEnabled(vm, QEMU_DOMAIN_NS_MOUNT) &&
virSecurityManagerTransactionStart(driver->securityManager) < 0)
@@ -301,9 +400,17 @@ qemuSecurityRestoreMemoryLabel(virQEMUDriverPtr driver,
vm->pid) < 0)
goto cleanup;
+ locked = false;
+
+ if (virDomainLockMetadataMemUnlock(driver->lockManager, vm, mem) < 0)
+ goto cleanup;
+
ret = 0;
cleanup:
virSecurityManagerTransactionAbort(driver->securityManager);
+ if (locked &&
+ virDomainLockMetadataMemUnlock(driver->lockManager, vm, mem) < 0)
+ VIR_WARN("unable to release memory metadata lock");
return ret;
}
--
2.16.4
On 08/14/2018 07:19 AM, Michal Privoznik wrote:
> Fortunately, we have qemu wrappers, so it's sufficient to put the
> lock/unlock calls only there.
>

Kind of sparse... Maybe a few words about what benefit locking the
metadata provides. There is a danger in all this too: if the unlock does
fail, then since metadata locks are wait-type locks, any subsequent lock
attempt will wait.

> Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
> ---
>  src/qemu/qemu_security.c | 107 +++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 107 insertions(+)

[...]

> @@ -55,9 +62,17 @@ qemuSecuritySetAllLabel(virQEMUDriverPtr driver,
>                                        vm->pid) < 0)
>          goto cleanup;
>
> +    locked = false;

Technically not true yet. Also, I think two such attempts, with the
second eliciting the VIR_WARN, is perfectly reasonable (IOW: on failure
of the following goto cleanup with locked == true, try again, and spew
that message).

This repeats a few times, but I'll note it just once.

> +
> +    if (virDomainLockMetadataUnlock(driver->lockManager, vm) < 0)
> +        goto cleanup;
> +
>      ret = 0;
>   cleanup:
>      virSecurityManagerTransactionAbort(driver->securityManager);
> +    if (locked &&
> +        virDomainLockMetadataUnlock(driver->lockManager, vm) < 0)
> +        VIR_WARN("unable to release metadata lock");

Should we add the @stdin_path and @vm->pid and/or vm->name here? It may
help some day. Similar comments for the other new VIR_WARNs, but the
parameters change...

>      return ret;
>  }

[...]

> +    if (locked &&
> +        virDomainLockMetadataImageUnlock(driver->lockManager, vm, src) < 0)
> +        VIR_WARN("unable to release image metadata lock");
>      return ret;
>  }

Just below here is HostdevLabel processing... For a SCSI host device
that would seem to cover the type of resource described in the cover
letter to patch 6 - at least for iSCSI LUNs (and of course vHBA LUNs).

Need to look at the hostdev->mode and hostdev->source.subsys.type and
dig into the subsys.u.scsi to get to the src->path.

Other than that, nothing else sticks out to me.

John
On 08/17/2018 08:38 PM, John Ferlan wrote:

[...]

>> +    locked = false;
>
> Technically not true yet. Also, I think two such attempts, with the
> second eliciting the VIR_WARN, is perfectly reasonable (IOW: on failure
> of the following goto cleanup with locked == true, try again, and spew
> that message).
>
> This repeats a few times, but I'll note it just once.

I don't think it is safe to unlock something twice if we've locked it
just once.

>> +    if (locked &&
>> +        virDomainLockMetadataUnlock(driver->lockManager, vm) < 0)
>> +        VIR_WARN("unable to release metadata lock");
>
> Should we add the @stdin_path and @vm->pid and/or vm->name here? It may
> help some day. Similar comments for the other new VIR_WARNs, but the
> parameters change...

Ah, good point. Okay.

> Just below here is HostdevLabel processing... For a SCSI host device
> that would seem to cover the type of resource described in the cover
> letter to patch 6 - at least for iSCSI LUNs (and of course vHBA LUNs).
>
> Need to look at the hostdev->mode and hostdev->source.subsys.type and
> dig into the subsys.u.scsi to get to the src->path.

Oh, this was the reason I made the VIR_WARN() so sparse. I did not want
to get into the business of extracting paths from all the complicated
structs we have. I'll just put vm->def->name and vm->pid into the warn
message, as sketched below.

Michal
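A sketch of that enriched warning (the exact message text is an
assumption, not the committed wording; VIR_WARN is libvirt's
printf-style logging macro, and vm->def->name and vm->pid are the
fields mentioned above):

    /* Hypothetical wording - illustrative, not the committed patch. */
    if (locked &&
        virDomainLockMetadataUnlock(driver->lockManager, vm) < 0)
        VIR_WARN("unable to release metadata lock for domain %s (pid %lld)",
                 vm->def->name, (long long) vm->pid);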
On Tue, Aug 14, 2018 at 01:19:43PM +0200, Michal Privoznik wrote:
> Fortunately, we have qemu wrappers, so it's sufficient to put the
> lock/unlock calls only there.

[...]

> @@ -55,9 +62,17 @@ qemuSecuritySetAllLabel(virQEMUDriverPtr driver,
>                                        vm->pid) < 0)
>          goto cleanup;
>
> +    locked = false;
> +
> +    if (virDomainLockMetadataUnlock(driver->lockManager, vm) < 0)
> +        goto cleanup;
> +
>      ret = 0;
>   cleanup:
>      virSecurityManagerTransactionAbort(driver->securityManager);
> +    if (locked &&
> +        virDomainLockMetadataUnlock(driver->lockManager, vm) < 0)
> +        VIR_WARN("unable to release metadata lock");
>      return ret;
>  }

I'm wondering if this is really the right level to plug in the metadata
locking? What if we just pass a virLockManagerPtr to the security
drivers and let them lock each resource at the time they need to modify
its metadata? This will trivially guarantee that we always lock the
exact set of files whose metadata we'll be changing.

e.g. in the SELinux driver, the virSecuritySELinuxSetFileconHelper
method would directly call virLockManagerAcquire & virLockManagerRelease,
avoiding the entire virDomainLock abstraction, which was really focused
around managing the content locks across lifecycle state changes.

Regards,
Daniel
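Daniel's idea, reduced to a self-contained sketch: take a wait-type
advisory lock on exactly the file whose metadata is about to change,
mutate it, then release. Plain POSIX fcntl() locks stand in for the
real virtlockd RPC here, so everything except the syscalls themselves
is illustrative:

    #include <fcntl.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Illustration only: libvirt would go through
     * virLockManagerAcquire()/virLockManagerRelease() and virtlockd
     * rather than fcntl()-locking the target file directly. */
    static int
    chownWithMetadataLock(const char *path, uid_t uid, gid_t gid)
    {
        struct flock fl = {
            .l_type = F_WRLCK,    /* exclusive lock ... */
            .l_whence = SEEK_SET,
            .l_start = 0,
            .l_len = 0,           /* ... over the whole file */
        };
        int fd;
        int ret = -1;

        if ((fd = open(path, O_RDWR)) < 0)
            return -1;

        /* Wait-type lock: blocks until any competing holder (possibly
         * on another host, if the path lives on NFS) releases it. */
        if (fcntl(fd, F_SETLKW, &fl) < 0)
            goto cleanup;

        if (fchown(fd, uid, gid) < 0)
            goto cleanup;

        ret = 0;

     cleanup:
        close(fd);   /* POSIX locks are dropped when the fd is closed */
        return ret;
    }

Locking at this level is what makes the guarantee trivial: the lock's
lifetime exactly brackets one metadata change, on exactly the one file
being changed.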
On 08/20/2018 05:16 PM, Daniel P. Berrangé wrote:

[...]

> I'm wondering if this is really the right level to plug in the metadata
> locking? What if we just pass a virLockManagerPtr to the security
> drivers and let them lock each resource at the time they need to modify
> its metadata?
>
> e.g. in the SELinux driver, the virSecuritySELinuxSetFileconHelper
> method would directly call virLockManagerAcquire & virLockManagerRelease,
> avoiding the entire virDomainLock abstraction, which was really focused
> around managing the content locks across lifecycle state changes.

Yeah, I vaguely recall writing code like this (when I was trying to
solve this some time ago). Okay, let me see if that's feasible.

But boy, this is getting hairy.

Michal
On 08/20/2018 07:17 PM, Michal Prívozník wrote:
> On 08/20/2018 05:16 PM, Daniel P. Berrangé wrote:
>> I'm wondering if this is really the right level to plug in the metadata
>> locking? What if we just pass a virLockManagerPtr to the security
>> drivers and let them lock each resource at the time they need to modify
>> its metadata?
>
> Yeah, I vaguely recall writing code like this (when I was trying to
> solve this some time ago). Okay, let me see if that's feasible.
>
> But boy, this is getting hairy.

So as I'm writing these patches I've come to realize a couple of things:

a) instead of the domain PID we should pass the libvirtd PID, because
we want to release the locks if libvirtd dies, not the domain.

b) that, however, leads to a problem, because
virLockManagerLockDaemonAcquire() closes the socket to virtlockd,
causing it to kill the owner of the lock, i.e. it kills libvirtd
immediately.

c) even if I hack around b) so that we connect only once for each
lock+unlock pair call, we would still connect dozens of times when
starting a domain (all the paths we label times all active secdrivers).
So we should share the connection here too.

Now the question is how to do this effectively and cleanly (code-wise).
For solving b) we can have a new flag, say VIR_LOCK_MANAGER_KEEP_CONNECT,
that would cause virLockManagerLockDaemonAcquire() to save the
(connection, program, counter) tuple somewhere in the lock driver's
private data so that virLockManagerLockDaemonRelease() called with the
same flag can re-use the data.

For c) I guess we will need to open the connection in
virLockManagerLockDaemonNew(), put the socket FD into the event loop so
that EOF is caught, and initiate a reopen in that case. However, I'm
not sure this is desirable - to be constantly connected to virtlockd.

Michal
On Thu, Aug 23, 2018 at 04:54:01PM +0200, Michal Privoznik wrote:

[...]

> a) instead of the domain PID we should pass the libvirtd PID, because
> we want to release the locks if libvirtd dies, not the domain.
>
> b) that, however, leads to a problem, because
> virLockManagerLockDaemonAcquire() closes the socket to virtlockd,
> causing it to kill the owner of the lock, i.e. it kills libvirtd
> immediately.

This is fine ;-P

> c) even if I hack around b) so that we connect only once for each
> lock+unlock pair call, we would still connect dozens of times when
> starting a domain (all the paths we label times all active secdrivers).
> So we should share the connection here too.

Yeah, makes sense.

> For c) I guess we will need to open the connection in
> virLockManagerLockDaemonNew(), put the socket FD into the event loop so
> that EOF is caught, and initiate a reopen in that case. However, I'm
> not sure this is desirable - to be constantly connected to virtlockd.

Can we use a model similar to what I did for the shared secondary
driver connections?

By default a call to virGetConnectNetwork() will open a new connection.

To avoid opening & closing 100's of connections though, some places
will call virSetConnectNetwork() to store a pre-opened connection in
a thread local. That stays open until virSetConnectNetwork is called
again passing in a NULL.

We would put such a cache around any bits of code that triggers
many connection calls to virtlockd.

Regards,
Daniel
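A minimal sketch of that thread-local caching pattern; conn_t and the
open/close stubs below are stand-ins, not libvirt's real
virGetConnectNetwork()/virSetConnectNetwork() implementation:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct { int fd; } conn_t;      /* stand-in connection type */

    /* Stubs standing in for the real open/close of a daemon connection. */
    static conn_t *connOpen(void) { return calloc(1, sizeof(conn_t)); }
    static void connClose(conn_t *c) { free(c); }

    static pthread_key_t connKey;
    static pthread_once_t connOnce = PTHREAD_ONCE_INIT;

    static void connKeyInit(void) { pthread_key_create(&connKey, NULL); }

    /* Store a pre-opened connection in a thread local; pass NULL to
     * clear it again (mirroring the virSetConnect*() contract). */
    static void setConnCache(conn_t *c)
    {
        pthread_once(&connOnce, connKeyInit);
        pthread_setspecific(connKey, c);
    }

    /* Return the cached connection if one is stored, else open a fresh
     * one. *fromCache tells the caller whether the connection is its
     * own to close, or borrowed from the cache. */
    static conn_t *getConn(bool *fromCache)
    {
        conn_t *c;

        pthread_once(&connOnce, connKeyInit);
        if ((c = pthread_getspecific(connKey))) {
            *fromCache = true;
            return c;
        }
        *fromCache = false;
        return connOpen();
    }

Code about to fire many lock/unlock RPCs would call
setConnCache(connOpen()) once up front, do its work via getConn(), then
connClose() the cached connection and setConnCache(NULL) at the end.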
On 08/23/2018 05:01 PM, Daniel P. Berrangé wrote:

[...]

> Can we use a model similar to what I did for the shared secondary
> driver connections?
>
> By default a call to virGetConnectNetwork() will open a new connection.
>
> To avoid opening & closing 100's of connections though, some places
> will call virSetConnectNetwork() to store a pre-opened connection in
> a thread local. That stays open until virSetConnectNetwork is called
> again passing in a NULL.
>
> We would put such a cache around any bits of code that triggers
> many connection calls to virtlockd.

Actually, would sharing a connection for case c) even work?

Consider the following scenario: two threads A and B are starting two
different domains, both of which want to relabel /dev/blah.

Now, say thread A gets to lock the path first. It does so, and proceeds
to chown(). Then thread B wants to lock the same path. It tries to do
so, but has to wait until A unlocks it. However, at this point virtlockd
is stuck in the virFileLock() call (it waits for the lock to be
released). Because virtlockd runs single threaded, it no longer matters
that A will eventually try to unlock the path. virtlockd has deadlocked.

I don't see any way around this :(

Michal
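The deadlock, laid out as a timeline:

  Thread A                Thread B                virtlockd (one thread)
  --------                --------                ----------------------
  lock /dev/blah  ----------------------------->  virFileLock(): granted
  chown(/dev/blah)        lock /dev/blah  ----->  virFileLock(): blocks;
                                                  event loop stalled
  unlock /dev/blah  --->  never processed: the only thread is stuck in
                          virFileLock(), so B waits forever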
On Thu, Aug 23, 2018 at 05:45:42PM +0200, Michal Privoznik wrote:

[...]

> Now, say thread A gets to lock the path first. It does so, and proceeds
> to chown(). Then thread B wants to lock the same path. It tries to do
> so, but has to wait until A unlocks it. However, at this point virtlockd
> is stuck in the virFileLock() call (it waits for the lock to be
> released). Because virtlockd runs single threaded, it no longer matters
> that A will eventually try to unlock the path. virtlockd has deadlocked.
>
> I don't see any way around this :(

Oh right, yes, that kills the idea of using a WAIT flag for lockspaces,
unless we want to introduce threads into virtlockd, but we can't do
that because Linux has totally fubar'd locking across execve() :-(

Regards,
Daniel
On 08/23/2018 05:51 PM, Daniel P. Berrangé wrote:

[...]

> Oh right, yes, that kills the idea of using a WAIT flag for lockspaces,
> unless we want to introduce threads into virtlockd, but we can't do
> that because Linux has totally fubar'd locking across execve() :-(

But we need WAIT. I guess we do not want to do:

  lockForMetadata(const char *path) {
      int retries = MAX_RETRIES;

      while (true) {
          rc = try_lock(path, wait=false);

          if (rc == 0)
              break;
          if (errno == EAGAIN && retries--) {
              sleep(.1);
              continue;
          }

          errorOut();
      }
  }

However, if we make sure that virtlockd never fork()-s nor exec()-s, we
have nothing to fear. Or am I missing something? And to be 100% sure we
can always provide a dummy impl for fork() + execve() in
src/locking/lock_daemon.c which fails every time.

Michal
On 08/23/2018 06:14 PM, Michal Privoznik wrote:
> On 08/23/2018 05:51 PM, Daniel P. Berrangé wrote:
>>
>> Oh right, yes, that kills the idea of using a WAIT flag for lockspaces,
>> unless we want to introduce threads into virtlockd, but we can't do
>> that because Linux has totally fubar'd locking across execve() :-(

Actually, it's a problem with open() + close(), not execve(). And no
dummy implementation (as I was suggesting below) will help us with that.

> But we need WAIT. I guess we do not want to do:
>
>   lockForMetadata(const char *path) {
>       [...]
>   }
>
> However, if we make sure that virtlockd never fork()-s nor exec()-s, we
> have nothing to fear. Or am I missing something? And to be 100% sure we
> can always provide a dummy impl for fork() + execve() in
> src/locking/lock_daemon.c which fails every time.

I work around this by putting a mutex into the secdriver so that only
one thread can do lock(). The others have to wait until that thread
unlock()-s the path. This way we leave virtlockd with just one thread.
In my testing everything works like a charm.

Michal
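A sketch of that workaround, under the assumption that one process-wide
mutex brackets the whole acquire-mutate-release window; the
metadataAcquire()/metadataRelease() stubs stand in for the real RPCs to
virtlockd and every name here is hypothetical:

    #include <pthread.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Stubs standing in for the real acquire/release RPCs to virtlockd. */
    static int metadataAcquire(const char *path) { (void)path; return 0; }
    static int metadataRelease(const char *path) { (void)path; return 0; }

    /* One process-wide mutex: at most one outstanding metadata lock per
     * libvirtd, so the single-threaded virtlockd never receives a
     * second, blocking acquire from us while we still hold the first. */
    static pthread_mutex_t metadataMutex = PTHREAD_MUTEX_INITIALIZER;

    static int
    chownSerialized(const char *path, uid_t uid, gid_t gid)
    {
        int ret = -1;

        pthread_mutex_lock(&metadataMutex);

        if (metadataAcquire(path) < 0)   /* may block on other hosts */
            goto cleanup;

        ret = chown(path, uid, gid);

        if (metadataRelease(path) < 0)
            ret = -1;

     cleanup:
        pthread_mutex_unlock(&metadataMutex);
        return ret;
    }

Cross-host exclusion still comes from the wait-type lock itself; the
mutex only ensures that, within one libvirtd, threads queue up in the
secdriver instead of inside virtlockd's event loop.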
On Fri, Aug 24, 2018 at 02:01:52PM +0200, Michal Privoznik wrote:

[...]

> Actually, it's a problem with open() + close(), not execve(). And no
> dummy implementation (as I was suggesting below) will help us with that.

No, I really do mean execve() here. The open() + close() issue is the
awful standards-compliant behaviour. The execve() issue with threads is
a trainwreck of Linux's own making:

  https://bugzilla.redhat.com/show_bug.cgi?id=1552621

> I work around this by putting a mutex into the secdriver so that only
> one thread can do lock(). The others have to wait until that thread
> unlock()-s the path. This way we leave virtlockd with just one thread.
> In my testing everything works like a charm.

That sounds reasonable, so we don't need the _WAIT behaviour in
virtlockd itself, as everything will wait in the secdriver instead.
At least for now, until we modularize the startup process with the
shim. Guess that's just one more todo item to solve for the shim,
so not the end of the world.

Regards,
Daniel
On 08/24/2018 02:53 PM, Daniel P. Berrangé wrote:
>
> That sounds reasonable, so we don't need the _WAIT behaviour in
> virtlockd itself, as everything will wait in the secdriver instead.
> At least for now, until we modularize the startup process with the
> shim. Guess that's just one more todo item to solve for the shim,
> so not the end of the world.

Hold on, we do need _WAIT so that we mutually exclude other virtlockd-s
on other hosts fiddling with seclabels on shared NFS. However, we will
not deadlock on a single host; that's what I'm saying.

Michal
On Fri, Aug 24, 2018 at 03:29:24PM +0200, Michal Privoznik wrote:
> On 08/24/2018 02:53 PM, Daniel P. Berrangé wrote:
>>
>> That sounds reasonable, so we don't need the _WAIT behaviour in
>> virtlockd itself, as everything will wait in the secdriver instead.
>
> Hold on, we do need _WAIT so that we mutually exclude other virtlockd-s
> on other hosts fiddling with seclabels on shared NFS. However, we will
> not deadlock on a single host; that's what I'm saying.

Right, but later, when multiple clients are permitted to connect to the
same virtlockd, the API they will use has a designed-in deadlock :-(

Regards,
Daniel