From: Marc-André Lureau <marcandre.lureau@redhat.com>
Replace select() with poll() to fix a crash when QEMU has a large number
of FDs.
Fixes:
https://bugzilla.redhat.com/show_bug.cgi?id=2020133
Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
---
backends/tpm/tpm_util.c | 12 +++---------
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/backends/tpm/tpm_util.c b/backends/tpm/tpm_util.c
index a6e6d3e72f..5f4c9f5b6f 100644
--- a/backends/tpm/tpm_util.c
+++ b/backends/tpm/tpm_util.c
@@ -112,12 +112,9 @@ static int tpm_util_request(int fd,
                             void *response,
                             size_t responselen)
 {
-    fd_set readfds;
+    GPollFD fds[1] = { {.fd = fd, .events = G_IO_IN } };
     int n;
-    struct timeval tv = {
-        .tv_sec = 1,
-        .tv_usec = 0,
-    };
+    int timeout = 1000;
 
     n = write(fd, request, requestlen);
     if (n < 0) {
@@ -127,11 +124,8 @@ static int tpm_util_request(int fd,
         return -EFAULT;
     }
 
-    FD_ZERO(&readfds);
-    FD_SET(fd, &readfds);
-
     /* wait for a second */
-    n = select(fd + 1, &readfds, NULL, NULL, &tv);
+    n = RETRY_ON_EINTR(g_poll(fds, 1, timeout));
     if (n != 1) {
         return -errno;
     }
--
2.41.0
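
For context outside the QEMU tree: RETRY_ON_EINTR() is a QEMU convenience macro
(from qemu/osdep.h) that re-evaluates its expression while it fails with EINTR.
Below is a minimal standalone sketch of the same one-second readability wait
using only GLib; wait_readable() is a made-up name for illustration, not a
function from QEMU or GLib.

/*
 * Standalone sketch (not QEMU code): wait up to timeout_ms for fd to
 * become readable, retrying on EINTR.  Returns 1 when readable, 0 on
 * timeout, -errno on failure.  Build with:
 *   gcc wait.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <errno.h>
#include <glib.h>

static int wait_readable(int fd, int timeout_ms)
{
    GPollFD pfd = { .fd = fd, .events = G_IO_IN };
    int n;

    do {
        n = g_poll(&pfd, 1, timeout_ms);
    } while (n < 0 && errno == EINTR);

    if (n < 0) {
        return -errno;
    }
    /* Unlike select(), this also works for descriptors >= FD_SETSIZE. */
    return n;
}

The tpm_util_request() change above follows the same pattern, with the
1000 ms timeout matching the old tv_sec = 1.
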
On 9/11/23 07:36, marcandre.lureau@redhat.com wrote:
> From: Marc-André Lureau <marcandre.lureau@redhat.com>
>
> Replace select() with poll() to fix a crash when QEMU has a large number
> of FDs.
>
> Fixes:
> https://bugzilla.redhat.com/show_bug.cgi?id=2020133

The description there seems wrong. It's a limit of the POSIX API, not the
vTPM device driver.

>
> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>

Reviewed-by: Stefan Berger <stefanb@linux.ibm.com>

> ---
>  backends/tpm/tpm_util.c | 12 +++---------
>  1 file changed, 3 insertions(+), 9 deletions(-)
>
> diff --git a/backends/tpm/tpm_util.c b/backends/tpm/tpm_util.c
> index a6e6d3e72f..5f4c9f5b6f 100644
> --- a/backends/tpm/tpm_util.c
> +++ b/backends/tpm/tpm_util.c
> @@ -112,12 +112,9 @@ static int tpm_util_request(int fd,
>                              void *response,
>                              size_t responselen)
>  {
> -    fd_set readfds;
> +    GPollFD fds[1] = { {.fd = fd, .events = G_IO_IN } };
>      int n;
> -    struct timeval tv = {
> -        .tv_sec = 1,
> -        .tv_usec = 0,
> -    };
> +    int timeout = 1000;
>
>      n = write(fd, request, requestlen);
>      if (n < 0) {
> @@ -127,11 +124,8 @@ static int tpm_util_request(int fd,
>          return -EFAULT;
>      }
>
> -    FD_ZERO(&readfds);
> -    FD_SET(fd, &readfds);
> -
>      /* wait for a second */
> -    n = select(fd + 1, &readfds, NULL, NULL, &tv);
> +    n = RETRY_ON_EINTR(g_poll(fds, 1, timeout));
>      if (n != 1) {
>          return -errno;
>      }
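
To make the POSIX limit Stefan mentions concrete (my illustration, not code
from the patch): fd_set is a fixed-size bitmask covering FD_SETSIZE
descriptors, typically 1024, so FD_SET() on a larger descriptor number writes
outside the set, which is how a QEMU process with many open files can crash in
the old select() path. A select()-based caller would have needed a guard along
these lines, where check_fd_for_select() is a hypothetical helper:

#include <errno.h>
#include <sys/select.h>

/* Illustration only: select() cannot handle fd >= FD_SETSIZE at all,
 * and calling FD_SET() on such a descriptor is undefined behaviour. */
static int check_fd_for_select(int fd)
{
    if (fd < 0 || fd >= FD_SETSIZE) {
        return -EINVAL;
    }
    return 0;
}

poll()/g_poll() has no such ceiling, which is why the conversion fixes the
crash regardless of how many descriptors QEMU has open.
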
11.09.2023 14:36, marcandre.lureau@redhat.com:
> From: Marc-André Lureau <marcandre.lureau@redhat.com>
>
> Replace select() with poll() to fix a crash when QEMU has a large number
> of FDs.
>
> Fixes:
> https://bugzilla.redhat.com/show_bug.cgi?id=2020133
>
> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
> ---
>  backends/tpm/tpm_util.c | 12 +++---------
>  1 file changed, 3 insertions(+), 9 deletions(-)
>
> diff --git a/backends/tpm/tpm_util.c b/backends/tpm/tpm_util.c
> index a6e6d3e72f..5f4c9f5b6f 100644
> --- a/backends/tpm/tpm_util.c
> +++ b/backends/tpm/tpm_util.c
> @@ -112,12 +112,9 @@ static int tpm_util_request(int fd,
>                              void *response,
>                              size_t responselen)
>  {
> -    fd_set readfds;
> +    GPollFD fds[1] = { {.fd = fd, .events = G_IO_IN } };
>      int n;
> -    struct timeval tv = {
> -        .tv_sec = 1,
> -        .tv_usec = 0,
> -    };
> +    int timeout = 1000;

You don't need a variable for this with poll(). Besides, the value is only
clear in the context of this patch, where the removed lines say tv_sec=1.
Without that context it becomes suspicious and catches the eye: too long a
timeout?

>
>      n = write(fd, request, requestlen);
>      if (n < 0) {
> @@ -127,11 +124,8 @@ static int tpm_util_request(int fd,
>          return -EFAULT;
>      }
>
> -    FD_ZERO(&readfds);
> -    FD_SET(fd, &readfds);
> -
>      /* wait for a second */
> -    n = select(fd + 1, &readfds, NULL, NULL, &tv);
> +    n = RETRY_ON_EINTR(g_poll(fds, 1, timeout));

It's much better IMHO to use "1000" directly here, especially since the
comment says it waits for a second.

>      if (n != 1) {
>          return -errno;
>      }

Other than that,

Reviewed-by: Michael Tokarev <mjt@tls.msk.ru>

/mjt
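
If that suggestion is taken, the call site would presumably end up reading
roughly like the fragment below (a sketch of the suggested style, not the
actual respin), with the timeout variable dropped in tpm_util_request():

    /* wait for a second */
    n = RETRY_ON_EINTR(g_poll(fds, 1, 1000));
    if (n != 1) {
        return -errno;
    }
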