Commit de814108 authored by Andrea Bolognani

patches: Add backports

Specifically

  * backport/qemu_domain-Don-t-unref-NULL-hash-table-in-qemuDomainRefr.patch

which Closes: #1030671 and

  * backport/qemu-Jump-to-cleanup-label-on-umount-failure.patch
  * backport/qemu_namespace-Deal-with-nested-mounts-when-umount-ing-de.patch
  * backport/qemuProcessRefreshDisks-Don-t-skip-filling-of-disk-inform.patch
  * backport/qemu_extdevice-Do-cleanup-host-only-for-VIR_DOMAIN_TPM_TY.patch
  * backport/qemu-blockjob-Handle-pending-blockjob-state-only-when-we-.patch
  * backport/rpc-client-Don-t-check-return-value-of-virNetMessageNew.patch
  * backport/rpc-Don-t-warn-about-max_client_requests-in-single-thread.patch

which address some other random issues.
parent d43d1e72
Merge request !174: Backport a few fixes
Pipeline #505403 passed
Showing 440 additions and 0 deletions
From: Jim Fehlig <jfehlig@suse.com>
Date: Mon, 6 Feb 2023 10:40:12 -0700
Subject: qemu: Jump to cleanup label on umount failure
Similar to other error paths in qemuDomainUnshareNamespace(), jump to
the cleanup label on umount error instead of directly returning -1.
Signed-off-by: Jim Fehlig <jfehlig@suse.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
(cherry picked from commit c3f16cea3bef578c498c720aa90c677ee9511e2f)
Forwarded: not-needed
Origin: https://gitlab.com/libvirt/libvirt/-/commit/c3f16cea3bef578c498c720aa90c677ee9511e2f
---
src/qemu/qemu_namespace.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/qemu/qemu_namespace.c b/src/qemu/qemu_namespace.c
index 5769a4d..833313d 100644
--- a/src/qemu/qemu_namespace.c
+++ b/src/qemu/qemu_namespace.c
@@ -779,7 +779,7 @@ qemuDomainUnshareNamespace(virQEMUDriverConfig *cfg,
#if defined(__linux__)
if (umount("/dev") < 0) {
virReportSystemError(errno, "%s", _("failed to umount devfs on /dev"));
- return -1;
+ goto cleanup;
}
#endif /* !defined(__linux__) */
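
A minimal sketch of the goto-cleanup idiom the patch adopts (hypothetical function and stand-in resource, not libvirt code): every failure after the first resource is acquired funnels through a single label, so the resource is always released.

#include <stdlib.h>
#include <sys/mount.h>

static int
unshareDevSketch(void)
{
    int ret = -1;
    char *tmp = calloc(1, 64);    /* stand-in for earlier allocations */

    if (!tmp)
        return -1;                /* nothing acquired yet */

    if (umount("/dev") < 0)
        goto cleanup;             /* instead of returning -1 directly */

    ret = 0;

 cleanup:
    free(tmp);                    /* runs on success and failure alike */
    return ret;
}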
From: Peter Krempa <pkrempa@redhat.com>
Date: Fri, 10 Feb 2023 17:16:43 +0100
Subject: qemu: blockjob: Handle 'pending' blockjob state only when we need it
The 'pending' state needs to be handled by the blockjob code only when
the snapshot code requests a block-commit without auto-finalization.
If we always handle it, we fail to properly remove the blockjob data for
the 'blockdev-create' job, as that also transitions through 'pending';
we'd never update it once it reaches 'concluded', because the code
already thinks the job has finished and is no longer watching it.
Introduce a 'processPending' property into block job data and set it
only when we know that we need to process 'pending'.
Fixes: 90d9bc9d74a5157167548b26c00b1a016655e295
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2168769
Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Reviewed-by: Pavel Hrdina <phrdina@redhat.com>
(cherry picked from commit c433c2434c0459df98ed3355ef615e341acd9009)
Forwarded: not-needed
Origin: https://gitlab.com/libvirt/libvirt/-/commit/c433c2434c0459df98ed3355ef615e341acd9009
---
src/qemu/qemu_block.c | 1 +
src/qemu/qemu_blockjob.c | 19 ++++++++++---------
src/qemu/qemu_blockjob.h | 4 ++++
3 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c
index e865aa1..b1057e3 100644
--- a/src/qemu/qemu_block.c
+++ b/src/qemu/qemu_block.c
@@ -3367,6 +3367,7 @@ qemuBlockCommit(virDomainObj *vm,
if (!(job = qemuBlockJobDiskNewCommit(vm, disk, top_parent, topSource,
baseSource,
flags & VIR_DOMAIN_BLOCK_COMMIT_DELETE,
+ autofinalize,
flags)))
goto cleanup;
diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c
index cb2d05d..a20cf1d 100644
--- a/src/qemu/qemu_blockjob.c
+++ b/src/qemu/qemu_blockjob.c
@@ -274,6 +274,7 @@ qemuBlockJobDiskNewCommit(virDomainObj *vm,
virStorageSource *top,
virStorageSource *base,
bool delete_imgs,
+ virTristateBool autofinalize,
unsigned int jobflags)
{
g_autoptr(qemuBlockJobData) job = NULL;
@@ -290,6 +291,7 @@ qemuBlockJobDiskNewCommit(virDomainObj *vm,
job->data.commit.top = top;
job->data.commit.base = base;
job->data.commit.deleteCommittedImages = delete_imgs;
+ job->processPending = autofinalize == VIR_TRISTATE_BOOL_NO;
job->jobflags = jobflags;
if (qemuBlockJobRegister(job, vm, disk, true) < 0)
@@ -532,8 +534,6 @@ qemuBlockJobRefreshJobs(virDomainObj *vm)
if (job->state == QEMU_BLOCKJOB_STATE_NEW ||
job->state == QEMU_BLOCKJOB_STATE_RUNNING)
job->newstate = newstate;
- } else if (newstate == QEMU_BLOCKJOB_STATE_PENDING) {
- job->newstate = newstate;
}
/* don't update the job otherwise */
}
@@ -1568,13 +1568,14 @@ qemuBlockJobEventProcess(virQEMUDriver *driver,
case QEMU_BLOCKJOB_STATE_PENDING:
/* Similarly as for 'ready' state we should handle it only when
- * previous state was 'new' or 'running' as there are other cases
- * when it can be emitted by QEMU. Currently we need this only when
- * deleting non-active external snapshots. */
- if (job->state == QEMU_BLOCKJOB_STATE_NEW ||
- job->state == QEMU_BLOCKJOB_STATE_RUNNING) {
- job->state = job->newstate;
- qemuDomainSaveStatus(vm);
+ * previous state was 'new' or 'running' and only if the blockjob code
+ * is handling finalization of the job explicitly. */
+ if (job->processPending) {
+ if (job->state == QEMU_BLOCKJOB_STATE_NEW ||
+ job->state == QEMU_BLOCKJOB_STATE_RUNNING) {
+ job->state = job->newstate;
+ qemuDomainSaveStatus(vm);
+ }
}
job->newstate = -1;
break;
diff --git a/src/qemu/qemu_blockjob.h b/src/qemu/qemu_blockjob.h
index e9b283d..f1ac43b 100644
--- a/src/qemu/qemu_blockjob.h
+++ b/src/qemu/qemu_blockjob.h
@@ -138,6 +138,9 @@ struct _qemuBlockJobData {
int brokentype; /* the previous type of a broken blockjob qemuBlockJobType */
+ bool processPending; /* process the 'pending' state of the job, if the job
+ should not be auto-finalized */
+
bool invalidData; /* the job data (except name) is not valid */
bool reconnected; /* internal field for tracking whether job is live after reconnect to qemu */
};
@@ -175,6 +178,7 @@ qemuBlockJobDiskNewCommit(virDomainObj *vm,
virStorageSource *top,
virStorageSource *base,
bool delete_imgs,
+ virTristateBool autofinalize,
unsigned int jobflags);
qemuBlockJobData *
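
Condensed, the resulting gating logic looks like this (simplified types, not the actual libvirt structures): the 'pending' transition is recorded only for jobs created with auto-finalization disabled, so an auto-finalized 'blockdev-create' job keeps being watched until 'concluded'.

#include <stdbool.h>

enum jobState { JOB_NEW, JOB_RUNNING, JOB_PENDING, JOB_CONCLUDED };

struct blockJobSketch {
    enum jobState state;
    bool processPending;  /* true only for block-commit without auto-finalize */
};

static void
handlePendingSketch(struct blockJobSketch *job)
{
    if (job->processPending &&
        (job->state == JOB_NEW || job->state == JOB_RUNNING))
        job->state = JOB_PENDING;
    /* otherwise 'pending' is ignored and the job stays watched */
}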
From: Peter Krempa <pkrempa@redhat.com>
Date: Thu, 9 Feb 2023 09:40:32 +0100
Subject: qemuProcessRefreshDisks: Don't skip filling of disk information if
tray state didn't change
Commit 5ef2582646eb98 added emitting of events when refreshing disk state,
where it wanted to avoid sending the event if the disk state didn't
change. This was achieved by using 'continue' in the loop filling the
information. Unfortunately, this also skips the extraction of whether the
device has a tray, which is propagated into internal structures; that in
turn broke cdrom media change, as the code thought there was no tray for
the device.
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2166411
Fixes: 5ef2582646eb98af208ce37355f82bdef39931fa
Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Reviewed-by: Kristina Hanicova <khanicov@redhat.com>
(cherry picked from commit 86cfe93ef7fdc2d665a2fc88b79af89e7978ba78)
Forwarded: not-needed
Origin: https://gitlab.com/libvirt/libvirt/-/commit/86cfe93ef7fdc2d665a2fc88b79af89e7978ba78
---
src/qemu/qemu_process.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index ee9f078..0c408ee 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -8724,16 +8724,13 @@ qemuProcessRefreshDisks(virDomainObj *vm,
continue;
if (info->removable) {
- virObjectEvent *event = NULL;
+ bool emitEvent = info->tray_open != disk->tray_status;
int reason;
if (info->empty)
virDomainDiskEmptySource(disk);
if (info->tray) {
- if (info->tray_open == disk->tray_status)
- continue;
-
if (info->tray_open) {
reason = VIR_DOMAIN_EVENT_TRAY_CHANGE_OPEN;
disk->tray_status = VIR_DOMAIN_DISK_TRAY_OPEN;
@@ -8742,8 +8739,10 @@ qemuProcessRefreshDisks(virDomainObj *vm,
disk->tray_status = VIR_DOMAIN_DISK_TRAY_CLOSED;
}
- event = virDomainEventTrayChangeNewFromObj(vm, disk->info.alias, reason);
- virObjectEventStateQueue(driver->domainEventState, event);
+ if (emitEvent) {
+ virObjectEvent *event = virDomainEventTrayChangeNewFromObj(vm, disk->info.alias, reason);
+ virObjectEventStateQueue(driver->domainEventState, event);
+ }
}
}
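
Reduced to its shape, the bug and the fix look like this (hypothetical helpers, not the real function): the early 'continue' that was meant to skip only the event also skipped the bookkeeping below it, so the fix computes the decision up front and keeps the bookkeeping unconditional.

#include <stdbool.h>
#include <stddef.h>

static bool trayStateUnchanged(size_t i);
static void updateTrayStatus(size_t i);
static void emitTrayEvent(size_t i);

static void
refreshSketch(size_t ndisks)
{
    for (size_t i = 0; i < ndisks; i++) {
        bool emitEvent = !trayStateUnchanged(i);

        updateTrayStatus(i);      /* previously skipped by 'continue' */
        if (emitEvent)
            emitTrayEvent(i);
    }
}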
From: Michal Privoznik <mprivozn@redhat.com>
Date: Thu, 26 Jan 2023 11:56:10 +0100
Subject: qemu_domain: Don't unref NULL hash table in
qemuDomainRefreshStatsSchema()
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
The g_hash_table_unref() function does not accept NULL. Passing
NULL results in a glib warning being triggered. Check whether the
hash table is not NULL and unref it only then.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
(cherry picked from commit c3afde9211b550d3900edc5386ab121f5b39fd3e)
Forwarded: not-needed
Origin: https://gitlab.com/libvirt/libvirt/-/commit/c3afde9211b550d3900edc5386ab121f5b39fd3e
Bug-Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1030671
---
src/qemu/qemu_domain.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 2eb5653..da7a869 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -11920,7 +11920,8 @@ qemuDomainRefreshStatsSchema(virDomainObj *dom)
if (!schema)
return -1;
- g_hash_table_unref(priv->statsSchema);
+ if (priv->statsSchema)
+ g_hash_table_unref(priv->statsSchema);
priv->statsSchema = schema;
return 0;
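
glib also offers a NULL-safe one-liner for this pattern; a sketch of the same fix using g_clear_pointer() (simplified struct, not the libvirt type):

#include <glib.h>

typedef struct {
    GHashTable *statsSchema;
} DomainPrivateSketch;

static void
replaceSchemaSketch(DomainPrivateSketch *priv, GHashTable *schema)
{
    /* a no-op when statsSchema is NULL; otherwise unrefs and NULLs it */
    g_clear_pointer(&priv->statsSchema, g_hash_table_unref);
    priv->statsSchema = schema;
}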
From: Michal Privoznik <mprivozn@redhat.com>
Date: Fri, 10 Feb 2023 09:47:05 +0100
Subject: qemu_extdevice: Do cleanup host only for
VIR_DOMAIN_TPM_TYPE_EMULATOR
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
We only set up the host for VIR_DOMAIN_TPM_TYPE_EMULATOR, and thus we
should similarly do cleanup only for the same type. This also fixes a
crash in which qemuTPMEmulatorCleanupHost() accesses
tpm->data.emulator.storagepath, which is NULL for
VIR_DOMAIN_TPM_TYPE_EXTERNAL.
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2168762
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
(cherry picked from commit 03f76e577d66f8eea6aa7cc513e75026527b4cda)
Forwarded: not-needed
Origin: https://gitlab.com/libvirt/libvirt/-/commit/03f76e577d66f8eea6aa7cc513e75026527b4cda
---
src/qemu/qemu_extdevice.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/qemu/qemu_extdevice.c b/src/qemu/qemu_extdevice.c
index f7b2e2e..fdefe59 100644
--- a/src/qemu/qemu_extdevice.c
+++ b/src/qemu/qemu_extdevice.c
@@ -162,7 +162,10 @@ qemuExtDevicesCleanupHost(virQEMUDriver *driver,
return;
for (i = 0; i < def->ntpms; i++) {
- qemuExtTPMCleanupHost(def->tpms[i], flags, outgoingMigration);
+ virDomainTPMDef *tpm = def->tpms[i];
+
+ if (tpm->type == VIR_DOMAIN_TPM_TYPE_EMULATOR)
+ qemuExtTPMCleanupHost(tpm, flags, outgoingMigration);
}
}
From: Michal Privoznik <mprivozn@redhat.com>
Date: Tue, 7 Feb 2023 15:06:32 +0100
Subject: qemu_namespace: Deal with nested mounts when umount()-ing /dev
In one of the recent commits (v9.0.0-rc1~106) I've made our QEMU
namespace code umount the original /dev. One of the reasons was
enhanced security: previously we just mounted a tmpfs over the
original /dev, so a malicious QEMU could simply umount("/dev") and
get to the original /dev with all its nodes.
Now, on some systems this introduced a regression:
failed to umount devfs on /dev: Device or resource busy
But how could this be? We've moved all file systems mounted under
/dev to a temporary location. Or have we? As it turns out, not
quite. If there are two file systems mounted on the same target,
e.g. like this:
mount -t tmpfs tmpfs /dev/shm/ && mount -t tmpfs tmpfs /dev/shm/
then only the topmost (i.e. the last one) is moved. See
qemuDomainUnshareNamespace() for more info.
Now, we could enhance our code to deal with these "doubled" mount
points. Or, since it is the topmost file system that is
accessible anyway (and that one is preserved), we can
umount("/dev") in a recursive fashion.
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2167302
Fixes: 379c0ce4bfed8733dfbde557c359eecc5474ce38
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Jim Fehlig <jfehlig@suse.com>
(cherry picked from commit 5155ab4b2a704285505dfea6ffee8b980fdaa29e)
Forwarded: not-needed
Origin: https://gitlab.com/libvirt/libvirt/-/commit/5155ab4b2a704285505dfea6ffee8b980fdaa29e
---
src/qemu/qemu_namespace.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/qemu/qemu_namespace.c b/src/qemu/qemu_namespace.c
index 833313d..6e50ef5 100644
--- a/src/qemu/qemu_namespace.c
+++ b/src/qemu/qemu_namespace.c
@@ -777,7 +777,7 @@ qemuDomainUnshareNamespace(virQEMUDriverConfig *cfg,
}
#if defined(__linux__)
- if (umount("/dev") < 0) {
+ if (umount2("/dev", MNT_DETACH) < 0) {
virReportSystemError(errno, "%s", _("failed to umount devfs on /dev"));
goto cleanup;
}
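
A standalone demonstration of the difference (illustrative only: requires root, a dedicated mount namespace, and a scratch mount point; do not point it at a live /dev):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mount.h>

int
main(void)
{
    /* plain umount() fails with EBUSY while nested mounts remain below */
    if (umount("/mnt/scratch") < 0)
        fprintf(stderr, "umount: %s\n", strerror(errno));

    /* MNT_DETACH lazily detaches the whole subtree in one call */
    if (umount2("/mnt/scratch", MNT_DETACH) < 0)
        fprintf(stderr, "umount2: %s\n", strerror(errno));

    return 0;
}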
From: Peter Krempa <pkrempa@redhat.com>
Date: Wed, 15 Feb 2023 10:48:31 +0100
Subject: rpc: Don't warn about "max_client_requests" in single-threaded
daemons
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
The warning about max_client_requests is hit inside virtlogd every time
a VM starts, which spams the logs.
Emit the warning only when the client request limit is not 1, and
instead add a note to the daemon config file warning against
configuring it too low.
Fixes: 031878c2364
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2145188
Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
(cherry picked from commit b3f8e072fe08a6beaf3ec3d27e02efee4358b2ca)
Forwarded: not-needed
Origin: https://gitlab.com/libvirt/libvirt/-/commit/b3f8e072fe08a6beaf3ec3d27e02efee4358b2ca
---
src/remote/libvirtd.conf.in | 1 +
src/rpc/virnetserverclient.c | 3 ++-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/remote/libvirtd.conf.in b/src/remote/libvirtd.conf.in
index 80a98b1..32a6803 100644
--- a/src/remote/libvirtd.conf.in
+++ b/src/remote/libvirtd.conf.in
@@ -374,6 +374,7 @@
# connection. To avoid one client monopolizing the server
# this should be a small fraction of the global max_workers
# parameter.
+# Setting this too low may cause keepalive timeouts.
#max_client_requests = 5
# Same processing controls, but this time for the admin interface.
diff --git a/src/rpc/virnetserverclient.c b/src/rpc/virnetserverclient.c
index b5c764b..bdb3552 100644
--- a/src/rpc/virnetserverclient.c
+++ b/src/rpc/virnetserverclient.c
@@ -1261,7 +1261,8 @@ static virNetMessage *virNetServerClientDispatchRead(virNetServerClient *client)
client->rx->bufferLength = VIR_NET_MESSAGE_LEN_MAX;
client->rx->buffer = g_new0(char, client->rx->bufferLength);
client->nrequests++;
- } else if (!client->nrequests_warning) {
+ } else if (!client->nrequests_warning &&
+ client->nrequests_max > 1) {
client->nrequests_warning = true;
VIR_WARN("Client hit max requests limit %zd. This may result "
"in keep-alive timeouts. Consider tuning the "
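
In configuration terms (values illustrative, matching the documented default): daemons that intentionally run with a limit of 1, like virtlogd, no longer warn, while multi-threaded daemons should simply avoid setting the limit too low.

# libvirtd.conf sketch: keep well above 1 to avoid keep-alive timeouts
#max_client_requests = 5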
From: Peter Krempa <pkrempa@redhat.com>
Date: Wed, 15 Feb 2023 10:43:53 +0100
Subject: rpc: client: Don't check return value of virNetMessageNew
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
virNetServerClientDispatchRead() checked the return value of
virNetMessageNew(), but that is no longer necessary, as it can't
return NULL nowadays.
Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
(cherry picked from commit 761cb8a0876d32445951791030c77afa147c0de1)
Forwarded: not-needed
Origin: https://gitlab.com/libvirt/libvirt/-/commit/761cb8a0876d32445951791030c77afa147c0de1
---
src/rpc/virnetserverclient.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/src/rpc/virnetserverclient.c b/src/rpc/virnetserverclient.c
index c9a4eb5..b5c764b 100644
--- a/src/rpc/virnetserverclient.c
+++ b/src/rpc/virnetserverclient.c
@@ -1257,13 +1257,10 @@ static virNetMessage *virNetServerClientDispatchRead(virNetServerClient *client)
/* Possibly need to create another receive buffer */
if (client->nrequests < client->nrequests_max) {
- if (!(client->rx = virNetMessageNew(true))) {
- client->wantClose = true;
- } else {
- client->rx->bufferLength = VIR_NET_MESSAGE_LEN_MAX;
- client->rx->buffer = g_new0(char, client->rx->bufferLength);
- client->nrequests++;
- }
+ client->rx = virNetMessageNew(true);
+ client->rx->bufferLength = VIR_NET_MESSAGE_LEN_MAX;
+ client->rx->buffer = g_new0(char, client->rx->bufferLength);
+ client->nrequests++;
} else if (!client->nrequests_warning) {
client->nrequests_warning = true;
VIR_WARN("Client hit max requests limit %zd. This may result "
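
The check became dead code because of how allocation fails in glib; a sketch assuming virNetMessageNew() allocates with g_new0(), as the surrounding code does:

#include <glib.h>

typedef struct {
    char *buffer;
    size_t bufferLength;
} MsgSketch;

static MsgSketch *
msgNewSketch(void)
{
    /* g_new0() never returns NULL: on allocation failure glib aborts */
    return g_new0(MsgSketch, 1);
}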
backport/apparmor-Allow-umount-dev.patch
backport/qemu_interface-Fix-managed-no-case-when-creating-an-ether.patch
backport/qemu_domain-Don-t-unref-NULL-hash-table-in-qemuDomainRefr.patch
backport/qemu-Jump-to-cleanup-label-on-umount-failure.patch
backport/qemu_namespace-Deal-with-nested-mounts-when-umount-ing-de.patch
backport/qemuProcessRefreshDisks-Don-t-skip-filling-of-disk-inform.patch
backport/qemu_extdevice-Do-cleanup-host-only-for-VIR_DOMAIN_TPM_TY.patch
backport/qemu-blockjob-Handle-pending-blockjob-state-only-when-we-.patch
backport/rpc-client-Don-t-check-return-value-of-virNetMessageNew.patch
backport/rpc-Don-t-warn-about-max_client_requests-in-single-thread.patch
forward/Skip-vircgrouptest.patch
forward/Reduce-udevadm-settle-timeout-to-10-seconds.patch
forward/Pass-GPG_TTY-env-var-to-the-ssh-binary.patch
......