Currently, struct kvm_riscv_hfence does not have a vmid field, so the
various hfence processing functions always use the vmid assigned to
the guest/VM. This prevents hfence operations on an arbitrary vmid,
hence add a vmid field to struct kvm_riscv_hfence and use it wherever
applicable.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
---
arch/riscv/include/asm/kvm_tlb.h | 1 +
arch/riscv/kvm/tlb.c | 30 ++++++++++++++++--------------
2 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/arch/riscv/include/asm/kvm_tlb.h b/arch/riscv/include/asm/kvm_tlb.h
index cd00c9a46cb1..f67e03edeaec 100644
--- a/arch/riscv/include/asm/kvm_tlb.h
+++ b/arch/riscv/include/asm/kvm_tlb.h
@@ -19,6 +19,7 @@ enum kvm_riscv_hfence_type {
struct kvm_riscv_hfence {
enum kvm_riscv_hfence_type type;
unsigned long asid;
+ unsigned long vmid;
unsigned long order;
gpa_t addr;
gpa_t size;
diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c
index 6fc4361c3d75..349fcfc93f54 100644
--- a/arch/riscv/kvm/tlb.c
+++ b/arch/riscv/kvm/tlb.c
@@ -237,49 +237,43 @@ static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,

void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
- unsigned long vmid;
struct kvm_riscv_hfence d = { 0 };
- struct kvm_vmid *v = &vcpu->kvm->arch.vmid;

while (vcpu_hfence_dequeue(vcpu, &d)) {
switch (d.type) {
case KVM_RISCV_HFENCE_UNKNOWN:
break;
case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
- vmid = READ_ONCE(v->vmid);
if (kvm_riscv_nacl_available())
- nacl_hfence_gvma_vmid(nacl_shmem(), vmid,
+ nacl_hfence_gvma_vmid(nacl_shmem(), d.vmid,
d.addr, d.size, d.order);
else
- kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr,
+ kvm_riscv_local_hfence_gvma_vmid_gpa(d.vmid, d.addr,
d.size, d.order);
break;
case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
- vmid = READ_ONCE(v->vmid);
if (kvm_riscv_nacl_available())
- nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid,
+ nacl_hfence_vvma_asid(nacl_shmem(), d.vmid, d.asid,
d.addr, d.size, d.order);
else
- kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr,
+ kvm_riscv_local_hfence_vvma_asid_gva(d.vmid, d.asid, d.addr,
d.size, d.order);
break;
case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
- vmid = READ_ONCE(v->vmid);
if (kvm_riscv_nacl_available())
- nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid);
+ nacl_hfence_vvma_asid_all(nacl_shmem(), d.vmid, d.asid);
else
- kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid);
+ kvm_riscv_local_hfence_vvma_asid_all(d.vmid, d.asid);
break;
case KVM_RISCV_HFENCE_VVMA_GVA:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
- vmid = READ_ONCE(v->vmid);
if (kvm_riscv_nacl_available())
- nacl_hfence_vvma(nacl_shmem(), vmid,
+ nacl_hfence_vvma(nacl_shmem(), d.vmid,
d.addr, d.size, d.order);
else
- kvm_riscv_local_hfence_vvma_gva(vmid, d.addr,
+ kvm_riscv_local_hfence_vvma_gva(d.vmid, d.addr,
d.size, d.order);
break;
default:
@@ -336,10 +330,12 @@ void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
gpa_t gpa, gpa_t gpsz,
unsigned long order)
{
+ struct kvm_vmid *v = &kvm->arch.vmid;
struct kvm_riscv_hfence data;

data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
data.asid = 0;
+ data.vmid = READ_ONCE(v->vmid);
data.addr = gpa;
data.size = gpsz;
data.order = order;
@@ -359,10 +355,12 @@ void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
unsigned long gva, unsigned long gvsz,
unsigned long order, unsigned long asid)
{
+ struct kvm_vmid *v = &kvm->arch.vmid;
struct kvm_riscv_hfence data;

data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
data.asid = asid;
+ data.vmid = READ_ONCE(v->vmid);
data.addr = gva;
data.size = gvsz;
data.order = order;
@@ -374,10 +372,12 @@ void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
unsigned long asid)
{
+ struct kvm_vmid *v = &kvm->arch.vmid;
struct kvm_riscv_hfence data;

data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
data.asid = asid;
+ data.vmid = READ_ONCE(v->vmid);
data.addr = data.size = data.order = 0;
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
KVM_REQ_HFENCE_VVMA_ALL, &data);
@@ -388,10 +388,12 @@ void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
unsigned long gva, unsigned long gvsz,
unsigned long order)
{
+ struct kvm_vmid *v = &kvm->arch.vmid;
struct kvm_riscv_hfence data;

data.type = KVM_RISCV_HFENCE_VVMA_GVA;
data.asid = 0;
+ data.vmid = READ_ONCE(v->vmid);
data.addr = gva;
data.size = gvsz;
data.order = order;
--
2.43.0
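As an illustration only (not part of the patch): since the requester
now fills in data.vmid, a future caller could queue an hfence for a
VMID other than the VM's current one. A minimal sketch, assuming the
same make_xfence_request() helper used above; the function name here
is hypothetical:

void example_hfence_gvma_gpa_for_vmid(struct kvm *kvm,
				      unsigned long hbase,
				      unsigned long hmask,
				      gpa_t gpa, gpa_t gpsz,
				      unsigned long order,
				      unsigned long vmid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
	data.asid = 0;
	/* Caller-chosen VMID, not READ_ONCE(kvm->arch.vmid.vmid) */
	data.vmid = vmid;
	data.addr = gpa;
	data.size = gpsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
}

kvm_riscv_hfence_process() then consumes d.vmid as shown in the hunk
above, without re-reading the VM's current VMID.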
On 6/12/25 11:57 PM, Anup Patel wrote:
Reviewed-by: Atish Patra <atishp@rivosinc.com>