IOCSR instructions support 1/2/4/8-byte accesses, and the address should
be naturally aligned with the access size. This patch adds an address
alignment check to the eiointc kernel emulation.
At the same time, since len is guaranteed to be 1/2/4/8 bytes by the
iocsr exit emulation function kvm_emu_iocsr(), remove the default case
from the switch statements.
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
arch/loongarch/kvm/intc/eiointc.c | 21 +++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c
index 8b0d9376eb54..4e9d12300cc4 100644
--- a/arch/loongarch/kvm/intc/eiointc.c
+++ b/arch/loongarch/kvm/intc/eiointc.c
@@ -311,6 +311,12 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+ /* len must be 1/2/4/8 from function kvm_emu_iocsr() */
+ if (addr & (len - 1)) {
+ kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
+ return -EINVAL;
+ }
+
vcpu->stat.eiointc_read_exits++;
spin_lock_irqsave(&eiointc->lock, flags);
switch (len) {
@@ -323,12 +329,9 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
case 4:
ret = loongarch_eiointc_readl(vcpu, eiointc, addr, val);
break;
- case 8:
+ default:
ret = loongarch_eiointc_readq(vcpu, eiointc, addr, val);
break;
- default:
- WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
- __func__, addr, len);
}
spin_unlock_irqrestore(&eiointc->lock, flags);
@@ -682,6 +685,11 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+ if (addr & (len - 1)) {
+ kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len);
+ return -EINVAL;
+ }
+
vcpu->stat.eiointc_write_exits++;
spin_lock_irqsave(&eiointc->lock, flags);
switch (len) {
@@ -694,12 +702,9 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
case 4:
ret = loongarch_eiointc_writel(vcpu, eiointc, addr, val);
break;
- case 8:
+ default:
ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, val);
break;
- default:
- WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
- __func__, addr, len);
}
spin_unlock_irqrestore(&eiointc->lock, flags);
--
2.39.3
Hi, Bibo, On Wed, Jun 11, 2025 at 9:51 AM Bibo Mao <maobibo@loongson.cn> wrote: > > IOCSR instruction supports 1/2/4/8 bytes access, the address should > be naturally aligned with its access size. Here address alignment > check is added in eiointc kernel emulation. > > At the same time len must be 1/2/4/8 bytes from iocsr exit emulation > function kvm_emu_iocsr(), remove the default case in switch case > statements. Robust code doesn't depend its callers do things right, so I suggest keeping the default case, which means we just add the alignment check here. And I think this patch should also Cc stable and add a Fixes tag. Huacai > > Signed-off-by: Bibo Mao <maobibo@loongson.cn> > --- > arch/loongarch/kvm/intc/eiointc.c | 21 +++++++++++++-------- > 1 file changed, 13 insertions(+), 8 deletions(-) > > diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c > index 8b0d9376eb54..4e9d12300cc4 100644 > --- a/arch/loongarch/kvm/intc/eiointc.c > +++ b/arch/loongarch/kvm/intc/eiointc.c > @@ -311,6 +311,12 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, > return -EINVAL; > } > > + /* len must be 1/2/4/8 from function kvm_emu_iocsr() */ > + if (addr & (len - 1)) { > + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); > + return -EINVAL; > + } > + > vcpu->stat.eiointc_read_exits++; > spin_lock_irqsave(&eiointc->lock, flags); > switch (len) { > @@ -323,12 +329,9 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, > case 4: > ret = loongarch_eiointc_readl(vcpu, eiointc, addr, val); > break; > - case 8: > + default: > ret = loongarch_eiointc_readq(vcpu, eiointc, addr, val); > break; > - default: > - WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", > - __func__, addr, len); > } > spin_unlock_irqrestore(&eiointc->lock, flags); > > @@ -682,6 +685,11 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, > return -EINVAL; > } > > + if (addr & (len - 1)) { > + kvm_err("%s: eiointc not aligned addr %llx 
len %d\n", __func__, addr, len); > + return -EINVAL; > + } > + > vcpu->stat.eiointc_write_exits++; > spin_lock_irqsave(&eiointc->lock, flags); > switch (len) { > @@ -694,12 +702,9 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, > case 4: > ret = loongarch_eiointc_writel(vcpu, eiointc, addr, val); > break; > - case 8: > + default: > ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, val); > break; > - default: > - WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", > - __func__, addr, len); > } > spin_unlock_irqrestore(&eiointc->lock, flags); > > -- > 2.39.3 >
On Thu, 19 Jun 2025 16:47:22 +0800 Huacai Chen <chenhuacai@kernel.org> wrote: > Hi, Bibo, > > On Wed, Jun 11, 2025 at 9:51 AM Bibo Mao <maobibo@loongson.cn> wrote: > > > > IOCSR instruction supports 1/2/4/8 bytes access, the address should > > be naturally aligned with its access size. Here address alignment > > check is added in eiointc kernel emulation. > > > > At the same time len must be 1/2/4/8 bytes from iocsr exit emulation > > function kvm_emu_iocsr(), remove the default case in switch case > > statements. > Robust code doesn't depend its callers do things right, so I suggest > keeping the default case, which means we just add the alignment check > here. kernel code generally relies on callers to DTRT - except for values that come from userspace. Otherwise you get unreadable and slow code that continuously checks for things that can't happen. David > > And I think this patch should also Cc stable and add a Fixes tag. > > > Huacai > > > > > Signed-off-by: Bibo Mao <maobibo@loongson.cn> > > --- > > arch/loongarch/kvm/intc/eiointc.c | 21 +++++++++++++-------- > > 1 file changed, 13 insertions(+), 8 deletions(-) > > > > diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c > > index 8b0d9376eb54..4e9d12300cc4 100644 > > --- a/arch/loongarch/kvm/intc/eiointc.c > > +++ b/arch/loongarch/kvm/intc/eiointc.c > > @@ -311,6 +311,12 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, > > return -EINVAL; > > } > > > > + /* len must be 1/2/4/8 from function kvm_emu_iocsr() */ > > + if (addr & (len - 1)) { > > + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); > > + return -EINVAL; > > + } > > + > > vcpu->stat.eiointc_read_exits++; > > spin_lock_irqsave(&eiointc->lock, flags); > > switch (len) { > > @@ -323,12 +329,9 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, > > case 4: > > ret = loongarch_eiointc_readl(vcpu, eiointc, addr, val); > > break; > > - case 8: > > + default: > > ret = 
loongarch_eiointc_readq(vcpu, eiointc, addr, val); > > break; > > - default: > > - WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", > > - __func__, addr, len); > > } > > spin_unlock_irqrestore(&eiointc->lock, flags); > > > > @@ -682,6 +685,11 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, > > return -EINVAL; > > } > > > > + if (addr & (len - 1)) { > > + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); > > + return -EINVAL; > > + } > > + > > vcpu->stat.eiointc_write_exits++; > > spin_lock_irqsave(&eiointc->lock, flags); > > switch (len) { > > @@ -694,12 +702,9 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, > > case 4: > > ret = loongarch_eiointc_writel(vcpu, eiointc, addr, val); > > break; > > - case 8: > > + default: > > ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, val); > > break; > > - default: > > - WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", > > - __func__, addr, len); > > } > > spin_unlock_irqrestore(&eiointc->lock, flags); > > > > -- > > 2.39.3 > > >
Hi, David, On Sat, Jun 21, 2025 at 7:21 PM David Laight <david.laight.linux@gmail.com> wrote: > > On Thu, 19 Jun 2025 16:47:22 +0800 > Huacai Chen <chenhuacai@kernel.org> wrote: > > > Hi, Bibo, > > > > On Wed, Jun 11, 2025 at 9:51 AM Bibo Mao <maobibo@loongson.cn> wrote: > > > > > > IOCSR instruction supports 1/2/4/8 bytes access, the address should > > > be naturally aligned with its access size. Here address alignment > > > check is added in eiointc kernel emulation. > > > > > > At the same time len must be 1/2/4/8 bytes from iocsr exit emulation > > > function kvm_emu_iocsr(), remove the default case in switch case > > > statements. > > Robust code doesn't depend its callers do things right, so I suggest > > keeping the default case, which means we just add the alignment check > > here. > > kernel code generally relies on callers to DTRT - except for values > that come from userspace. > > Otherwise you get unreadable and slow code that continuously checks > for things that can't happen. Generally you are right - but this patch is not the case. Adding a "default" case here doesn't make code slower or unreadable, and the code becomes more robust. Huacai > > David > > > > > And I think this patch should also Cc stable and add a Fixes tag. 
> > > > > > Huacai > > > > > > > > Signed-off-by: Bibo Mao <maobibo@loongson.cn> > > > --- > > > arch/loongarch/kvm/intc/eiointc.c | 21 +++++++++++++-------- > > > 1 file changed, 13 insertions(+), 8 deletions(-) > > > > > > diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c > > > index 8b0d9376eb54..4e9d12300cc4 100644 > > > --- a/arch/loongarch/kvm/intc/eiointc.c > > > +++ b/arch/loongarch/kvm/intc/eiointc.c > > > @@ -311,6 +311,12 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, > > > return -EINVAL; > > > } > > > > > > + /* len must be 1/2/4/8 from function kvm_emu_iocsr() */ > > > + if (addr & (len - 1)) { > > > + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); > > > + return -EINVAL; > > > + } > > > + > > > vcpu->stat.eiointc_read_exits++; > > > spin_lock_irqsave(&eiointc->lock, flags); > > > switch (len) { > > > @@ -323,12 +329,9 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, > > > case 4: > > > ret = loongarch_eiointc_readl(vcpu, eiointc, addr, val); > > > break; > > > - case 8: > > > + default: > > > ret = loongarch_eiointc_readq(vcpu, eiointc, addr, val); > > > break; > > > - default: > > > - WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", > > > - __func__, addr, len); > > > } > > > spin_unlock_irqrestore(&eiointc->lock, flags); > > > > > > @@ -682,6 +685,11 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, > > > return -EINVAL; > > > } > > > > > > + if (addr & (len - 1)) { > > > + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); > > > + return -EINVAL; > > > + } > > > + > > > vcpu->stat.eiointc_write_exits++; > > > spin_lock_irqsave(&eiointc->lock, flags); > > > switch (len) { > > > @@ -694,12 +702,9 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, > > > case 4: > > > ret = loongarch_eiointc_writel(vcpu, eiointc, addr, val); > > > break; > > > - case 8: > > > + default: > > > ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, 
val); > > > break; > > > - default: > > > - WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", > > > - __func__, addr, len); > > > } > > > spin_unlock_irqrestore(&eiointc->lock, flags); > > > > > > -- > > > 2.39.3 > > > > > >
That is a little different with x86/arm64 kvm. With iocsr address space, there is access size with 1/2/4/8. With IO memory address space, there is generic access size with 1/2/4/8, however if irqchip driver of VM is deployed in user space, it will be possible with 16/32 bytes with SIMD instruction. Regards Bibo Mao On 2025/6/21 下午9:04, Huacai Chen wrote: > Hi, David, > > On Sat, Jun 21, 2025 at 7:21 PM David Laight > <david.laight.linux@gmail.com> wrote: >> >> On Thu, 19 Jun 2025 16:47:22 +0800 >> Huacai Chen <chenhuacai@kernel.org> wrote: >> >>> Hi, Bibo, >>> >>> On Wed, Jun 11, 2025 at 9:51 AM Bibo Mao <maobibo@loongson.cn> wrote: >>>> >>>> IOCSR instruction supports 1/2/4/8 bytes access, the address should >>>> be naturally aligned with its access size. Here address alignment >>>> check is added in eiointc kernel emulation. >>>> >>>> At the same time len must be 1/2/4/8 bytes from iocsr exit emulation >>>> function kvm_emu_iocsr(), remove the default case in switch case >>>> statements. >>> Robust code doesn't depend its callers do things right, so I suggest >>> keeping the default case, which means we just add the alignment check >>> here. >> >> kernel code generally relies on callers to DTRT - except for values >> that come from userspace. >> >> Otherwise you get unreadable and slow code that continuously checks >> for things that can't happen. > Generally you are right - but this patch is not the case. > > Adding a "default" case here doesn't make code slower or unreadable, > and the code becomes more robust. > > Huacai > >> >> David >> >>> >>> And I think this patch should also Cc stable and add a Fixes tag. 
>>> >>> >>> Huacai >>> >>>> >>>> Signed-off-by: Bibo Mao <maobibo@loongson.cn> >>>> --- >>>> arch/loongarch/kvm/intc/eiointc.c | 21 +++++++++++++-------- >>>> 1 file changed, 13 insertions(+), 8 deletions(-) >>>> >>>> diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c >>>> index 8b0d9376eb54..4e9d12300cc4 100644 >>>> --- a/arch/loongarch/kvm/intc/eiointc.c >>>> +++ b/arch/loongarch/kvm/intc/eiointc.c >>>> @@ -311,6 +311,12 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, >>>> return -EINVAL; >>>> } >>>> >>>> + /* len must be 1/2/4/8 from function kvm_emu_iocsr() */ >>>> + if (addr & (len - 1)) { >>>> + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); >>>> + return -EINVAL; >>>> + } >>>> + >>>> vcpu->stat.eiointc_read_exits++; >>>> spin_lock_irqsave(&eiointc->lock, flags); >>>> switch (len) { >>>> @@ -323,12 +329,9 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, >>>> case 4: >>>> ret = loongarch_eiointc_readl(vcpu, eiointc, addr, val); >>>> break; >>>> - case 8: >>>> + default: >>>> ret = loongarch_eiointc_readq(vcpu, eiointc, addr, val); >>>> break; >>>> - default: >>>> - WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", >>>> - __func__, addr, len); >>>> } >>>> spin_unlock_irqrestore(&eiointc->lock, flags); >>>> >>>> @@ -682,6 +685,11 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, >>>> return -EINVAL; >>>> } >>>> >>>> + if (addr & (len - 1)) { >>>> + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); >>>> + return -EINVAL; >>>> + } >>>> + >>>> vcpu->stat.eiointc_write_exits++; >>>> spin_lock_irqsave(&eiointc->lock, flags); >>>> switch (len) { >>>> @@ -694,12 +702,9 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, >>>> case 4: >>>> ret = loongarch_eiointc_writel(vcpu, eiointc, addr, val); >>>> break; >>>> - case 8: >>>> + default: >>>> ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, val); >>>> break; >>>> - default: >>>> - WARN_ONCE(1, 
"%s: Abnormal address access: addr 0x%llx, size %d\n", >>>> - __func__, addr, len); >>>> } >>>> spin_unlock_irqrestore(&eiointc->lock, flags); >>>> >>>> -- >>>> 2.39.3 >>>> >>> >>
On 2025/6/21 下午9:04, Huacai Chen wrote: > Hi, David, > > On Sat, Jun 21, 2025 at 7:21 PM David Laight > <david.laight.linux@gmail.com> wrote: >> >> On Thu, 19 Jun 2025 16:47:22 +0800 >> Huacai Chen <chenhuacai@kernel.org> wrote: >> >>> Hi, Bibo, >>> >>> On Wed, Jun 11, 2025 at 9:51 AM Bibo Mao <maobibo@loongson.cn> wrote: >>>> >>>> IOCSR instruction supports 1/2/4/8 bytes access, the address should >>>> be naturally aligned with its access size. Here address alignment >>>> check is added in eiointc kernel emulation. >>>> >>>> At the same time len must be 1/2/4/8 bytes from iocsr exit emulation >>>> function kvm_emu_iocsr(), remove the default case in switch case >>>> statements. >>> Robust code doesn't depend its callers do things right, so I suggest >>> keeping the default case, which means we just add the alignment check >>> here. >> >> kernel code generally relies on callers to DTRT - except for values >> that come from userspace. >> >> Otherwise you get unreadable and slow code that continuously checks >> for things that can't happen. > Generally you are right - but this patch is not the case. > > Adding a "default" case here doesn't make code slower or unreadable, > and the code becomes more robust. By my understanding, the default case cannot happen never with iocsr emulation function kvm_emu_iocsr() in kernel in the previous patch. David suggests that default case can be removed since it will not happen. It actually makes code quicker if compared with assembly language, since there is one if-else comparison reduction. Regards Bibo Mao > > Huacai > >> >> David >> >>> >>> And I think this patch should also Cc stable and add a Fixes tag. 
>>> >>> >>> Huacai >>> >>>> >>>> Signed-off-by: Bibo Mao <maobibo@loongson.cn> >>>> --- >>>> arch/loongarch/kvm/intc/eiointc.c | 21 +++++++++++++-------- >>>> 1 file changed, 13 insertions(+), 8 deletions(-) >>>> >>>> diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c >>>> index 8b0d9376eb54..4e9d12300cc4 100644 >>>> --- a/arch/loongarch/kvm/intc/eiointc.c >>>> +++ b/arch/loongarch/kvm/intc/eiointc.c >>>> @@ -311,6 +311,12 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, >>>> return -EINVAL; >>>> } >>>> >>>> + /* len must be 1/2/4/8 from function kvm_emu_iocsr() */ >>>> + if (addr & (len - 1)) { >>>> + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); >>>> + return -EINVAL; >>>> + } >>>> + >>>> vcpu->stat.eiointc_read_exits++; >>>> spin_lock_irqsave(&eiointc->lock, flags); >>>> switch (len) { >>>> @@ -323,12 +329,9 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, >>>> case 4: >>>> ret = loongarch_eiointc_readl(vcpu, eiointc, addr, val); >>>> break; >>>> - case 8: >>>> + default: >>>> ret = loongarch_eiointc_readq(vcpu, eiointc, addr, val); >>>> break; >>>> - default: >>>> - WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", >>>> - __func__, addr, len); >>>> } >>>> spin_unlock_irqrestore(&eiointc->lock, flags); >>>> >>>> @@ -682,6 +685,11 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, >>>> return -EINVAL; >>>> } >>>> >>>> + if (addr & (len - 1)) { >>>> + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); >>>> + return -EINVAL; >>>> + } >>>> + >>>> vcpu->stat.eiointc_write_exits++; >>>> spin_lock_irqsave(&eiointc->lock, flags); >>>> switch (len) { >>>> @@ -694,12 +702,9 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, >>>> case 4: >>>> ret = loongarch_eiointc_writel(vcpu, eiointc, addr, val); >>>> break; >>>> - case 8: >>>> + default: >>>> ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, val); >>>> break; >>>> - default: >>>> - WARN_ONCE(1, 
"%s: Abnormal address access: addr 0x%llx, size %d\n", >>>> - __func__, addr, len); >>>> } >>>> spin_unlock_irqrestore(&eiointc->lock, flags); >>>> >>>> -- >>>> 2.39.3 >>>> >>> >>
On 2025/6/19 下午4:47, Huacai Chen wrote: > Hi, Bibo, > > On Wed, Jun 11, 2025 at 9:51 AM Bibo Mao <maobibo@loongson.cn> wrote: >> >> IOCSR instruction supports 1/2/4/8 bytes access, the address should >> be naturally aligned with its access size. Here address alignment >> check is added in eiointc kernel emulation. >> >> At the same time len must be 1/2/4/8 bytes from iocsr exit emulation >> function kvm_emu_iocsr(), remove the default case in switch case >> statements. > Robust code doesn't depend its callers do things right, so I suggest > keeping the default case, which means we just add the alignment check > here. ok, will keep the default case. > > And I think this patch should also Cc stable and add a Fixes tag. ok, will add Cc stabe and Fixes tag. Regards Bibo Mao > > > Huacai > >> >> Signed-off-by: Bibo Mao <maobibo@loongson.cn> >> --- >> arch/loongarch/kvm/intc/eiointc.c | 21 +++++++++++++-------- >> 1 file changed, 13 insertions(+), 8 deletions(-) >> >> diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c >> index 8b0d9376eb54..4e9d12300cc4 100644 >> --- a/arch/loongarch/kvm/intc/eiointc.c >> +++ b/arch/loongarch/kvm/intc/eiointc.c >> @@ -311,6 +311,12 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, >> return -EINVAL; >> } >> >> + /* len must be 1/2/4/8 from function kvm_emu_iocsr() */ >> + if (addr & (len - 1)) { >> + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); >> + return -EINVAL; >> + } >> + >> vcpu->stat.eiointc_read_exits++; >> spin_lock_irqsave(&eiointc->lock, flags); >> switch (len) { >> @@ -323,12 +329,9 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu, >> case 4: >> ret = loongarch_eiointc_readl(vcpu, eiointc, addr, val); >> break; >> - case 8: >> + default: >> ret = loongarch_eiointc_readq(vcpu, eiointc, addr, val); >> break; >> - default: >> - WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", >> - __func__, addr, len); >> } >> 
spin_unlock_irqrestore(&eiointc->lock, flags); >> >> @@ -682,6 +685,11 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, >> return -EINVAL; >> } >> >> + if (addr & (len - 1)) { >> + kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); >> + return -EINVAL; >> + } >> + >> vcpu->stat.eiointc_write_exits++; >> spin_lock_irqsave(&eiointc->lock, flags); >> switch (len) { >> @@ -694,12 +702,9 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu, >> case 4: >> ret = loongarch_eiointc_writel(vcpu, eiointc, addr, val); >> break; >> - case 8: >> + default: >> ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, val); >> break; >> - default: >> - WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n", >> - __func__, addr, len); >> } >> spin_unlock_irqrestore(&eiointc->lock, flags); >> >> -- >> 2.39.3 >>
© 2016 - 2025 Red Hat, Inc.