Change flush_into_kvec to return KVVec instead of KVec, and rename it to
read_to_vec to reflect the new return type. KVVec uses vmalloc for large
allocations, which is appropriate since RPC reply payloads can be large
(>=20 KiB).
Update GspSequence to use KVVec accordingly.
Signed-off-by: Eliot Courtney <ecourtney@nvidia.com>
---
drivers/gpu/nova-core/gsp/sequencer.rs | 4 ++--
drivers/gpu/nova-core/sbuffer.rs | 6 +++---
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/nova-core/gsp/sequencer.rs b/drivers/gpu/nova-core/gsp/sequencer.rs
index 474e4c8021db..c8f587d2d57b 100644
--- a/drivers/gpu/nova-core/gsp/sequencer.rs
+++ b/drivers/gpu/nova-core/gsp/sequencer.rs
@@ -42,7 +42,7 @@ struct GspSequence {
/// Current command index for error reporting.
cmd_index: u32,
/// Command data buffer containing the sequence of commands.
- cmd_data: KVec<u8>,
+ cmd_data: KVVec<u8>,
}
impl MessageFromGsp for GspSequence {
@@ -54,7 +54,7 @@ fn read(
msg: &Self::Message,
sbuffer: &mut SBufferIter<array::IntoIter<&[u8], 2>>,
) -> Result<Self, Self::InitError> {
- let cmd_data = sbuffer.flush_into_kvec(GFP_KERNEL)?;
+ let cmd_data = sbuffer.read_to_vec(GFP_KERNEL)?;
Ok(GspSequence {
cmd_index: msg.cmd_index(),
cmd_data,
diff --git a/drivers/gpu/nova-core/sbuffer.rs b/drivers/gpu/nova-core/sbuffer.rs
index 3a41d224c77a..ae2facdcbdd4 100644
--- a/drivers/gpu/nova-core/sbuffer.rs
+++ b/drivers/gpu/nova-core/sbuffer.rs
@@ -162,11 +162,11 @@ pub(crate) fn read_exact(&mut self, mut dst: &mut [u8]) -> Result {
Ok(())
}
- /// Read all the remaining data into a [`KVec`].
+ /// Read all the remaining data into a [`KVVec`].
///
/// `self` will be empty after this operation.
- pub(crate) fn flush_into_kvec(&mut self, flags: kernel::alloc::Flags) -> Result<KVec<u8>> {
- let mut buf = KVec::<u8>::new();
+ pub(crate) fn read_to_vec(&mut self, flags: kernel::alloc::Flags) -> Result<KVVec<u8>> {
+ let mut buf = KVVec::<u8>::new();
if let Some(slice) = core::mem::take(&mut self.cur_slice) {
buf.extend_from_slice(slice, flags)?;
--
2.53.0
On 2026-03-18 at 18:14 +1100, Eliot Courtney <ecourtney@nvidia.com> wrote...
> Change flush_into_kvec to return KVVec instead of KVec. KVVec uses
> vmalloc for large allocations, which is appropriate since RPC reply
> payloads can be large (>=20 KiB).
Out of curiosity do you know if there is any upper limit on payload size?
And is there any concern about performance of vmalloc() vs. kmalloc() for RPC
messages?
> Update GspSequence to use KVVec accordingly.
>
> Signed-off-by: Eliot Courtney <ecourtney@nvidia.com>
> ---
> drivers/gpu/nova-core/gsp/sequencer.rs | 4 ++--
> drivers/gpu/nova-core/sbuffer.rs | 6 +++---
> 2 files changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/nova-core/gsp/sequencer.rs b/drivers/gpu/nova-core/gsp/sequencer.rs
> index 474e4c8021db..c8f587d2d57b 100644
> --- a/drivers/gpu/nova-core/gsp/sequencer.rs
> +++ b/drivers/gpu/nova-core/gsp/sequencer.rs
> @@ -42,7 +42,7 @@ struct GspSequence {
> /// Current command index for error reporting.
> cmd_index: u32,
> /// Command data buffer containing the sequence of commands.
> - cmd_data: KVec<u8>,
> + cmd_data: KVVec<u8>,
> }
>
> impl MessageFromGsp for GspSequence {
> @@ -54,7 +54,7 @@ fn read(
> msg: &Self::Message,
> sbuffer: &mut SBufferIter<array::IntoIter<&[u8], 2>>,
> ) -> Result<Self, Self::InitError> {
> - let cmd_data = sbuffer.flush_into_kvec(GFP_KERNEL)?;
> + let cmd_data = sbuffer.read_to_vec(GFP_KERNEL)?;
> Ok(GspSequence {
> cmd_index: msg.cmd_index(),
> cmd_data,
> diff --git a/drivers/gpu/nova-core/sbuffer.rs b/drivers/gpu/nova-core/sbuffer.rs
> index 3a41d224c77a..ae2facdcbdd4 100644
> --- a/drivers/gpu/nova-core/sbuffer.rs
> +++ b/drivers/gpu/nova-core/sbuffer.rs
> @@ -162,11 +162,11 @@ pub(crate) fn read_exact(&mut self, mut dst: &mut [u8]) -> Result {
> Ok(())
> }
>
> - /// Read all the remaining data into a [`KVec`].
> + /// Read all the remaining data into a [`KVVec`].
> ///
> /// `self` will be empty after this operation.
> - pub(crate) fn flush_into_kvec(&mut self, flags: kernel::alloc::Flags) -> Result<KVec<u8>> {
> - let mut buf = KVec::<u8>::new();
> + pub(crate) fn read_to_vec(&mut self, flags: kernel::alloc::Flags) -> Result<KVVec<u8>> {
> + let mut buf = KVVec::<u8>::new();
>
> if let Some(slice) = core::mem::take(&mut self.cur_slice) {
> buf.extend_from_slice(slice, flags)?;
>
> --
> 2.53.0
>
On Fri Mar 20, 2026 at 1:32 PM JST, Alistair Popple wrote:
> On 2026-03-18 at 18:14 +1100, Eliot Courtney <ecourtney@nvidia.com> wrote...
>> Change flush_into_kvec to return KVVec instead of KVec. KVVec uses
>> vmalloc for large allocations, which is appropriate since RPC reply
>> payloads can be large (>=20 KiB).
>
> Out of curiosity do you know if there is any upper limit on payload size?
IIRC the largest one I saw in openrm was a few hundred KiB. On our side,
however, the largest complete payload you could theoretically receive in
a single message is ~64 KiB, since we don't support continuation records
on the receive path.
>
> And is there any concern about performance of vmalloc() vs. kmalloc() for RPC
> messages?
`KVVec` uses `KVmalloc`, which tries `Kmalloc` first and only falls back
to vmalloc if that fails. Most of the time `Kmalloc` should succeed, so
this shouldn't really regress performance in the common case, and the
fallback is required for the longer RPCs.
>
>> Update GspSequence to use KVVec accordingly.
>>
>> Signed-off-by: Eliot Courtney <ecourtney@nvidia.com>
>> ---
>> drivers/gpu/nova-core/gsp/sequencer.rs | 4 ++--
>> drivers/gpu/nova-core/sbuffer.rs | 6 +++---
>> 2 files changed, 5 insertions(+), 5 deletions(-)
>>
>> diff --git a/drivers/gpu/nova-core/gsp/sequencer.rs b/drivers/gpu/nova-core/gsp/sequencer.rs
>> index 474e4c8021db..c8f587d2d57b 100644
>> --- a/drivers/gpu/nova-core/gsp/sequencer.rs
>> +++ b/drivers/gpu/nova-core/gsp/sequencer.rs
>> @@ -42,7 +42,7 @@ struct GspSequence {
>> /// Current command index for error reporting.
>> cmd_index: u32,
>> /// Command data buffer containing the sequence of commands.
>> - cmd_data: KVec<u8>,
>> + cmd_data: KVVec<u8>,
>> }
>>
>> impl MessageFromGsp for GspSequence {
>> @@ -54,7 +54,7 @@ fn read(
>> msg: &Self::Message,
>> sbuffer: &mut SBufferIter<array::IntoIter<&[u8], 2>>,
>> ) -> Result<Self, Self::InitError> {
>> - let cmd_data = sbuffer.flush_into_kvec(GFP_KERNEL)?;
>> + let cmd_data = sbuffer.read_to_vec(GFP_KERNEL)?;
>> Ok(GspSequence {
>> cmd_index: msg.cmd_index(),
>> cmd_data,
>> diff --git a/drivers/gpu/nova-core/sbuffer.rs b/drivers/gpu/nova-core/sbuffer.rs
>> index 3a41d224c77a..ae2facdcbdd4 100644
>> --- a/drivers/gpu/nova-core/sbuffer.rs
>> +++ b/drivers/gpu/nova-core/sbuffer.rs
>> @@ -162,11 +162,11 @@ pub(crate) fn read_exact(&mut self, mut dst: &mut [u8]) -> Result {
>> Ok(())
>> }
>>
>> - /// Read all the remaining data into a [`KVec`].
>> + /// Read all the remaining data into a [`KVVec`].
>> ///
>> /// `self` will be empty after this operation.
>> - pub(crate) fn flush_into_kvec(&mut self, flags: kernel::alloc::Flags) -> Result<KVec<u8>> {
>> - let mut buf = KVec::<u8>::new();
>> + pub(crate) fn read_to_vec(&mut self, flags: kernel::alloc::Flags) -> Result<KVVec<u8>> {
>> + let mut buf = KVVec::<u8>::new();
>>
>> if let Some(slice) = core::mem::take(&mut self.cur_slice) {
>> buf.extend_from_slice(slice, flags)?;
>>
>> --
>> 2.53.0
>>
© 2016 - 2026 Red Hat, Inc.