From: Leon Romanovsky
To: Jens Axboe, Jason Gunthorpe, Robin Murphy, Joerg Roedel, Will Deacon,
	Christoph Hellwig, Sagi Grimberg
Cc: Leon Romanovsky, Keith Busch, Bjorn Helgaas, Logan Gunthorpe,
	Yishai Hadas, Shameer Kolothum, Kevin Tian, Alex Williamson,
	Marek Szyprowski, Jérôme Glisse, Andrew Morton, Jonathan Corbet,
	linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-block@vger.kernel.org, linux-rdma@vger.kernel.org,
	iommu@lists.linux.dev, linux-nvme@lists.infradead.org,
	linux-pci@vger.kernel.org, kvm@vger.kernel.org, linux-mm@kvack.org,
	Randy Dunlap
Subject: [PATCH v2 15/17] vfio/mlx5: Explicitly use number of pages instead of allocated length
Date: Wed, 6 Nov 2024 15:49:43 +0200

From: Leon Romanovsky

allocated_length is always a multiple of the page size, i.e. the number
of pages times PAGE_SIZE, so let's change the functions to accept the
number of pages directly. This opens the way to combining the receive
and send paths and improves code readability.
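
To make the calling-convention change concrete, here is a minimal
before/after sketch distilled from the hunks below (illustrative only;
"length" stands for whatever byte count the call site already has):

	/* Before: helpers took a byte length and divided it by
	 * PAGE_SIZE internally, e.g. DIV_ROUND_UP_ULL(length, PAGE_SIZE).
	 */
	buf = mlx5vf_get_data_buffer(migf, length, DMA_FROM_DEVICE);

	/* After: callers round the byte length up to whole pages once,
	 * at the call site, and the helpers work in pages throughout.
	 */
	buf = mlx5vf_get_data_buffer(migf, DIV_ROUND_UP(length, PAGE_SIZE),
				     DMA_FROM_DEVICE);
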
Signed-off-by: Leon Romanovsky
---
 drivers/vfio/pci/mlx5/cmd.c  | 32 ++++++++++-----------
 drivers/vfio/pci/mlx5/cmd.h  | 10 +++----
 drivers/vfio/pci/mlx5/main.c | 56 +++++++++++++++++++++++-------------
 3 files changed, 57 insertions(+), 41 deletions(-)

diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 41a4b0cf4297..fdc3e515741f 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -318,8 +318,7 @@ static int _create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
 			struct mlx5_vhca_recv_buf *recv_buf,
 			u32 *mkey)
 {
-	size_t npages = buf ? DIV_ROUND_UP(buf->allocated_length, PAGE_SIZE) :
-				recv_buf->npages;
+	size_t npages = buf ? buf->npages : recv_buf->npages;
 	int err = 0, inlen;
 	__be64 *mtt;
 	void *mkc;
@@ -375,7 +374,7 @@ static int mlx5vf_dma_data_buffer(struct mlx5_vhca_data_buffer *buf)
 	if (mvdev->mdev_detach)
 		return -ENOTCONN;
 
-	if (buf->dmaed || !buf->allocated_length)
+	if (buf->dmaed || !buf->npages)
 		return -EINVAL;
 
 	ret = dma_map_sgtable(mdev->device, &buf->table.sgt, buf->dma_dir, 0);
@@ -444,7 +443,7 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
 
 		if (ret)
 			goto err;
-		buf->allocated_length += filled * PAGE_SIZE;
+		buf->npages += filled;
 		/* clean input for another bulk allocation */
 		memset(page_list, 0, filled * sizeof(*page_list));
 		to_fill = min_t(unsigned int, to_alloc,
@@ -460,8 +459,7 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
 }
 
 struct mlx5_vhca_data_buffer *
-mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
-			 size_t length,
+mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf, u32 npages,
 			 enum dma_data_direction dma_dir)
 {
 	struct mlx5_vhca_data_buffer *buf;
@@ -473,9 +471,8 @@ mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
 
 	buf->dma_dir = dma_dir;
 	buf->migf = migf;
-	if (length) {
-		ret = mlx5vf_add_migration_pages(buf,
-				DIV_ROUND_UP_ULL(length, PAGE_SIZE));
+	if (npages) {
+		ret = mlx5vf_add_migration_pages(buf, npages);
 		if (ret)
 			goto end;
 
@@ -501,8 +498,8 @@ void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf)
 }
 
 struct mlx5_vhca_data_buffer *
-mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
-		       size_t length, enum dma_data_direction dma_dir)
+mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf, u32 npages,
+		       enum dma_data_direction dma_dir)
 {
 	struct mlx5_vhca_data_buffer *buf, *temp_buf;
 	struct list_head free_list;
@@ -517,7 +514,7 @@ mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
 	list_for_each_entry_safe(buf, temp_buf, &migf->avail_list, buf_elm) {
 		if (buf->dma_dir == dma_dir) {
 			list_del_init(&buf->buf_elm);
-			if (buf->allocated_length >= length) {
+			if (buf->npages >= npages) {
 				spin_unlock_irq(&migf->list_lock);
 				goto found;
 			}
@@ -531,7 +528,7 @@ mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
 		}
 	}
 	spin_unlock_irq(&migf->list_lock);
-	buf = mlx5vf_alloc_data_buffer(migf, length, dma_dir);
+	buf = mlx5vf_alloc_data_buffer(migf, npages, dma_dir);
 
 found:
 	while ((temp_buf = list_first_entry_or_null(&free_list,
@@ -712,7 +709,7 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
 	MLX5_SET(save_vhca_state_in, in, op_mod, 0);
 	MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id);
 	MLX5_SET(save_vhca_state_in, in, mkey, buf->mkey);
-	MLX5_SET(save_vhca_state_in, in, size, buf->allocated_length);
+	MLX5_SET(save_vhca_state_in, in, size, buf->npages * PAGE_SIZE);
 	MLX5_SET(save_vhca_state_in, in, incremental, inc);
 	MLX5_SET(save_vhca_state_in, in, set_track, track);
 
@@ -734,8 +731,11 @@ int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev,
 	}
 
 	if (!header_buf) {
-		header_buf = mlx5vf_get_data_buffer(migf,
-				sizeof(struct mlx5_vf_migration_header), DMA_NONE);
+		header_buf = mlx5vf_get_data_buffer(
+			migf,
+			DIV_ROUND_UP(sizeof(struct mlx5_vf_migration_header),
+				     PAGE_SIZE),
+			DMA_NONE);
 		if (IS_ERR(header_buf)) {
 			err = PTR_ERR(header_buf);
 			goto err_free;
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index df421dc6de04..7d4a833b6900 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -56,7 +56,7 @@ struct mlx5_vhca_data_buffer {
 	struct sg_append_table table;
 	loff_t start_pos;
 	u64 length;
-	u64 allocated_length;
+	u32 npages;
 	u32 mkey;
 	enum dma_data_direction dma_dir;
 	u8 dmaed:1;
@@ -217,12 +217,12 @@ int mlx5vf_cmd_alloc_pd(struct mlx5_vf_migration_file *migf);
 void mlx5vf_cmd_dealloc_pd(struct mlx5_vf_migration_file *migf);
 void mlx5fv_cmd_clean_migf_resources(struct mlx5_vf_migration_file *migf);
 struct mlx5_vhca_data_buffer *
-mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf,
-			 size_t length, enum dma_data_direction dma_dir);
+mlx5vf_alloc_data_buffer(struct mlx5_vf_migration_file *migf, u32 npages,
+			 enum dma_data_direction dma_dir);
 void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf);
 struct mlx5_vhca_data_buffer *
-mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf,
-		       size_t length, enum dma_data_direction dma_dir);
+mlx5vf_get_data_buffer(struct mlx5_vf_migration_file *migf, u32 npages,
+		       enum dma_data_direction dma_dir);
 void mlx5vf_put_data_buffer(struct mlx5_vhca_data_buffer *buf);
 struct page *mlx5vf_get_migration_page(struct mlx5_vhca_data_buffer *buf,
 				       unsigned long offset);
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index 242c23eef452..a1dbee3be1e0 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -308,6 +308,7 @@ static struct mlx5_vhca_data_buffer *
 mlx5vf_mig_file_get_stop_copy_buf(struct mlx5_vf_migration_file *migf,
 				  u8 index, size_t required_length)
 {
+	u32 npages = DIV_ROUND_UP(required_length, PAGE_SIZE);
 	struct mlx5_vhca_data_buffer *buf = migf->buf[index];
 	u8 chunk_num;
 
@@ -315,12 +316,11 @@ mlx5vf_mig_file_get_stop_copy_buf(struct mlx5_vf_migration_file *migf,
 	chunk_num = buf->stop_copy_chunk_num;
 	buf->migf->buf[index] = NULL;
 	/* Checking whether the pre-allocated buffer can fit */
-	if (buf->allocated_length >= required_length)
+	if (buf->npages >= npages)
 		return buf;
 
 	mlx5vf_put_data_buffer(buf);
-	buf = mlx5vf_get_data_buffer(buf->migf, required_length,
-				     DMA_FROM_DEVICE);
+	buf = mlx5vf_get_data_buffer(buf->migf, npages, DMA_FROM_DEVICE);
 	if (IS_ERR(buf))
 		return buf;
 
@@ -373,7 +373,8 @@ static int mlx5vf_add_stop_copy_header(struct mlx5_vf_migration_file *migf,
 	u8 *to_buff;
 	int ret;
 
-	header_buf = mlx5vf_get_data_buffer(migf, size, DMA_NONE);
+	header_buf = mlx5vf_get_data_buffer(migf, DIV_ROUND_UP(size, PAGE_SIZE),
+					    DMA_NONE);
 	if (IS_ERR(header_buf))
 		return PTR_ERR(header_buf);
 
@@ -388,7 +389,7 @@ static int mlx5vf_add_stop_copy_header(struct mlx5_vf_migration_file *migf,
 	to_buff = kmap_local_page(page);
 	memcpy(to_buff, &header, sizeof(header));
 	header_buf->length = sizeof(header);
-	data.stop_copy_size = cpu_to_le64(migf->buf[0]->allocated_length);
+	data.stop_copy_size = cpu_to_le64(migf->buf[0]->npages * PAGE_SIZE);
 	memcpy(to_buff + sizeof(header), &data, sizeof(data));
 	header_buf->length += sizeof(data);
 	kunmap_local(to_buff);
@@ -437,15 +438,20 @@ static int mlx5vf_prep_stop_copy(struct mlx5vf_pci_core_device *mvdev,
 
 	num_chunks = mvdev->chunk_mode ? MAX_NUM_CHUNKS : 1;
 	for (i = 0; i < num_chunks; i++) {
-		buf = mlx5vf_get_data_buffer(migf, inc_state_size, DMA_FROM_DEVICE);
+		buf = mlx5vf_get_data_buffer(
+			migf, DIV_ROUND_UP(inc_state_size, PAGE_SIZE),
+			DMA_FROM_DEVICE);
 		if (IS_ERR(buf)) {
 			ret = PTR_ERR(buf);
 			goto err;
 		}
 
 		migf->buf[i] = buf;
-		buf = mlx5vf_get_data_buffer(migf,
-				sizeof(struct mlx5_vf_migration_header), DMA_NONE);
+		buf = mlx5vf_get_data_buffer(
+			migf,
+			DIV_ROUND_UP(sizeof(struct mlx5_vf_migration_header),
+				     PAGE_SIZE),
+			DMA_NONE);
 		if (IS_ERR(buf)) {
 			ret = PTR_ERR(buf);
 			goto err;
@@ -553,7 +559,8 @@ static long mlx5vf_precopy_ioctl(struct file *filp, unsigned int cmd,
 	 * We finished transferring the current state and the device has a
 	 * dirty state, save a new state to be ready for.
 	 */
-	buf = mlx5vf_get_data_buffer(migf, inc_length, DMA_FROM_DEVICE);
+	buf = mlx5vf_get_data_buffer(migf, DIV_ROUND_UP(inc_length, PAGE_SIZE),
+				     DMA_FROM_DEVICE);
 	if (IS_ERR(buf)) {
 		ret = PTR_ERR(buf);
 		mlx5vf_mark_err(migf);
@@ -673,8 +680,8 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev, bool track)
 
 	if (track) {
 		/* leave the allocated buffer ready for the stop-copy phase */
-		buf = mlx5vf_alloc_data_buffer(migf,
-				migf->buf[0]->allocated_length, DMA_FROM_DEVICE);
+		buf = mlx5vf_alloc_data_buffer(migf, migf->buf[0]->npages,
+					       DMA_FROM_DEVICE);
		if (IS_ERR(buf)) {
 			ret = PTR_ERR(buf);
 			goto out_pd;
@@ -917,11 +924,14 @@ static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
 			goto out_unlock;
 		break;
 	case MLX5_VF_LOAD_STATE_PREP_HEADER_DATA:
-		if (vhca_buf_header->allocated_length < migf->record_size) {
+	{
+		u32 npages = DIV_ROUND_UP(migf->record_size, PAGE_SIZE);
+
+		if (vhca_buf_header->npages < npages) {
 			mlx5vf_free_data_buffer(vhca_buf_header);
 
-			migf->buf_header[0] = mlx5vf_alloc_data_buffer(migf,
-					migf->record_size, DMA_NONE);
+			migf->buf_header[0] = mlx5vf_alloc_data_buffer(
+				migf, npages, DMA_NONE);
 			if (IS_ERR(migf->buf_header[0])) {
 				ret = PTR_ERR(migf->buf_header[0]);
 				migf->buf_header[0] = NULL;
@@ -934,6 +944,7 @@ static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
 		vhca_buf_header->start_pos = migf->max_pos;
 		migf->load_state = MLX5_VF_LOAD_STATE_READ_HEADER_DATA;
 		break;
+	}
 	case MLX5_VF_LOAD_STATE_READ_HEADER_DATA:
 		ret = mlx5vf_resume_read_header_data(migf, vhca_buf_header,
 					&buf, &len, pos, &done);
@@ -944,12 +955,13 @@ static ssize_t mlx5vf_resume_write(struct file *filp, const char __user *buf,
 	{
 		u64 size = max(migf->record_size,
 			       migf->stop_copy_prep_size);
+		u32 npages = DIV_ROUND_UP(size, PAGE_SIZE);
 
-		if (vhca_buf->allocated_length < size) {
+		if (vhca_buf->npages < npages) {
 			mlx5vf_free_data_buffer(vhca_buf);
 
-			migf->buf[0] = mlx5vf_alloc_data_buffer(migf,
-					size, DMA_TO_DEVICE);
+			migf->buf[0] = mlx5vf_alloc_data_buffer(
+				migf, npages, DMA_TO_DEVICE);
 			if (IS_ERR(migf->buf[0])) {
 				ret = PTR_ERR(migf->buf[0]);
 				migf->buf[0] = NULL;
@@ -1031,8 +1043,11 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev)
 	}
 
 	migf->buf[0] = buf;
-	buf = mlx5vf_alloc_data_buffer(migf,
-			sizeof(struct mlx5_vf_migration_header), DMA_NONE);
+	buf = mlx5vf_alloc_data_buffer(
+		migf,
+		DIV_ROUND_UP(sizeof(struct mlx5_vf_migration_header),
+			     PAGE_SIZE),
+		DMA_NONE);
 	if (IS_ERR(buf)) {
 		ret = PTR_ERR(buf);
 		goto out_buf;
@@ -1149,7 +1164,8 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev,
 					 MLX5VF_QUERY_INC | MLX5VF_QUERY_CLEANUP);
 		if (ret)
 			return ERR_PTR(ret);
-		buf = mlx5vf_get_data_buffer(migf, size, DMA_FROM_DEVICE);
+		buf = mlx5vf_get_data_buffer(migf,
+				DIV_ROUND_UP(size, PAGE_SIZE), DMA_FROM_DEVICE);
 		if (IS_ERR(buf))
 			return ERR_CAST(buf);
 		/* pre_copy cleanup */
-- 
2.47.0