From: Jinliang Zheng <alexjlzheng@tencent.com>

A following patch will allow iomap_write_end() to conditionally accept
partial writes. In preparation for that, make iomap_write_end() return
the number of bytes it accepted instead of a boolean.
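
For illustration, the contract seen by callers becomes roughly the
following (a sketch only; example_commit_copy() is a made-up wrapper,
the other identifiers are as in the diff below):

	/*
	 * Hypothetical caller, for illustration only (not part of this
	 * patch).  iomap_write_end() now reports how many bytes it
	 * accepted: 'copied' on full acceptance, or 0 when a short copy
	 * hit a !uptodate folio and the whole copy must be redone.
	 */
	static int example_commit_copy(struct iomap_iter *iter, size_t bytes,
			size_t copied, struct folio *folio)
	{
		int written = iomap_write_end(iter, bytes, copied, folio);

		if (written == 0 && copied > 0) {
			/* short copy into a !uptodate folio: redo the copy */
		}
		return written;
	}

This patch on its own is not intended to change behavior.
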
Signed-off-by: Jinliang Zheng <alexjlzheng@tencent.com>
---
fs/iomap/buffered-io.c | 27 +++++++++++++--------------
1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e130db3b761e..6e516c7d9f04 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -873,7 +873,7 @@ static int iomap_write_begin(struct iomap_iter *iter,
return status;
}

-static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
+static int __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
size_t copied, struct folio *folio)
{
flush_dcache_folio(folio);
@@ -890,11 +890,11 @@ static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
* redo the whole thing.
*/
if (unlikely(copied < len && !folio_test_uptodate(folio)))
- return false;
+ return 0;
iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
filemap_dirty_folio(inode->i_mapping, folio);
- return true;
+ return copied;
}

static void iomap_write_end_inline(const struct iomap_iter *iter,
@@ -915,10 +915,10 @@ static void iomap_write_end_inline(const struct iomap_iter *iter,
}

/*
- * Returns true if all copied bytes have been written to the pagecache,
- * otherwise return false.
+ * Returns the number of bytes written to the pagecache, or zero if
+ * the write was rejected because it would partially update a block.
*/
-static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
+static int iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
struct folio *folio)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
@@ -926,7 +926,7 @@ static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,

if (srcmap->type == IOMAP_INLINE) {
iomap_write_end_inline(iter, folio, pos, copied);
- return true;
+ return copied;
}

if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
@@ -934,7 +934,7 @@ static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,

bh_written = block_write_end(pos, len, copied, folio);
WARN_ON_ONCE(bh_written != copied && bh_written != 0);
- return bh_written == copied;
+ return bh_written;
}

return __iomap_write_end(iter->inode, pos, len, copied, folio);
@@ -1000,8 +1000,7 @@ static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
flush_dcache_folio(folio);

copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
- written = iomap_write_end(iter, bytes, copied, folio) ?
- copied : 0;
+ written = iomap_write_end(iter, bytes, copied, folio);

/*
* Update the in-memory inode size after copying the data into
@@ -1315,7 +1314,7 @@ static int iomap_unshare_iter(struct iomap_iter *iter,
do {
struct folio *folio;
size_t offset;
- bool ret;
+ int ret;

bytes = min_t(u64, SIZE_MAX, bytes);
status = iomap_write_begin(iter, write_ops, &folio, &offset,
@@ -1327,7 +1326,7 @@ static int iomap_unshare_iter(struct iomap_iter *iter,

ret = iomap_write_end(iter, bytes, bytes, folio);
__iomap_put_folio(iter, write_ops, bytes, folio);
- if (WARN_ON_ONCE(!ret))
+ if (WARN_ON_ONCE(ret != bytes))
return -EIO;

cond_resched();
@@ -1388,7 +1387,7 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
do {
struct folio *folio;
size_t offset;
- bool ret;
+ int ret;

bytes = min_t(u64, SIZE_MAX, bytes);
status = iomap_write_begin(iter, write_ops, &folio, &offset,
@@ -1406,7 +1405,7 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,

ret = iomap_write_end(iter, bytes, bytes, folio);
__iomap_put_folio(iter, write_ops, bytes, folio);
- if (WARN_ON_ONCE(!ret))
+ if (WARN_ON_ONCE(ret != bytes))
return -EIO;

status = iomap_iter_advance(iter, &bytes);
--
2.49.0

On Tue, Sep 23, 2025 at 12:21:57PM +0800, alexjlzheng@gmail.com wrote:
> From: Jinliang Zheng <alexjlzheng@tencent.com>
>
> A following patch will allow iomap_write_end() to conditionally accept
> partial writes. In preparation for that, make iomap_write_end() return
> the number of bytes it accepted instead of a boolean.
>
> Signed-off-by: Jinliang Zheng <alexjlzheng@tencent.com>
> ---
> fs/iomap/buffered-io.c | 27 +++++++++++++--------------
> 1 file changed, 13 insertions(+), 14 deletions(-)
>
> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
> index e130db3b761e..6e516c7d9f04 100644
> --- a/fs/iomap/buffered-io.c
> +++ b/fs/iomap/buffered-io.c
...
> @@ -915,10 +915,10 @@ static void iomap_write_end_inline(const struct iomap_iter *iter,
> }
>
> /*
> - * Returns true if all copied bytes have been written to the pagecache,
> - * otherwise return false.
> + * Returns the number of bytes written to the pagecache, or zero if
> + * the write was rejected because it would partially update a block.
> */
> -static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
> +static int iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
> struct folio *folio)
> {
> const struct iomap *srcmap = iomap_iter_srcmap(iter);
> @@ -926,7 +926,7 @@ static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
>
> if (srcmap->type == IOMAP_INLINE) {
> iomap_write_end_inline(iter, folio, pos, copied);
> - return true;
> + return copied;
> }
>
> if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
> @@ -934,7 +934,7 @@ static bool iomap_write_end(struct iomap_iter *iter, size_t len, size_t copied,
>
> bh_written = block_write_end(pos, len, copied, folio);
> WARN_ON_ONCE(bh_written != copied && bh_written != 0);
> - return bh_written == copied;
> + return bh_written;

I notice block_write_end() actually returns an int. Not sure it's
really an issue, but perhaps we should just change the type of
bh_written here to match. Otherwise this seems reasonable.
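
Untested, but something like this is what I had in mind (sketch only;
apart from the declaration it is unchanged from your patch):

	if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		int bh_written;	/* match block_write_end()'s int return */

		bh_written = block_write_end(pos, len, copied, folio);
		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
		return bh_written;
	}

Either way, not a blocker.
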
Brian

> }
>
> return __iomap_write_end(iter->inode, pos, len, copied, folio);
> @@ -1000,8 +1000,7 @@ static int iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i,
> flush_dcache_folio(folio);
>
> copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
> - written = iomap_write_end(iter, bytes, copied, folio) ?
> - copied : 0;
> + written = iomap_write_end(iter, bytes, copied, folio);
>
> /*
> * Update the in-memory inode size after copying the data into
> @@ -1315,7 +1314,7 @@ static int iomap_unshare_iter(struct iomap_iter *iter,
> do {
> struct folio *folio;
> size_t offset;
> - bool ret;
> + int ret;
>
> bytes = min_t(u64, SIZE_MAX, bytes);
> status = iomap_write_begin(iter, write_ops, &folio, &offset,
> @@ -1327,7 +1326,7 @@ static int iomap_unshare_iter(struct iomap_iter *iter,
>
> ret = iomap_write_end(iter, bytes, bytes, folio);
> __iomap_put_folio(iter, write_ops, bytes, folio);
> - if (WARN_ON_ONCE(!ret))
> + if (WARN_ON_ONCE(ret != bytes))
> return -EIO;
>
> cond_resched();
> @@ -1388,7 +1387,7 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
> do {
> struct folio *folio;
> size_t offset;
> - bool ret;
> + int ret;
>
> bytes = min_t(u64, SIZE_MAX, bytes);
> status = iomap_write_begin(iter, write_ops, &folio, &offset,
> @@ -1406,7 +1405,7 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
>
> ret = iomap_write_end(iter, bytes, bytes, folio);
> __iomap_put_folio(iter, write_ops, bytes, folio);
> - if (WARN_ON_ONCE(!ret))
> + if (WARN_ON_ONCE(ret != bytes))
> return -EIO;
>
> status = iomap_iter_advance(iter, &bytes);
> --
> 2.49.0
>
>