[PATCH v2 3/4] hugetlbfs: improve read HWPOISON hugepage

Posted by Jiaqi Yan 2 years, 7 months ago
When a hugepage contains HWPOISON pages, read() fails to read any byte
of the hugepage and returns -EIO, although many bytes in the HWPOISON
hugepage are readable.

Improve this by allowing hugetlbfs_read_iter to return as many bytes as
possible. For a requested range [offset, offset + len) that contains a
HWPOISON page, return [offset, first HWPOISON page addr); the next read
attempt will fail and return -EIO.
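
For example, userspace can now salvage every byte outside the poisoned
base page by skipping it once read() starts failing. Below is a minimal
sketch of such a reader; the hugetlbfs path is hypothetical and only the
semantics described above are assumed:

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	int fd = open("/mnt/huge/data", O_RDONLY);	/* hypothetical file */
	char buf[8192];
	ssize_t n;

	if (fd < 0 || pagesz < 0)
		return 1;

	while ((n = read(fd, buf, sizeof(buf))) != 0) {
		if (n > 0)
			continue;	/* bytes before the next HWPOISON subpage */
		if (errno != EIO)
			return 1;	/* unrelated failure */
		/*
		 * The previous read stopped right before the first raw
		 * HWPOISON subpage, so this attempt returned -EIO; skip
		 * one base page and resume with the healthy remainder.
		 */
		if (lseek(fd, pagesz, SEEK_CUR) < 0)
			return 1;
	}
	close(fd);
	return 0;
}

If several consecutive subpages are poisoned, the loop simply takes the
-EIO/lseek() path once per poisoned base page.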

Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
---
 fs/hugetlbfs/inode.c | 58 +++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 52 insertions(+), 6 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 90361a922cec..86879ca3ff1e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -282,6 +282,42 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 }
 #endif
 
+/*
+ * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset.
+ * Returns the maximum number of bytes one can read without touching the 1st raw
+ * HWPOISON subpage.
+ *
+ * The implementation borrows the iteration logic from copy_page_to_iter*.
+ */
+static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes)
+{
+	size_t n = 0;
+	size_t res = 0;
+	struct folio *folio = page_folio(page);
+
+	/* First subpage to start the loop. */
+	page += offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+	while (1) {
+		if (is_raw_hwp_subpage(folio, page))
+			break;
+
+		/* Safe to read n bytes without touching HWPOISON subpage. */
+		n = min(bytes, (size_t)PAGE_SIZE - offset);
+		res += n;
+		bytes -= n;
+		if (!bytes || !n)
+			break;
+		offset += n;
+		if (offset == PAGE_SIZE) {
+			page++;
+			offset = 0;
+		}
+	}
+
+	return res;
+}
+
 /*
  * Support for read() - Find the page attached to f_mapping and copy out the
  * data. This provides functionality similar to filemap_read().
@@ -300,7 +336,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 
 	while (iov_iter_count(to)) {
 		struct page *page;
-		size_t nr, copied;
+		size_t nr, copied, want;
 
 		/* nr is the maximum number of bytes to copy from this page */
 		nr = huge_page_size(h);
@@ -328,16 +364,26 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		} else {
 			unlock_page(page);
 
-			if (PageHWPoison(page)) {
-				put_page(page);
-				retval = -EIO;
-				break;
+			if (!PageHWPoison(page))
+				want = nr;
+			else {
+				/*
+				 * Adjust how many bytes are safe to read without
+				 * touching the 1st raw HWPOISON subpage after
+				 * offset.
+				 */
+				want = adjust_range_hwpoison(page, offset, nr);
+				if (want == 0) {
+					put_page(page);
+					retval = -EIO;
+					break;
+				}
 			}
 
 			/*
 			 * We have the page, copy it to user space buffer.
 			 */
-			copied = copy_page_to_iter(page, offset, nr, to);
+			copied = copy_page_to_iter(page, offset, want, to);
 			put_page(page);
 		}
 		offset += copied;
-- 
2.41.0.162.gfafddb0af9-goog
Re: [PATCH v2 3/4] hugetlbfs: improve read HWPOISON hugepage
Posted by Mike Kravetz 2 years, 7 months ago
On 06/23/23 16:40, Jiaqi Yan wrote:
> When a hugepage contains HWPOISON pages, read() fails to read any byte
> of the hugepage and returns -EIO, although many bytes in the HWPOISON
> hugepage are readable.
> 
> Improve this by allowing hugetlbfs_read_iter to return as many bytes as
> possible. For a requested range [offset, offset + len) that contains a
> HWPOISON page, return [offset, first HWPOISON page addr); the next read
> attempt will fail and return -EIO.
> 
> Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
> ---
>  fs/hugetlbfs/inode.c | 58 +++++++++++++++++++++++++++++++++++++++-----
>  1 file changed, 52 insertions(+), 6 deletions(-)

I went through the code and could not find any problems.

Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>

However, code like this is where I often make mistakes.  So, it would be
great if someone else could take a look as well.
-- 
Mike Kravetz

Re: [PATCH v2 3/4] hugetlbfs: improve read HWPOISON hugepage
Posted by Naoya Horiguchi 2 years, 7 months ago
On Fri, Jun 23, 2023 at 04:40:14PM +0000, Jiaqi Yan wrote:
> When a hugepage contains HWPOISON pages, read() fails to read any byte
> of the hugepage and returns -EIO, although many bytes in the HWPOISON
> hugepage are readable.
> 
> Improve this by allowing hugetlbfs_read_iter to return as many bytes as
> possible. For a requested range [offset, offset + len) that contains a
> HWPOISON page, return [offset, first HWPOISON page addr); the next read
> attempt will fail and return -EIO.
> 
> Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>

Looks good to me.

Reviewed-by: Naoya Horiguchi <naoya.horiguchi@nec.com>