Fix three comment typos that misspell 'zswap' as 'zwap'.
Signed-off-by: SeongJae Park <sj@kernel.org>
---
mm/memcontrol.c | 2 +-
mm/zswap.c      | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
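
For context, a condensed sketch of the call pairing that the
mm/memcontrol.c comment below describes, loosely based on the
zswap_store() flow (names simplified, error handling abbreviated;
not verbatim mainline code):

	objcg = get_obj_cgroup_from_folio(folio);

	/* Ask the cgroup whether this store may proceed at all. */
	if (objcg && !obj_cgroup_may_zswap(objcg))
		goto reject;

	if (!zswap_compress(page, entry, pool))
		goto reject;

	/* Force the charge that obj_cgroup_may_zswap() allowed above. */
	if (objcg)
		obj_cgroup_charge_zswap(objcg, entry->length);
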
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 69c970554e85..74b1bc2252b6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5421,7 +5421,7 @@ bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
* @size: size of compressed object
*
* This forces the charge after obj_cgroup_may_zswap() allowed
- * compression and storage in zwap for this cgroup to go ahead.
+ * compression and storage in zswap for this cgroup to go ahead.
*/
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
{
diff --git a/mm/zswap.c b/mm/zswap.c
index 80619c8589a7..f6b1c8832a4f 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -879,7 +879,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
* acomp instance, then get those requests done simultaneously. but in this
* case, zswap actually does store and load page by page, there is no
* existing method to send the second page before the first page is done
- * in one thread doing zwap.
+ * in one thread doing zswap.
* but in different threads running on different cpu, we have different
* acomp instance, so multiple threads can do (de)compression in parallel.
*/
@@ -1128,7 +1128,7 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
*
* 1. We extract the swp_entry_t to the stack, allowing
* zswap_writeback_entry() to pin the swap entry and
- * then validate the zwap entry against that swap entry's
+ * then validate the zswap entry against that swap entry's
* tree using pointer value comparison. Only when that
* is successful can the entry be dereferenced.
*
--
2.39.5
On 2025/10/4 04:38, SeongJae Park wrote:
> Fix three comment typos that misspell 'zswap' as 'zwap'.
>
> Signed-off-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
On Fri, Oct 3, 2025 at 1:38 PM SeongJae Park <sj@kernel.org> wrote:
>
> Fix three comment typos that misspell 'zswap' as 'zwap'.
>
> Signed-off-by: SeongJae Park <sj@kernel.org>

Acked-by: Nhat Pham <nphamcs@gmail.com>
On Fri, Oct 03, 2025 at 01:38:49PM -0700, SeongJae Park wrote:
> Fix three comment typos that misspell 'zswap' as 'zwap'.
>
> Signed-off-by: SeongJae Park <sj@kernel.org>
Acked-by: Yosry Ahmed <yosry.ahmed@linux.dev>