Currently there is one 'struct page_frag' for every 'struct
sock' and 'struct task_struct'; we are about to replace
'struct page_frag' with 'struct page_frag_cache' for them.
Before beginning the replacement, we need to ensure that the
size of 'struct page_frag_cache' is no bigger than the size
of 'struct page_frag', as there may be tens of thousands of
'struct sock' and 'struct task_struct' instances in the
system.
By OR'ing the page order and the pfmemalloc bit into the lower
bits of 'va', instead of using a 'u16' or 'u32' for the page
size and a 'u8' for pfmemalloc, we avoid wasting 3 or 5 bytes.
And since the page address, pfmemalloc bit and order stay
unchanged for the same page in the same 'page_frag_cache'
instance, it makes sense to pack them together.
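As an illustration (not part of the code change; the numbers
assume 4KB pages, i.e. PAGE_SHIFT = 12): 'va' is page-aligned,
so its low 12 bits are always zero, leaving bits 7:0 free for
the order (at most 3 for the 32KB PAGE_FRAG_CACHE_MAX_SIZE) and
bit 8 for the pfmemalloc flag. A pfmemalloc order-3 page at the
made-up address 0xffff888012340000 would be encoded as:

	0xffff888012340000 | 3 | (1UL << 8) == 0xffff888012340103

Masking with GENMASK(7, 0), BIT(8) and PAGE_MASK recovers the
order, the pfmemalloc bit and the page address respectively.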
After this patch, the size of 'struct page_frag_cache' should be
the same as the size of 'struct page_frag'.
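For reference, a minimal caller-side sketch of the intended usage
pattern (the 'foo' object is made up for illustration; the API
names are the ones touched by this patch):

	#include <linux/gfp.h>
	#include <linux/page_frag_cache.h>

	struct foo {
		struct page_frag_cache cache;
	};

	static void foo_init(struct foo *f)
	{
		page_frag_cache_init(&f->cache);
	}

	static void *foo_alloc_frag(struct foo *f, unsigned int len)
	{
		/* an align_mask of ~0u requests no extra alignment */
		return __page_frag_alloc_align(&f->cache, len, GFP_KERNEL, ~0u);
	}

	static void foo_teardown(struct foo *f)
	{
		page_frag_cache_drain(&f->cache);
	}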
CC: Alexander Duyck <alexander.duyck@gmail.com>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
include/linux/mm_types_task.h | 19 ++++++-----
include/linux/page_frag_cache.h | 60 +++++++++++++++++++++++++++++++--
mm/page_frag_cache.c | 51 +++++++++++++++-------------
3 files changed, 97 insertions(+), 33 deletions(-)
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index cdc1e3696439..a8635460e027 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -50,18 +50,21 @@ struct page_frag {
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
struct page_frag_cache {
- void *va;
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+ /* encoded_page consists of the virtual address, pfmemalloc bit and order
+ * of a page.
+ */
+ unsigned long encoded_page;
+
+ /* we maintain a pagecount bias, so that we don't dirty cache line
+ * containing page->_refcount every time we allocate a fragment.
+ */
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
__u16 offset;
- __u16 size;
+ __u16 pagecnt_bias;
#else
__u32 offset;
+ __u32 pagecnt_bias;
#endif
- /* we maintain a pagecount bias, so that we dont dirty cache line
- * containing page->_refcount every time we allocate a fragment.
- */
- unsigned int pagecnt_bias;
- bool pfmemalloc;
};
/* Track pages that require TLB flushes */
diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
index 0a52f7a179c8..372d6ed7e20a 100644
--- a/include/linux/page_frag_cache.h
+++ b/include/linux/page_frag_cache.h
@@ -3,18 +3,74 @@
#ifndef _LINUX_PAGE_FRAG_CACHE_H
#define _LINUX_PAGE_FRAG_CACHE_H
+#include <linux/bits.h>
+#include <linux/build_bug.h>
#include <linux/log2.h>
+#include <linux/mm.h>
#include <linux/mm_types_task.h>
#include <linux/types.h>
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+/* Use a full byte here to enable assembler optimization as the shift
+ * operation is usually expecting a byte.
+ */
+#define PAGE_FRAG_CACHE_ORDER_MASK GENMASK(7, 0)
+#define PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT 8
+#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT BIT(PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT)
+#else
+/* Compiler should be able to figure out we don't read things as any value
+ * ANDed with 0 is 0.
+ */
+#define PAGE_FRAG_CACHE_ORDER_MASK 0
+#define PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT 0
+#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT BIT(PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT)
+#endif
+
+static inline unsigned long page_frag_encode_page(struct page *page,
+ unsigned int order,
+ bool pfmemalloc)
+{
+ BUILD_BUG_ON(PAGE_FRAG_CACHE_MAX_ORDER > PAGE_FRAG_CACHE_ORDER_MASK);
+ BUILD_BUG_ON(PAGE_FRAG_CACHE_PFMEMALLOC_BIT >= PAGE_SIZE);
+
+ return (unsigned long)page_address(page) |
+ (order & PAGE_FRAG_CACHE_ORDER_MASK) |
+ ((unsigned long)pfmemalloc << PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT);
+}
+
+static inline unsigned long page_frag_encoded_page_order(unsigned long encoded_page)
+{
+ return encoded_page & PAGE_FRAG_CACHE_ORDER_MASK;
+}
+
+static inline bool page_frag_encoded_page_pfmemalloc(unsigned long encoded_page)
+{
+ return !!(encoded_page & PAGE_FRAG_CACHE_PFMEMALLOC_BIT);
+}
+
+static inline void *page_frag_encoded_page_address(unsigned long encoded_page)
+{
+ return (void *)(encoded_page & PAGE_MASK);
+}
+
+static inline struct page *page_frag_encoded_page_ptr(unsigned long encoded_page)
+{
+ return virt_to_page((void *)encoded_page);
+}
+
static inline void page_frag_cache_init(struct page_frag_cache *nc)
{
- nc->va = NULL;
+ nc->encoded_page = 0;
}
static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
{
- return !!nc->pfmemalloc;
+ return page_frag_encoded_page_pfmemalloc(nc->encoded_page);
+}
+
+static inline unsigned int page_frag_cache_page_size(unsigned long encoded_page)
+{
+ return PAGE_SIZE << page_frag_encoded_page_order(encoded_page);
}
void page_frag_cache_drain(struct page_frag_cache *nc);
diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c
index 4c8e04379cb3..228cff9a4cdb 100644
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -15,13 +15,13 @@
#include <linux/export.h>
#include <linux/gfp_types.h>
#include <linux/init.h>
-#include <linux/mm.h>
#include <linux/page_frag_cache.h>
#include "internal.h"
static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
gfp_t gfp_mask)
{
+ unsigned long order = PAGE_FRAG_CACHE_MAX_ORDER;
struct page *page = NULL;
gfp_t gfp = gfp_mask;
@@ -30,23 +30,31 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
__GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
PAGE_FRAG_CACHE_MAX_ORDER);
- nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
#endif
- if (unlikely(!page))
+ if (unlikely(!page)) {
page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+ if (unlikely(!page)) {
+ nc->encoded_page = 0;
+ return NULL;
+ }
- nc->va = page ? page_address(page) : NULL;
+ order = 0;
+ }
+
+ nc->encoded_page = page_frag_encode_page(page, order,
+ page_is_pfmemalloc(page));
return page;
}
void page_frag_cache_drain(struct page_frag_cache *nc)
{
- if (!nc->va)
+ if (!nc->encoded_page)
return;
- __page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias);
- nc->va = NULL;
+ __page_frag_cache_drain(page_frag_encoded_page_ptr(nc->encoded_page),
+ nc->pagecnt_bias);
+ nc->encoded_page = 0;
}
EXPORT_SYMBOL(page_frag_cache_drain);
@@ -63,31 +71,27 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask,
unsigned int align_mask)
{
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
- unsigned int size = nc->size;
-#else
- unsigned int size = PAGE_SIZE;
-#endif
- unsigned int offset;
+ unsigned long encoded_page = nc->encoded_page;
+ unsigned int size, offset;
struct page *page;
- if (unlikely(!nc->va)) {
+ size = page_frag_cache_page_size(encoded_page);
+
+ if (unlikely(!encoded_page)) {
refill:
page = __page_frag_cache_refill(nc, gfp_mask);
if (!page)
return NULL;
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
- /* if size can vary use size else just use PAGE_SIZE */
- size = nc->size;
-#endif
+ encoded_page = nc->encoded_page;
+ size = page_frag_cache_page_size(encoded_page);
+
/* Even if we own the page, we do not use atomic_set().
* This would break get_page_unless_zero() users.
*/
page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
/* reset page count bias and offset to start of new frag */
- nc->pfmemalloc = page_is_pfmemalloc(page);
nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
nc->offset = 0;
}
@@ -107,13 +111,14 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
return NULL;
}
- page = virt_to_page(nc->va);
+ page = page_frag_encoded_page_ptr(encoded_page);
if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
goto refill;
- if (unlikely(nc->pfmemalloc)) {
- free_unref_page(page, compound_order(page));
+ if (unlikely(page_frag_encoded_page_pfmemalloc(encoded_page))) {
+ free_unref_page(page,
+ page_frag_encoded_page_order(encoded_page));
goto refill;
}
@@ -128,7 +133,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
nc->pagecnt_bias--;
nc->offset = offset + fragsz;
- return nc->va + offset;
+ return page_frag_encoded_page_address(encoded_page) + offset;
}
EXPORT_SYMBOL(__page_frag_alloc_align);
--
2.33.0
On Mon, Aug 26, 2024 at 5:46 AM Yunsheng Lin <linyunsheng@huawei.com> wrote:
>
> Currently there is one 'struct page_frag' for every 'struct
> sock' and 'struct task_struct'; we are about to replace
> 'struct page_frag' with 'struct page_frag_cache' for them.
>
> ...
>
> void page_frag_cache_drain(struct page_frag_cache *nc);
So how many of these additions are actually needed outside of the
page_frag_cache.c file? I'm just wondering if we could look at moving
most of these into the .c file itself instead of making them accessible
to all callers, as I don't believe we currently have anyone looking
into the size of the frag cache or anything like that, and I would
prefer to avoid exposing such functionality if possible. As the
non-order-0 allocation problem with this has pointed out, people will
exploit any interface exposed, even if unintentionally.
I would want to move the size/order logic, as well as the splitting out
of the virtual address, as we shouldn't be allowing the user to look at
that without going through an allocation function.
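To make that concrete, a rough sketch of the direction suggested above
(an illustration only, not a tested patch): the masks and encode/decode
helpers would move out of page_frag_cache.h and become file-local to
mm/page_frag_cache.c, so callers could only reach the encoded fields
through the allocation functions. The PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE
#if branch is elided for brevity:

	/* mm/page_frag_cache.c -- encoding details kept internal */
	#define PAGE_FRAG_CACHE_ORDER_MASK		GENMASK(7, 0)
	#define PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT	8
	#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT	BIT(PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT)

	/* plain 'static' rather than 'static inline' in the header:
	 * nothing outside this file can encode or pick apart an
	 * encoded_page.
	 */
	static unsigned long page_frag_encode_page(struct page *page,
						   unsigned int order,
						   bool pfmemalloc)
	{
		BUILD_BUG_ON(PAGE_FRAG_CACHE_MAX_ORDER > PAGE_FRAG_CACHE_ORDER_MASK);
		BUILD_BUG_ON(PAGE_FRAG_CACHE_PFMEMALLOC_BIT >= PAGE_SIZE);

		return (unsigned long)page_address(page) |
		       (order & PAGE_FRAG_CACHE_ORDER_MASK) |
		       ((unsigned long)pfmemalloc << PAGE_FRAG_CACHE_PFMEMALLOC_SHIFT);
	}

	static void *page_frag_encoded_page_address(unsigned long encoded_page)
	{
		return (void *)(encoded_page & PAGE_MASK);
	}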
On 2024/8/27 0:46, Alexander Duyck wrote:
> On Mon, Aug 26, 2024 at 5:46 AM Yunsheng Lin <linyunsheng@huawei.com> wrote:
>>
>> Currently there is one 'struct page_frag' for every 'struct
>> sock' and 'struct task_struct'; we are about to replace
>> 'struct page_frag' with 'struct page_frag_cache' for them.
>>
>> ...
>>
>> void page_frag_cache_drain(struct page_frag_cache *nc);
>
> So how many of these additions are actually needed outside of the
> page_frag_cache.c file? I'm just wondering if we could look at moving
At least page_frag_cache_is_pfmemalloc(), page_frag_encoded_page_order(),
page_frag_encoded_page_ptr() and page_frag_encoded_page_address() are
needed outside of the page_frag_cache.c file for now; they are used mostly
in __page_frag_cache_commit() and __page_frag_alloc_refill_probe_align()
for debugging and performance reasons, see patches 7 & 10.

The only one left is page_frag_encode_page(); I am not sure it makes
much sense to move it to page_frag_cache.c while the rest of them are
in the .h file.
> most of these into the .c file itself instead of making them accessible
> to all callers, as I don't believe we currently have anyone looking
> into the size of the frag cache or anything like that, and I would
> prefer to avoid exposing such functionality if possible. As the
> non-order-0 allocation problem with this has pointed out, people will
> exploit any interface exposed, even if unintentionally.
>
> I would want to move the size/order logic, as well as the splitting out
> of the virtual address, as we shouldn't be allowing the user to look at
> that without going through an allocation function.
I generally agree with the above argument, if there are ways to do that
without sacrificing the debugging and performance mentioned above.
On Tue, Aug 27, 2024 at 5:06 AM Yunsheng Lin <linyunsheng@huawei.com> wrote:
>
> On 2024/8/27 0:46, Alexander Duyck wrote:
> > On Mon, Aug 26, 2024 at 5:46 AM Yunsheng Lin <linyunsheng@huawei.com> wrote:
> >>
> >> Currently there is one 'struct page_frag' for every 'struct
> >> sock' and 'struct task_struct'; we are about to replace
> >> 'struct page_frag' with 'struct page_frag_cache' for them.
> >>
> >> ...
> >>
> >> void page_frag_cache_drain(struct page_frag_cache *nc);
> >
> > So how many of these additions are actually needed outside of the
> > page_frag_cache.c file? I'm just wondering if we could look at moving
>
> At least page_frag_cache_is_pfmemalloc(), page_frag_encoded_page_order(),
> page_frag_encoded_page_ptr() and page_frag_encoded_page_address() are
> needed outside of the page_frag_cache.c file for now; they are used mostly
> in __page_frag_cache_commit() and __page_frag_alloc_refill_probe_align()
> for debugging and performance reasons, see patches 7 & 10.

As far as __page_frag_cache_commit() goes, I might say that could be moved
to page_frag_cache.c, but admittedly I don't know how much that would
impact the performance.

> The only one left is page_frag_encode_page(); I am not sure it makes
> much sense to move it to page_frag_cache.c while the rest of them are
> in the .h file.

I would move it. There is no point in exposing internals more than
necessary. Also, since you are carrying a BUILD_BUG_ON it would make
sense to keep that internal to your implementation.
On 2024/8/28 2:16, Alexander Duyck wrote:
> On Tue, Aug 27, 2024 at 5:06 AM Yunsheng Lin <linyunsheng@huawei.com> wrote:
>>
>> On 2024/8/27 0:46, Alexander Duyck wrote:
>>> On Mon, Aug 26, 2024 at 5:46 AM Yunsheng Lin <linyunsheng@huawei.com> wrote:
>>>>
>>>> Currently there is one 'struct page_frag' for every 'struct
>>>> sock' and 'struct task_struct'; we are about to replace
>>>> 'struct page_frag' with 'struct page_frag_cache' for them.
>>>>
>>>> ...
>>>>
>>>> void page_frag_cache_drain(struct page_frag_cache *nc);
>>>
>>> So how many of these additions are actually needed outside of the
>>> page_frag_cache.c file? I'm just wondering if we could look at moving
>>
>> At least page_frag_cache_is_pfmemalloc(), page_frag_encoded_page_order(),
>> page_frag_encoded_page_ptr() and page_frag_encoded_page_address() are
>> needed outside of the page_frag_cache.c file for now; they are used mostly
>> in __page_frag_cache_commit() and __page_frag_alloc_refill_probe_align()
>> for debugging and performance reasons, see patches 7 & 10.
>
> As far as __page_frag_cache_commit() goes, I might say that could be moved
> to page_frag_cache.c, but admittedly I don't know how much that would
> impact the performance.
The performance impact seems large enough (roughly 11% more cycles and
25% longer elapsed time in the test below) that moving it to
page_frag_cache.c does not seem justified.

Before the move:
Performance counter stats for 'insmod page_frag_test.ko test_push_cpu=16 test_pop_cpu=17 test_alloc_len=256 nr_test=512000000 test_align=0 test_prepare=0' (20 runs):
17.749582 task-clock (msec) # 0.002 CPUs utilized ( +- 0.15% )
5 context-switches # 0.304 K/sec ( +- 2.48% )
0 cpu-migrations # 0.017 K/sec ( +- 35.04% )
76 page-faults # 0.004 M/sec ( +- 0.45% )
46103462 cycles # 2.597 GHz ( +- 0.14% )
60692196 instructions # 1.32 insn per cycle ( +- 0.12% )
14734050 branches # 830.107 M/sec ( +- 0.12% )
19792 branch-misses # 0.13% of all branches ( +- 0.75% )
9.837758611 seconds time elapsed ( +- 0.38% )
After the move:
Performance counter stats for 'insmod page_frag_test.ko test_push_cpu=16 test_pop_cpu=17 test_alloc_len=256 nr_test=512000000 test_align=0 test_prepare=0' (20 runs):
19.682296 task-clock (msec) # 0.002 CPUs utilized ( +- 4.08% )
6 context-switches # 0.305 K/sec ( +- 3.42% )
0 cpu-migrations # 0.000 K/sec
76 page-faults # 0.004 M/sec ( +- 0.44% )
51128091 cycles # 2.598 GHz ( +- 4.08% )
58833583 instructions # 1.15 insn per cycle ( +- 4.50% )
14260855 branches # 724.552 M/sec ( +- 4.63% )
20120 branch-misses # 0.14% of all branches ( +- 0.92% )
12.318770150 seconds time elapsed ( +- 0.15% )
>
>> The only one left is page_frag_encode_page(); I am not sure it makes
>> much sense to move it to page_frag_cache.c while the rest of them are
>> in the .h file.
>
> I would move it. There is no point in exposing internals more than
> necessary. Also, since you are carrying a BUILD_BUG_ON it would make
> sense to keep that internal to your implementation.