...

Let me know what you think,
Maxime

Signed-off-by: Maxime Ripard <mripard@kernel.org>
---
Changes in v3:
- Reworked global variable patch
- Link to v2: https://lore.kernel.org/r/20250401-dma-buf-ecc-heap-v2-0-043fd006a1af@kernel.org

Changes in v2:
- Add vmap/vunmap operations
- Drop ECC flags uapi
- Rebase on top of 6.14
- Link to v1: https://lore.kernel.org/r/20240515-dma-buf-ecc-heap-v1-0-54cbbd049511@kernel.org

...

dma-buf: heaps: Introduce a new heap for reserved memory

 drivers/dma-buf/heaps/Kconfig         |   8 +
 drivers/dma-buf/heaps/Makefile        |   1 +
 drivers/dma-buf/heaps/carveout_heap.c | 360 ++++++++++++++++++++++++++++++++++
 drivers/dma-buf/heaps/system_heap.c   |   3 +--
 4 files changed, 370 insertions(+), 2 deletions(-)
---
base-commit: fcbf30774e82a441890b722bf0c26542fb82150f
change-id: 20240515-dma-buf-ecc-heap-28a311d2c94e

Best regards,
--
Maxime Ripard <mripard@kernel.org>
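
A note on the vmap/vunmap support added in v2: a kernel-side importer
reaches those ops through the generic dma-buf API. The helper below is
only an illustrative sketch (example_cpu_clear() is made up), while
dma_buf_vmap_unlocked(), dma_buf_vunmap_unlocked() and the iosys_map
helpers are the existing in-kernel interfaces on this base:

    #include <linux/dma-buf.h>
    #include <linux/iosys-map.h>

    /* Zero a heap buffer through its kernel mapping. The vmap call ends
     * up in the exporting heap's .vmap op, e.g. carveout_heap_vmap(). */
    static int example_cpu_clear(struct dma_buf *buf)
    {
        struct iosys_map map;
        int ret;

        ret = dma_buf_vmap_unlocked(buf, &map);
        if (ret)
            return ret;

        iosys_map_memset(&map, 0, 0, buf->size);

        dma_buf_vunmap_unlocked(buf, &map);

        return 0;
    }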
The system heap is storing its struct dma_heap pointer in a global
variable but isn't using it anywhere.

Let's move the global variable into system_heap_create() to make it
local.

Signed-off-by: Maxime Ripard <mripard@kernel.org>
---
 drivers/dma-buf/heaps/system_heap.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index XXXXXXX..XXXXXXX 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
...
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

-static struct dma_heap *sys_heap;
-
struct system_heap_buffer {
    struct dma_heap *heap;
    struct list_head attachments;
    struct mutex lock;
    unsigned long len;
@@ -XXX,XX +XXX,XX @@ static const struct dma_heap_ops system_heap_ops = {
};

static int __init system_heap_create(void)
{
    struct dma_heap_export_info exp_info;
+    struct dma_heap *sys_heap;

    exp_info.name = "system";
    exp_info.ops = &system_heap_ops;
    exp_info.priv = NULL;

--
2.49.0
Some reserved memory regions might have particular memory setup or
attributes that make them good candidates for heaps.

Let's provide a heap type that will create a new heap for each reserved
memory region flagged as such.
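
As a sketch of what opting in looks like (the node name, address and
size below are made up; the boolean "export" property is what this
series actually checks for):

    reserved-memory {
        #address-cells = <1>;
        #size-cells = <1>;
        ranges;

        /* Hypothetical 8 MiB carveout; only "export" matters here */
        framebuffer@60000000 {
            reg = <0x60000000 0x800000>;
            export;
        };
    };

Since each heap is named after its device tree node, this region would
be exposed to userspace as /dev/dma_heap/framebuffer@60000000.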

Signed-off-by: Maxime Ripard <mripard@kernel.org>
---
 drivers/dma-buf/heaps/Kconfig         |   8 +
 drivers/dma-buf/heaps/Makefile        |   1 +
 drivers/dma-buf/heaps/carveout_heap.c | 360 ++++++++++++++++++++++++++++++++++
 3 files changed, 369 insertions(+)

diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index XXXXXXX..XXXXXXX 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -XXX,XX +XXX,XX @@
+config DMABUF_HEAPS_CARVEOUT
+    bool "Carveout Heaps"
+    depends on DMABUF_HEAPS
+    help
+     Choose this option to enable the carveout dmabuf heap. The carveout
+     heap is backed by pages from reserved memory regions flagged as
+     exportable. If in doubt, say Y.
+
config DMABUF_HEAPS_SYSTEM
    bool "DMA-BUF System Heap"
    depends on DMABUF_HEAPS
    help
     Choose this option to enable the system dmabuf heap. The system heap
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index XXXXXXX..XXXXXXX 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -XXX,XX +XXX,XX @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DMABUF_HEAPS_CARVEOUT)    += carveout_heap.o
obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)    += system_heap.o
obj-$(CONFIG_DMABUF_HEAPS_CMA)        += cma_heap.o
diff --git a/drivers/dma-buf/heaps/carveout_heap.c b/drivers/dma-buf/heaps/carveout_heap.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/drivers/dma-buf/heaps/carveout_heap.c
@@ -XXX,XX +XXX,XX @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/genalloc.h>
+#include <linux/highmem.h>
+#include <linux/of_reserved_mem.h>
+
+struct carveout_heap_priv {
+    struct dma_heap *heap;
+    struct gen_pool *pool;
+};
+
+struct carveout_heap_buffer_priv {
+    struct mutex lock;
+    struct list_head attachments;
+
+    unsigned long num_pages;
+    struct carveout_heap_priv *heap;
+    dma_addr_t daddr;
+    void *vaddr;
+    unsigned int vmap_cnt;
+};
+
+struct carveout_heap_attachment {
+    struct list_head head;
+    struct sg_table table;
+
+    struct device *dev;
+    bool mapped;
+};
+
+static int carveout_heap_attach(struct dma_buf *buf,
+                struct dma_buf_attachment *attachment)
+{
+    struct carveout_heap_buffer_priv *priv = buf->priv;
+    struct carveout_heap_attachment *a;
+    struct sg_table *sgt;
+    unsigned long len = priv->num_pages * PAGE_SIZE;
+    int ret;
+
+    a = kzalloc(sizeof(*a), GFP_KERNEL);
+    if (!a)
+        return -ENOMEM;
+    INIT_LIST_HEAD(&a->head);
+    a->dev = attachment->dev;
+    attachment->priv = a;
+
+    sgt = &a->table;
+    ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+    if (ret)
+        goto err_cleanup_attach;
+
+    sg_dma_address(sgt->sgl) = priv->daddr;
+    sg_dma_len(sgt->sgl) = len;
+
+    mutex_lock(&priv->lock);
+    list_add(&a->head, &priv->attachments);
+    mutex_unlock(&priv->lock);
+
+    return 0;
+
+err_cleanup_attach:
+    kfree(a);
+    return ret;
+}
+
+static void carveout_heap_detach(struct dma_buf *dmabuf,
+                 struct dma_buf_attachment *attachment)
+{
+    struct carveout_heap_buffer_priv *priv = dmabuf->priv;
+    struct carveout_heap_attachment *a = attachment->priv;
+
+    mutex_lock(&priv->lock);
+    list_del(&a->head);
+    mutex_unlock(&priv->lock);
+
+    sg_free_table(&a->table);
+    kfree(a);
+}
+
+static struct sg_table *
+carveout_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+             enum dma_data_direction direction)
+{
+    struct carveout_heap_attachment *a = attachment->priv;
+    struct sg_table *table = &a->table;
+    int ret;
+
+    ret = dma_map_sgtable(a->dev, table, direction, 0);
+    if (ret)
+        return ERR_PTR(-ENOMEM);
+
+    a->mapped = true;
+
+    return table;
+}
+
+static void carveout_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+                    struct sg_table *table,
+                    enum dma_data_direction direction)
+{
+    struct carveout_heap_attachment *a = attachment->priv;
+
+    a->mapped = false;
+    dma_unmap_sgtable(a->dev, table, direction, 0);
+}
+
+static int
+carveout_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+                 enum dma_data_direction direction)
+{
+    struct carveout_heap_buffer_priv *priv = dmabuf->priv;
+    struct carveout_heap_attachment *a;
+    unsigned long len = priv->num_pages * PAGE_SIZE;
+
+    mutex_lock(&priv->lock);
+
+    if (priv->vmap_cnt > 0)
+        invalidate_kernel_vmap_range(priv->vaddr, len);
+
+    list_for_each_entry(a, &priv->attachments, head) {
+        if (!a->mapped)
+            continue;
+
+        dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+    }
+
+    mutex_unlock(&priv->lock);
+
+    return 0;
+}
+
+static int
+carveout_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+                 enum dma_data_direction direction)
+{
+    struct carveout_heap_buffer_priv *priv = dmabuf->priv;
+    struct carveout_heap_attachment *a;
+    unsigned long len = priv->num_pages * PAGE_SIZE;
+
+    mutex_lock(&priv->lock);
+
+    if (priv->vmap_cnt > 0)
+        flush_kernel_vmap_range(priv->vaddr, len);
+
+    list_for_each_entry(a, &priv->attachments, head) {
+        if (!a->mapped)
+            continue;
+
+        dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+    }
+
+    mutex_unlock(&priv->lock);
+
+    return 0;
+}
+
+static int carveout_heap_mmap(struct dma_buf *dmabuf,
+             struct vm_area_struct *vma)
+{
+    struct carveout_heap_buffer_priv *priv = dmabuf->priv;
+    unsigned long len = priv->num_pages * PAGE_SIZE;
+    struct page *page = virt_to_page(priv->vaddr);
+
+    return remap_pfn_range(vma, vma->vm_start, page_to_pfn(page),
+             len, vma->vm_page_prot);
+}
+
+static int carveout_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+    struct carveout_heap_buffer_priv *priv = dmabuf->priv;
+
+    mutex_lock(&priv->lock);
+
+    iosys_map_set_vaddr(map, priv->vaddr);
+    priv->vmap_cnt++;
+
+    mutex_unlock(&priv->lock);
+
+    return 0;
+}
+
+static void carveout_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+    struct carveout_heap_buffer_priv *priv = dmabuf->priv;
+
+    mutex_lock(&priv->lock);
+
+    priv->vmap_cnt--;
+    mutex_unlock(&priv->lock);
+
+    iosys_map_clear(map);
+}
+
+static void carveout_heap_dma_buf_release(struct dma_buf *buf)
+{
+    struct carveout_heap_buffer_priv *buffer_priv = buf->priv;
+    struct carveout_heap_priv *heap_priv = buffer_priv->heap;
+    unsigned long len = buffer_priv->num_pages * PAGE_SIZE;
+
+    gen_pool_free(heap_priv->pool, (unsigned long)buffer_priv->vaddr, len);
+    kfree(buffer_priv);
+}
+
+static const struct dma_buf_ops carveout_heap_buf_ops = {
+    .attach        = carveout_heap_attach,
+    .detach        = carveout_heap_detach,
+    .map_dma_buf    = carveout_heap_map_dma_buf,
+    .unmap_dma_buf    = carveout_heap_unmap_dma_buf,
+    .begin_cpu_access    = carveout_heap_dma_buf_begin_cpu_access,
+    .end_cpu_access    = carveout_heap_dma_buf_end_cpu_access,
+    .mmap        = carveout_heap_mmap,
+    .vmap        = carveout_heap_vmap,
+    .vunmap        = carveout_heap_vunmap,
+    .release    = carveout_heap_dma_buf_release,
+};
+
+static struct dma_buf *carveout_heap_allocate(struct dma_heap *heap,
+                     unsigned long len,
+                     u32 fd_flags,
+                     u64 heap_flags)
+{
+    struct carveout_heap_priv *heap_priv = dma_heap_get_drvdata(heap);
+    struct carveout_heap_buffer_priv *buffer_priv;
+    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+    struct dma_buf *buf;
+    dma_addr_t daddr;
+    size_t size = PAGE_ALIGN(len);
+    void *vaddr;
+    int ret;
+
+    buffer_priv = kzalloc(sizeof(*buffer_priv), GFP_KERNEL);
+    if (!buffer_priv)
+        return ERR_PTR(-ENOMEM);
+
+    INIT_LIST_HEAD(&buffer_priv->attachments);
+    mutex_init(&buffer_priv->lock);
+
+    vaddr = gen_pool_dma_zalloc(heap_priv->pool, size, &daddr);
+    if (!vaddr) {
+        ret = -ENOMEM;
+        goto err_free_buffer_priv;
+    }
+
+    buffer_priv->vaddr = vaddr;
+    buffer_priv->daddr = daddr;
+    buffer_priv->heap = heap_priv;
+    buffer_priv->num_pages = size >> PAGE_SHIFT;
+
+    /* create the dmabuf */
+    exp_info.exp_name = dma_heap_get_name(heap);
+    exp_info.ops = &carveout_heap_buf_ops;
+    exp_info.size = size;
+    exp_info.flags = fd_flags;
+    exp_info.priv = buffer_priv;
+
+    buf = dma_buf_export(&exp_info);
+    if (IS_ERR(buf)) {
+        ret = PTR_ERR(buf);
+        goto err_free_buffer;
+    }
+
+    return buf;
+
+err_free_buffer:
+    gen_pool_free(heap_priv->pool, (unsigned long)vaddr, size);
+err_free_buffer_priv:
+    kfree(buffer_priv);
+
+    return ERR_PTR(ret);
+}
+
+static const struct dma_heap_ops carveout_heap_ops = {
+    .allocate = carveout_heap_allocate,
+};
+
+static int __init carveout_heap_setup(struct device_node *node)
+{
+    struct dma_heap_export_info exp_info = {};
+    const struct reserved_mem *rmem;
+    struct carveout_heap_priv *priv;
+    struct dma_heap *heap;
+    struct gen_pool *pool;
+    void *base;
+    int ret;
+
+    rmem = of_reserved_mem_lookup(node);
+    if (!rmem)
+        return -EINVAL;
+
+    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+    if (!priv)
+        return -ENOMEM;
+
+    pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
+    if (!pool) {
+        ret = -ENOMEM;
+        goto err_cleanup_heap;
+    }
+    priv->pool = pool;
+
+    base = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+    if (!base) {
+        ret = -ENOMEM;
+        goto err_release_mem_region;
+    }
+
+    ret = gen_pool_add_virt(pool, (unsigned long)base, rmem->base,
+                rmem->size, NUMA_NO_NODE);
+    if (ret)
+        goto err_unmap;
+
+    exp_info.name = node->full_name;
+    exp_info.ops = &carveout_heap_ops;
+    exp_info.priv = priv;
+
+    heap = dma_heap_add(&exp_info);
+    if (IS_ERR(heap)) {
+        ret = PTR_ERR(heap);
+        goto err_cleanup_pool_region;
+    }
+    priv->heap = heap;
+
+    return 0;
+
+err_cleanup_pool_region:
+    gen_pool_free(pool, (unsigned long)base, rmem->size);
+err_unmap:
+    memunmap(base);
+err_release_mem_region:
+    gen_pool_destroy(pool);
+err_cleanup_heap:
+    kfree(priv);
+    return ret;
+}
+
+static int __init carveout_heap_init(void)
+{
+    struct device_node *rmem_node;
+    struct device_node *node;
+    int ret;
+
+    rmem_node = of_find_node_by_path("/reserved-memory");
+    if (!rmem_node)
+        return 0;
+
+    for_each_child_of_node(rmem_node, node) {
+        if (!of_property_read_bool(node, "export"))
+            continue;
+
+        ret = carveout_heap_setup(node);
+        if (ret)
+            return ret;
+    }
+
+    return 0;
+}
+
+module_init(carveout_heap_init);

--
2.49.0
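
For completeness, a rough userspace sketch against the existing dma-heap
uAPI (<linux/dma-heap.h> and <linux/dma-buf.h> are the real interfaces;
the heap name reuses the hypothetical framebuffer@60000000 node from the
commit message above):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/dma-buf.h>
    #include <linux/dma-heap.h>

    int main(void)
    {
        /* len is rounded up to a page multiple by the heap */
        struct dma_heap_allocation_data alloc = {
            .len = 4096,
            .fd_flags = O_RDWR | O_CLOEXEC,
        };
        struct dma_buf_sync sync;
        int heap_fd;
        void *map;

        heap_fd = open("/dev/dma_heap/framebuffer@60000000",
                   O_RDONLY | O_CLOEXEC);
        if (heap_fd < 0)
            return 1;

        /* On success, alloc.fd is a dma-buf backed by the carveout */
        if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0)
            return 1;

        map = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
               alloc.fd, 0);
        if (map == MAP_FAILED)
            return 1;

        /* Bracket CPU access so begin/end_cpu_access get called */
        sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
        ioctl(alloc.fd, DMA_BUF_IOCTL_SYNC, &sync);

        memset(map, 0, alloc.len);

        sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
        ioctl(alloc.fd, DMA_BUF_IOCTL_SYNC, &sync);

        munmap(map, alloc.len);
        close(alloc.fd);
        close(heap_fd);

        return 0;
    }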