From: Alexei Starovoitov <ast@kernel.org>

Partially revert commit 0aaddfb06882 ("locking/local_lock: Introduce localtry_lock_t").
Remove the localtry_*() helpers, since the localtry_lock() name might
be misinterpreted as "try lock".

Introduce local_trylock[_irqsave]() helpers that only work
with the newly introduced local_trylock_t type.
Note that an attempt to use local_trylock[_irqsave]() with local_lock_t
will cause a compilation failure.

Usage and behavior in !PREEMPT_RT:

local_lock_t lock; // sizeof(lock) == 0
local_lock(&lock); // preempt disable
local_lock_irqsave(&lock, ...); // irq save
if (local_trylock_irqsave(&lock, ...)) // compilation error

local_trylock_t lock; // sizeof(lock) == 4
local_lock(&lock); // preempt disable, acquired = 1
local_lock_irqsave(&lock, ...); // irq save, acquired = 1
if (local_trylock(&lock)) // if (!acquired) preempt disable
if (local_trylock_irqsave(&lock, ...)) // if (!acquired) irq save

The existing local_lock_*() macros can be used either with
local_lock_t or local_trylock_t.
With local_trylock_t they set acquired = 1 while local_unlock_*() clears it.
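
For illustration only (not part of this patch; the per-CPU variable and
function names below are made up), this is roughly how a local_trylock_t
is expected to be used to protect per-CPU data touched from both process
context and a reentrant context such as NMI:

/* Hypothetical usage sketch; initialization details omitted. */
static DEFINE_PER_CPU(local_trylock_t, demo_lock);
static DEFINE_PER_CPU(u64, demo_bytes);

static void demo_add_process_ctx(u64 n)
{
    unsigned long flags;

    /* Always succeeds here; irqs are disabled and acquired = 1. */
    local_lock_irqsave(&demo_lock, flags);
    __this_cpu_add(demo_bytes, n);
    local_unlock_irqrestore(&demo_lock, flags);
}

static bool demo_add_nmi(u64 n)
{
    unsigned long flags;

    /* Fails (returns false) if this CPU already holds the lock. */
    if (!local_trylock_irqsave(&demo_lock, flags))
        return false;
    __this_cpu_add(demo_bytes, n);
    local_unlock_irqrestore(&demo_lock, flags);
    return true;
}

A caller that cannot tolerate failure uses the plain local_lock_*() side;
a reentrant caller uses the trylock side and falls back when it returns
false.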
...
When in hard IRQ or NMI, return false right away, since
spin_trylock() is not safe due to explicit locking in the underlying
rt_spin_trylock() implementation. Removing this explicit locking and
attempting only "trylock" is undesired due to PI implications.

The local_trylock() without _irqsave can be used to avoid the cost of
disabling/enabling interrupts by only disabling preemption, so
local_trylock() in an interrupt attempting to acquire the same
lock will return false.
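
A sketch of that cheaper pairing (hypothetical names, not part of this
patch): process context takes the lock with local_lock(), and an
interrupt-context path uses local_trylock() and backs off when the lock
is already held on this CPU:

static DEFINE_PER_CPU(local_trylock_t, pending_lock);
static DEFINE_PER_CPU(unsigned int, pending_count);

static void queue_from_task(void)
{
    /* No irq save/restore; only disables preemption, acquired = 1. */
    local_lock(&pending_lock);
    __this_cpu_inc(pending_count);
    local_unlock(&pending_lock);
}

static bool queue_from_irq(void)
{
    /* An irq interrupting the section above sees acquired == 1. */
    if (!local_trylock(&pending_lock))
        return false; /* caller must defer or drop the update */
    __this_cpu_inc(pending_count);
    local_unlock(&pending_lock);
    return true;
}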

Note there is no need to use local_inc for the acquired variable,
since it's a percpu variable with strict nesting scopes.

Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
include/linux/local_lock.h | 59 +-------
include/linux/local_lock_internal.h | 208 ++++++++++++----------------
mm/memcontrol.c | 39 +++---
3 files changed, 111 insertions(+), 195 deletions(-)

diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index XXXXXXX..XXXXXXX 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
...
-#define localtry_lock_irq(lock)        __localtry_lock_irq(lock)
-
-/**
- * localtry_lock_irqsave - Acquire a per CPU local lock, save and disable
- *             interrupts
- * @lock:    The lock variable
- * @flags:    Storage for interrupt flags
- */
-#define localtry_lock_irqsave(lock, flags)                \
-    __localtry_lock_irqsave(lock, flags)
-
-/**
- * localtry_trylock - Try to acquire a per CPU local lock.
+ * local_trylock - Try to acquire a per CPU local lock
* @lock:    The lock variable
*
* The function can be used in any context such as NMI or HARDIRQ. Due to
* locking constrains it will _always_ fail to acquire the lock in NMI or
* HARDIRQ context on PREEMPT_RT.
*/
-#define localtry_trylock(lock)        __localtry_trylock(lock)
+#define local_trylock(lock)        __local_trylock(lock)

/**
- * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
- *             interrupts if acquired
+ * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
...
+    struct lockdep_map    dep_map;
+    struct task_struct    *owner;
+#endif
+    /*
+     * Same layout as local_lock_t with 'acquired' field at the end.
+     * (local_trylock_t *) will be cast to (local_lock_t *).
+     */
+    int acquired;
+} local_trylock_t;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
...
-#define INIT_LOCALTRY_LOCK(lockname)    { .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}

#define __local_lock_init(lock)                    \
do {                                \
@@ -XXX,XX +XXX,XX @@ do {                                \
    lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
             0, LD_WAIT_CONFIG, LD_WAIT_INV,    \
             LD_LOCK_PERCPU);            \
-    local_lock_debug_init(lock);                \
+    local_lock_debug_init((local_lock_t *)lock);        \
} while (0)

#define __spinlock_nested_bh_init(lock)                \
@@ -XXX,XX +XXX,XX @@ do {                                \
    local_lock_debug_init(lock);                \
} while (0)

+#define __local_lock_acquire(lock)                    \
+    do {                                \
...

#define __local_lock_irqsave(lock, flags)            \
    do {                            \
        local_irq_save(flags);                \
-        local_lock_acquire(this_cpu_ptr(lock));        \
-    } while (0)
-
-#define __local_unlock(lock)                    \
-    do {                            \
-        local_lock_release(this_cpu_ptr(lock));        \
-        preempt_enable();                \
-    } while (0)
-
-#define __local_unlock_irq(lock)                \
-    do {                            \
-        local_lock_release(this_cpu_ptr(lock));        \
-        local_irq_enable();                \
+        __local_lock_acquire(lock);            \
    } while (0)

-#define __local_unlock_irqrestore(lock, flags)            \
-    do {                            \
-        local_lock_release(this_cpu_ptr(lock));        \
-        local_irq_restore(flags);            \
-    } while (0)
-
-#define __local_lock_nested_bh(lock)                \
-    do {                            \
-        lockdep_assert_in_softirq();            \
-        local_lock_acquire(this_cpu_ptr(lock));    \
-    } while (0)
-
-#define __local_unlock_nested_bh(lock)                \
-    local_lock_release(this_cpu_ptr(lock))
-
-/* localtry_lock_t variants */
-
-#define __localtry_lock_init(lock)                \
-do {                                \
-    __local_lock_init(&(lock)->llock);            \
-    WRITE_ONCE((lock)->acquired, 0);            \
-} while (0)
-
-#define __localtry_lock(lock)                    \
-    do {                            \
-        localtry_lock_t *lt;                \
-        preempt_disable();                \
-        lt = this_cpu_ptr(lock);            \
-        local_lock_acquire(&lt->llock);            \
-        WRITE_ONCE(lt->acquired, 1);            \
-    } while (0)
-
-#define __localtry_lock_irq(lock)                \
-    do {                            \
-        localtry_lock_t *lt;                \
-        local_irq_disable();                \
-        lt = this_cpu_ptr(lock);            \
-        local_lock_acquire(&lt->llock);            \
-        WRITE_ONCE(lt->acquired, 1);            \
-    } while (0)
-
-#define __localtry_lock_irqsave(lock, flags)            \
-    do {                            \
-        localtry_lock_t *lt;                \
-        local_irq_save(flags);                \
-        lt = this_cpu_ptr(lock);            \
-        local_lock_acquire(&lt->llock);            \
-        WRITE_ONCE(lt->acquired, 1);            \
-    } while (0)
-
-#define __localtry_trylock(lock)                \
+#define __local_trylock(lock)                    \
    ({                            \
-        localtry_lock_t *lt;                \
-        bool _ret;                    \
+        local_trylock_t *tl;                \
                                \
        preempt_disable();                \
-        lt = this_cpu_ptr(lock);            \
-        if (!READ_ONCE(lt->acquired)) {            \
-            WRITE_ONCE(lt->acquired, 1);        \
-            local_trylock_acquire(&lt->llock);    \
-            _ret = true;                \
-        } else {                    \
-            _ret = false;                \
+        tl = this_cpu_ptr(lock);            \
+        if (READ_ONCE(tl->acquired)) {            \
            preempt_enable();            \
+            tl = NULL;                \
+        } else {                    \
+            WRITE_ONCE(tl->acquired, 1);        \
+            local_trylock_acquire(            \
+                (local_lock_t *)tl);        \
        }                        \
-        _ret;                        \
+        !!tl;                        \
    })

-#define __localtry_trylock_irqsave(lock, flags)            \
+#define __local_trylock_irqsave(lock, flags)            \
    ({                            \
-        localtry_lock_t *lt;                \
-        bool _ret;                    \
+        local_trylock_t *tl;                \
                                \
        local_irq_save(flags);                \
-        lt = this_cpu_ptr(lock);            \
-        if (!READ_ONCE(lt->acquired)) {            \
-            WRITE_ONCE(lt->acquired, 1);        \
-            local_trylock_acquire(&lt->llock);    \
-            _ret = true;                \
-        } else {                    \
-            _ret = false;                \
+        tl = this_cpu_ptr(lock);            \
+        if (READ_ONCE(tl->acquired)) {            \
            local_irq_restore(flags);        \
+            tl = NULL;                \
+        } else {                    \
+            WRITE_ONCE(tl->acquired, 1);        \
+            local_trylock_acquire(            \
+                (local_lock_t *)tl);        \
        }                        \
-        _ret;                        \
+        !!tl;                        \
    })

-#define __localtry_unlock(lock)                    \
+#define __local_lock_release(lock)                    \
+    do {                                \
+        local_trylock_t *tl;                    \
+        local_lock_t *l;                    \
+                                    \
...
+                lockdep_assert(tl->acquired == 1);    \
+                WRITE_ONCE(tl->acquired, 0);        \
+            }),                        \
+            default:(void)0);                \
+        local_lock_release(l);                    \
+    } while (0)
+
+#define __local_unlock(lock)                    \
    do {                            \
-        localtry_lock_t *lt;                \
-        lt = this_cpu_ptr(lock);            \
-        WRITE_ONCE(lt->acquired, 0);            \
-        local_lock_release(&lt->llock);            \
+        __local_lock_release(lock);            \
        preempt_enable();                \
    } while (0)

-#define __localtry_unlock_irq(lock)                \
+#define __local_unlock_irq(lock)                \
    do {                            \
-        localtry_lock_t *lt;                \
-        lt = this_cpu_ptr(lock);            \
-        WRITE_ONCE(lt->acquired, 0);            \
-        local_lock_release(&lt->llock);            \
+        __local_lock_release(lock);            \
        local_irq_enable();                \
    } while (0)

-#define __localtry_unlock_irqrestore(lock, flags)        \
+#define __local_unlock_irqrestore(lock, flags)            \
    do {                            \
-        localtry_lock_t *lt;                \
-        lt = this_cpu_ptr(lock);            \
-        WRITE_ONCE(lt->acquired, 0);            \
-        local_lock_release(&lt->llock);            \
+        __local_lock_release(lock);            \
        local_irq_restore(flags);            \
    } while (0)

+#define __local_lock_nested_bh(lock)                \
+    do {                            \
+        lockdep_assert_in_softirq();            \
+        local_lock_acquire(this_cpu_ptr(lock));    \
+    } while (0)
+
+#define __local_unlock_nested_bh(lock)                \
+    local_lock_release(this_cpu_ptr(lock))
+
#else /* !CONFIG_PREEMPT_RT */
406
#else /* !CONFIG_PREEMPT_RT */
369
407
370
/*
408
/*
371
@@ -XXX,XX +XXX,XX @@ do {                                \
409
@@ -XXX,XX +XXX,XX @@ do {                                \
372
* critical section while staying preemptible.
410
* critical section while staying preemptible.
...
...