From: Alexei Starovoitov <ast@kernel.org>

Partially revert commit 0aaddfb06882 ("locking/local_lock: Introduce localtry_lock_t").
Remove the localtry_*() helpers, since the localtry_lock() name might
be misinterpreted as "try lock".

Introduce local_trylock[_irqsave]() helpers that only work
with the newly introduced local_trylock_t type.
Note that an attempt to use local_trylock[_irqsave]() with local_lock_t
will cause a compilation failure.

Usage and behavior in !PREEMPT_RT:

local_lock_t lock;                       // sizeof(lock) == 0
local_lock(&lock);                       // preempt disable
local_lock_irqsave(&lock, ...);          // irq save
if (local_trylock_irqsave(&lock, ...))   // compilation error

local_trylock_t lock;                    // sizeof(lock) == 4
local_lock(&lock);                       // preempt disable, acquired = 1
local_lock_irqsave(&lock, ...);          // irq save, acquired = 1
if (local_trylock(&lock))                // if (!acquired) preempt disable, acquired = 1
if (local_trylock_irqsave(&lock, ...))   // if (!acquired) irq save, acquired = 1

The existing local_lock_*() macros can be used either with
local_lock_t or local_trylock_t.
With local_trylock_t they set acquired = 1 while local_unlock_*() clears it.

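For illustration only (my_stock and my_consume are hypothetical names; the
pattern mirrors what the mm/memcontrol.c hunk below does to memcg_stock), a
per-CPU cache protected by local_trylock_t could look like:

  #include <linux/local_lock.h>
  #include <linux/percpu.h>

  struct my_stock {
          local_trylock_t lock;
          unsigned int cached;
  };

  static DEFINE_PER_CPU(struct my_stock, my_stock) = {
          .lock = INIT_LOCAL_TRYLOCK(lock),
  };

  /* May be called from any context, including NMI. */
  static bool my_consume(unsigned int nr)
  {
          unsigned long flags;
          bool ret = false;

          /* Fails instead of deadlocking when this CPU already holds the lock. */
          if (!local_trylock_irqsave(&my_stock.lock, flags))
                  return false;

          if (__this_cpu_read(my_stock.cached) >= nr) {
                  __this_cpu_sub(my_stock.cached, nr);
                  ret = true;
          }
          local_unlock_irqrestore(&my_stock.lock, flags);
          return ret;
  }
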
...

When in hard IRQ or NMI, return false right away, since spin_trylock()
is not safe there due to the explicit locking in the underlying
rt_spin_trylock() implementation. Removing this explicit locking and
attempting only "trylock" is undesired due to PI implications.

local_trylock() without _irqsave can be used to avoid the cost of
disabling/enabling interrupts: it only disables preemption, so a
local_trylock() in an interrupt handler attempting to acquire the same
lock will return false.

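Reusing the hypothetical my_stock above, a task-context fast path can then
skip the irqsave entirely:

  /* Task context: disables preemption only, IRQs stay enabled. */
  static void my_refill(unsigned int nr)
  {
          if (!local_trylock(&my_stock.lock))
                  return;         /* this CPU already holds the lock */
          __this_cpu_add(my_stock.cached, nr);
          local_unlock(&my_stock.lock);
  }

If an interrupt fires while my_refill() holds the lock, a local_trylock()
from the handler sees acquired == 1 and returns false instead of deadlocking.
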
Note there is no need to use local_inc for the acquired variable,
since it's a percpu variable with strict nesting scopes.

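To illustrate the nesting (this interleaving is not from the patch): on one
CPU the accesses always nest, so no read-modify-write can race with another
writer:

  /*
   *  task:  local_lock_irqsave()      ->  WRITE_ONCE(acquired, 1)
   *   nmi:  local_trylock_irqsave()   ->  READ_ONCE(acquired) == 1, fails
   *  task:  local_unlock_irqrestore() ->  WRITE_ONCE(acquired, 0)
   *
   * The NMI runs to completion before the task resumes, so plain
   * stores suffice and local_inc()-style atomics would only add cost.
   */
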
Note that guard(local_lock)(&lock) works only for "local_lock_t lock".

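For example (illustrative sketch, assuming the usual cleanup.h-based guard
definitions in local_lock.h; my_llock and my_update are made-up names):

  static DEFINE_PER_CPU(local_lock_t, my_llock) = INIT_LOCAL_LOCK(my_llock);

  static void my_update(void)
  {
          guard(local_lock)(&my_llock);   /* local_unlock() runs at scope exit */
          /* ... critical section ... */
  }
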
The patch also makes sure that local_lock_release(l) is called before
WRITE_ONCE(l->acquired, 0). Though IRQs are disabled at this point,
an NMI can still arrive; with the opposite order, local_trylock()
from that NMI would succeed and local_lock_acquire(l) would warn.

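In other words, the unlock path must order the two stores like this
(simplified sketch of __local_lock_release() below, with the _Generic()
plumbing elided):

  local_lock_t *l = (local_lock_t *)this_cpu_ptr(lock);
  local_trylock_t *tl = (local_trylock_t *)l;

  local_lock_release(l);          /* release the dep_map first ...      */
  WRITE_ONCE(tl->acquired, 0);    /* ... then reopen the trylock window */

With the stores reversed, an NMI arriving between them could take the lock
while lockdep still considers it held, triggering the warning.
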
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Fixes: 0aaddfb06882 ("locking/local_lock: Introduce localtry_lock_t")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 include/linux/local_lock.h          |  58 ++------
 include/linux/local_lock_internal.h | 207 ++++++++++++----------------
 mm/memcontrol.c                     |  39 +++---
 3 files changed, 114 insertions(+), 190 deletions(-)

diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index XXXXXXX..XXXXXXX 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
...
-#define localtry_lock_irq(lock)	__localtry_lock_irq(lock)
-
-/**
- * localtry_lock_irqsave - Acquire a per CPU local lock, save and disable
- *			   interrupts
- * @lock:	The lock variable
- * @flags:	Storage for interrupt flags
+ * local_trylock_init - Runtime initialize a lock instance
  */
-#define localtry_lock_irqsave(lock, flags)	\
-	__localtry_lock_irqsave(lock, flags)
+#define local_trylock_init(lock)	__local_trylock_init(lock)
 
 /**
- * localtry_trylock - Try to acquire a per CPU local lock.
+ * local_trylock - Try to acquire a per CPU local lock
  * @lock:	The lock variable
  *
  * The function can be used in any context such as NMI or HARDIRQ. Due to
  * locking constraints it will _always_ fail to acquire the lock in NMI or
  * HARDIRQ context on PREEMPT_RT.
  */
-#define localtry_trylock(lock)		__localtry_trylock(lock)
+#define local_trylock(lock)		__local_trylock(lock)
 
 /**
- * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
- *			      interrupts if acquired
+ * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
...
 #endif
 } local_lock_t;
 
+/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
 typedef struct {
 	local_lock_t	llock;
-	unsigned int	acquired;
-} localtry_lock_t;
+	u8		acquired;
+} local_trylock_t;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define LOCAL_LOCK_DEBUG_INIT(lockname)		\
@@ -XXX,XX +XXX,XX @@ typedef struct {
 	}, \
 	.owner = NULL,
 
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)		\
+	.llock = { LOCAL_LOCK_DEBUG_INIT((lockname).llock) },
+
 static inline void local_lock_acquire(local_lock_t *l)
 {
 	lock_map_acquire(&l->dep_map);
@@ -XXX,XX +XXX,XX @@ static inline void local_lock_debug_init(local_lock_t *l)
 }
 #else /* CONFIG_DEBUG_LOCK_ALLOC */
 # define LOCAL_LOCK_DEBUG_INIT(lockname)
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
 static inline void local_lock_acquire(local_lock_t *l) { }
 static inline void local_trylock_acquire(local_lock_t *l) { }
 static inline void local_lock_release(local_lock_t *l) { }
@@ -XXX,XX +XXX,XX @@ static inline void local_lock_debug_init(local_lock_t *l) { }
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
 
 #define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
-#define INIT_LOCALTRY_LOCK(lockname)	{ .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}
+#define INIT_LOCAL_TRYLOCK(lockname)	{ LOCAL_TRYLOCK_DEBUG_INIT(lockname) }
 
 #define __local_lock_init(lock)				\
 do {							\
@@ -XXX,XX +XXX,XX @@ do { \
 	local_lock_debug_init(lock);			\
 } while (0)
 
+#define __local_trylock_init(lock)	__local_lock_init(lock.llock)
+
 #define __spinlock_nested_bh_init(lock)			\
 do {							\
 	static struct lock_class_key __key;		\
@@ -XXX,XX +XXX,XX @@ do { \
 	local_lock_debug_init(lock);			\
 } while (0)
 
+#define __local_lock_acquire(lock)			\
+	do {						\
+		local_trylock_t *tl;			\
+		local_lock_t *l;			\
...
 
 #define __local_lock_irqsave(lock, flags)		\
 	do {						\
 		local_irq_save(flags);			\
-		local_lock_acquire(this_cpu_ptr(lock));	\
-	} while (0)
-
-#define __local_unlock(lock)				\
-	do {						\
-		local_lock_release(this_cpu_ptr(lock));	\
-		preempt_enable();			\
+		__local_lock_acquire(lock);		\
 	} while (0)
 
-#define __local_unlock_irq(lock)			\
-	do {						\
-		local_lock_release(this_cpu_ptr(lock));	\
-		local_irq_enable();			\
-	} while (0)
-
-#define __local_unlock_irqrestore(lock, flags)		\
-	do {						\
-		local_lock_release(this_cpu_ptr(lock));	\
-		local_irq_restore(flags);		\
-	} while (0)
-
-#define __local_lock_nested_bh(lock)			\
-	do {						\
-		lockdep_assert_in_softirq();		\
-		local_lock_acquire(this_cpu_ptr(lock));	\
-	} while (0)
-
-#define __local_unlock_nested_bh(lock)			\
-	local_lock_release(this_cpu_ptr(lock))
-
-/* localtry_lock_t variants */
-
-#define __localtry_lock_init(lock)			\
-do {							\
-	__local_lock_init(&(lock)->llock);		\
-	WRITE_ONCE((lock)->acquired, 0);		\
-} while (0)
-
-#define __localtry_lock(lock)				\
-	do {						\
-		localtry_lock_t *lt;			\
-		preempt_disable();			\
-		lt = this_cpu_ptr(lock);		\
-		local_lock_acquire(&lt->llock);		\
-		WRITE_ONCE(lt->acquired, 1);		\
-	} while (0)
-
-#define __localtry_lock_irq(lock)			\
-	do {						\
-		localtry_lock_t *lt;			\
-		local_irq_disable();			\
-		lt = this_cpu_ptr(lock);		\
-		local_lock_acquire(&lt->llock);		\
-		WRITE_ONCE(lt->acquired, 1);		\
-	} while (0)
-
-#define __localtry_lock_irqsave(lock, flags)		\
-	do {						\
-		localtry_lock_t *lt;			\
-		local_irq_save(flags);			\
-		lt = this_cpu_ptr(lock);		\
-		local_lock_acquire(&lt->llock);		\
-		WRITE_ONCE(lt->acquired, 1);		\
-	} while (0)
-
-#define __localtry_trylock(lock)			\
+#define __local_trylock(lock)				\
 	({						\
-		localtry_lock_t *lt;			\
-		bool _ret;				\
+		local_trylock_t *tl;			\
 							\
 		preempt_disable();			\
-		lt = this_cpu_ptr(lock);		\
-		if (!READ_ONCE(lt->acquired)) {		\
-			WRITE_ONCE(lt->acquired, 1);	\
-			local_trylock_acquire(&lt->llock); \
-			_ret = true;			\
-		} else {				\
-			_ret = false;			\
+		tl = this_cpu_ptr(lock);		\
+		if (READ_ONCE(tl->acquired)) {		\
 			preempt_enable();		\
+			tl = NULL;			\
+		} else {				\
+			WRITE_ONCE(tl->acquired, 1);	\
+			local_trylock_acquire(		\
+				(local_lock_t *)tl);	\
 		}					\
-		_ret;					\
+		!!tl;					\
 	})
 
-#define __localtry_trylock_irqsave(lock, flags)	\
+#define __local_trylock_irqsave(lock, flags)		\
 	({						\
-		localtry_lock_t *lt;			\
-		bool _ret;				\
+		local_trylock_t *tl;			\
 							\
 		local_irq_save(flags);			\
-		lt = this_cpu_ptr(lock);		\
-		if (!READ_ONCE(lt->acquired)) {		\
-			WRITE_ONCE(lt->acquired, 1);	\
-			local_trylock_acquire(&lt->llock); \
-			_ret = true;			\
-		} else {				\
-			_ret = false;			\
+		tl = this_cpu_ptr(lock);		\
+		if (READ_ONCE(tl->acquired)) {		\
 			local_irq_restore(flags);	\
+			tl = NULL;			\
+		} else {				\
+			WRITE_ONCE(tl->acquired, 1);	\
+			local_trylock_acquire(		\
+				(local_lock_t *)tl);	\
 		}					\
-		_ret;					\
+		!!tl;					\
 	})
 
-#define __localtry_unlock(lock)			\
+#define __local_lock_release(lock)			\
+	do {						\
+		local_trylock_t *tl;			\
+		local_lock_t *l;			\
+							\
+		l = (local_lock_t *)this_cpu_ptr(lock);	\
+		tl = (local_trylock_t *)l;		\
+		local_lock_release(l);			\
+		_Generic((lock),			\
+			local_trylock_t *: ({		\
+				lockdep_assert(tl->acquired == 1); \
+				WRITE_ONCE(tl->acquired, 0); \
+			}),				\
+			default:(void)0);		\
+	} while (0)
+
+#define __local_unlock(lock)				\
 	do {						\
-		localtry_lock_t *lt;			\
-		lt = this_cpu_ptr(lock);		\
-		WRITE_ONCE(lt->acquired, 0);		\
-		local_lock_release(&lt->llock);		\
+		__local_lock_release(lock);		\
 		preempt_enable();			\
 	} while (0)
 
-#define __localtry_unlock_irq(lock)			\
+#define __local_unlock_irq(lock)			\
 	do {						\
-		localtry_lock_t *lt;			\
-		lt = this_cpu_ptr(lock);		\
-		WRITE_ONCE(lt->acquired, 0);		\
-		local_lock_release(&lt->llock);		\
+		__local_lock_release(lock);		\
 		local_irq_enable();			\
 	} while (0)
 
-#define __localtry_unlock_irqrestore(lock, flags)	\
+#define __local_unlock_irqrestore(lock, flags)		\
 	do {						\
-		localtry_lock_t *lt;			\
-		lt = this_cpu_ptr(lock);		\
-		WRITE_ONCE(lt->acquired, 0);		\
-		local_lock_release(&lt->llock);		\
+		__local_lock_release(lock);		\
 		local_irq_restore(flags);		\
 	} while (0)
 
+#define __local_lock_nested_bh(lock)			\
+	do {						\
+		lockdep_assert_in_softirq();		\
+		local_lock_acquire(this_cpu_ptr(lock));	\
+	} while (0)
+
+#define __local_unlock_nested_bh(lock)			\
+	local_lock_release(this_cpu_ptr(lock))
+
 #else /* !CONFIG_PREEMPT_RT */
 
 /*
@@ -XXX,XX +XXX,XX @@ do { \
  * critical section while staying preemptible.
...
-typedef spinlock_t	localtry_lock_t;
+typedef spinlock_t	local_trylock_t;
 
 #define INIT_LOCAL_LOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))
-#define INIT_LOCALTRY_LOCK(lockname)	INIT_LOCAL_LOCK(lockname)
+#define INIT_LOCAL_TRYLOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))
 
 #define __local_lock_init(l)				\
 	do {						\
 		local_spin_lock_init((l));		\
 	} while (0)
 
+#define __local_trylock_init(l)	__local_lock_init(l)
+
 #define __local_lock(__lock)				\
 	do {						\
 		migrate_disable();			\
@@ -XXX,XX +XXX,XX @@ do { \
 	spin_unlock(this_cpu_ptr((lock)));		\
 } while (0)
 
-/* localtry_lock_t variants */
...
@@ -XXX,XX +XXX,XX @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE	0
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
-	.stock_lock = INIT_LOCALTRY_LOCK(stock_lock),
+	.stock_lock = INIT_LOCAL_TRYLOCK(stock_lock),
 };
 static DEFINE_MUTEX(percpu_charge_mutex);
 
@@ -XXX,XX +XXX,XX @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
 	if (nr_pages > MEMCG_CHARGE_BATCH)
...