Skip to content

Commit b341afb

Browse files
Waiman Long authored and Ingo Molnar committed
locking/mutex: Enable optimistic spinning of woken waiter
This patch makes the waiter that sets the HANDOFF flag start spinning instead of sleeping until the handoff is complete or the owner sleeps. Otherwise, the handoff will cause the optimistic spinners to abort spinning as the handed-off owner may not be running. Tested-by: Jason Low <jason.low2@hpe.com> Signed-off-by: Waiman Long <Waiman.Long@hpe.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Ding Tianhong <dingtianhong@huawei.com> Cc: Imre Deak <imre.deak@intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Paul E. McKenney <paulmck@us.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tim Chen <tim.c.chen@linux.intel.com> Cc: Will Deacon <Will.Deacon@arm.com> Link: http://lkml.kernel.org/r/1472254509-27508-2-git-send-email-Waiman.Long@hpe.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent a40ca56 commit b341afb

File tree

1 file changed

+54
-23
lines changed

1 file changed

+54
-23
lines changed

kernel/locking/mutex.c

Lines changed: 54 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -416,24 +416,39 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
416416
*
417417
* Returns true when the lock was taken, otherwise false, indicating
418418
* that we need to jump to the slowpath and sleep.
419+
*
420+
* The waiter flag is set to true if the spinner is a waiter in the wait
421+
* queue. The waiter-spinner will spin on the lock directly and concurrently
422+
* with the spinner at the head of the OSQ, if present, until the owner is
423+
* changed to itself.
419424
*/
420425
static bool mutex_optimistic_spin(struct mutex *lock,
421-
struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
426+
struct ww_acquire_ctx *ww_ctx,
427+
const bool use_ww_ctx, const bool waiter)
422428
{
423429
struct task_struct *task = current;
424430

425-
if (!mutex_can_spin_on_owner(lock))
426-
goto done;
431+
if (!waiter) {
432+
/*
433+
* The purpose of the mutex_can_spin_on_owner() function is
434+
* to eliminate the overhead of osq_lock() and osq_unlock()
435+
* in case spinning isn't possible. As a waiter-spinner
436+
* is not going to take OSQ lock anyway, there is no need
437+
* to call mutex_can_spin_on_owner().
438+
*/
439+
if (!mutex_can_spin_on_owner(lock))
440+
goto fail;
427441

428-
/*
429-
* In order to avoid a stampede of mutex spinners trying to
430-
* acquire the mutex all at once, the spinners need to take a
431-
* MCS (queued) lock first before spinning on the owner field.
432-
*/
433-
if (!osq_lock(&lock->osq))
434-
goto done;
442+
/*
443+
* In order to avoid a stampede of mutex spinners trying to
444+
* acquire the mutex all at once, the spinners need to take a
445+
* MCS (queued) lock first before spinning on the owner field.
446+
*/
447+
if (!osq_lock(&lock->osq))
448+
goto fail;
449+
}
435450

436-
while (true) {
451+
for (;;) {
437452
struct task_struct *owner;
438453

439454
if (use_ww_ctx && ww_ctx->acquired > 0) {
@@ -449,23 +464,28 @@ static bool mutex_optimistic_spin(struct mutex *lock,
449464
* performed the optimistic spinning cannot be done.
450465
*/
451466
if (READ_ONCE(ww->ctx))
452-
break;
467+
goto fail_unlock;
453468
}
454469

455470
/*
456471
* If there's an owner, wait for it to either
457472
* release the lock or go to sleep.
458473
*/
459474
owner = __mutex_owner(lock);
460-
if (owner && !mutex_spin_on_owner(lock, owner))
461-
break;
475+
if (owner) {
476+
if (waiter && owner == task) {
477+
smp_mb(); /* ACQUIRE */
478+
break;
479+
}
462480

463-
/* Try to acquire the mutex if it is unlocked. */
464-
if (__mutex_trylock(lock, false)) {
465-
osq_unlock(&lock->osq);
466-
return true;
481+
if (!mutex_spin_on_owner(lock, owner))
482+
goto fail_unlock;
467483
}
468484

485+
/* Try to acquire the mutex if it is unlocked. */
486+
if (__mutex_trylock(lock, waiter))
487+
break;
488+
469489
/*
470490
* The cpu_relax() call is a compiler barrier which forces
471491
* everything in this loop to be re-loaded. We don't need
@@ -475,8 +495,17 @@ static bool mutex_optimistic_spin(struct mutex *lock,
475495
cpu_relax_lowlatency();
476496
}
477497

478-
osq_unlock(&lock->osq);
479-
done:
498+
if (!waiter)
499+
osq_unlock(&lock->osq);
500+
501+
return true;
502+
503+
504+
fail_unlock:
505+
if (!waiter)
506+
osq_unlock(&lock->osq);
507+
508+
fail:
480509
/*
481510
* If we fell out of the spin path because of need_resched(),
482511
* reschedule now, before we try-lock the mutex. This avoids getting
@@ -495,7 +524,8 @@ static bool mutex_optimistic_spin(struct mutex *lock,
495524
}
496525
#else
497526
static bool mutex_optimistic_spin(struct mutex *lock,
498-
struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
527+
struct ww_acquire_ctx *ww_ctx,
528+
const bool use_ww_ctx, const bool waiter)
499529
{
500530
return false;
501531
}
@@ -600,7 +630,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
600630
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
601631

602632
if (__mutex_trylock(lock, false) ||
603-
mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
633+
mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
604634
/* got the lock, yay! */
605635
lock_acquired(&lock->dep_map, ip);
606636
if (use_ww_ctx)
@@ -669,7 +699,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
669699
* state back to RUNNING and fall through the next schedule(),
670700
* or we must see its unlock and acquire.
671701
*/
672-
if (__mutex_trylock(lock, first))
702+
if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
703+
__mutex_trylock(lock, first))
673704
break;
674705

675706
spin_lock_mutex(&lock->wait_lock, flags);

0 commit comments

Comments
 (0)