--- zzzz-none-000/linux-4.9.279/kernel/locking/mutex-debug.c	2021-08-08 06:38:54.000000000 +0000
+++ puma7-atom-6591-750/linux-4.9.279/kernel/locking/mutex-debug.c	2023-02-08 11:43:43.000000000 +0000
@@ -31,6 +31,7 @@
 {
 	memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
 	waiter->magic = waiter;
+	waiter->lock = lock;
 	INIT_LIST_HEAD(&waiter->list);
 }
 
@@ -54,7 +55,9 @@
 	SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
 
 	/* Mark the current thread as blocked on the lock: */
+	spin_lock(&task->blocked_on_ptrlock);
 	task->blocked_on = waiter;
+	spin_unlock(&task->blocked_on_ptrlock);
 }
 
 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
@@ -63,7 +66,10 @@
 	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
 	DEBUG_LOCKS_WARN_ON(waiter->task != task);
 	DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+
+	spin_lock(&task->blocked_on_ptrlock);
 	task->blocked_on = NULL;
+	spin_unlock(&task->blocked_on_ptrlock);
 
 	list_del_init(&waiter->list);
 	waiter->task = NULL;
@@ -101,6 +107,10 @@
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
 	lock->magic = lock;
+
+	spin_lock_init(&lock->owner_lock);
+	lock->owner_info.pid = NULL;
+	lock->owner_info.ip = 0;
 }
 
 /***
@@ -118,3 +128,97 @@
 }
 
 EXPORT_SYMBOL_GPL(mutex_destroy);
+
+int debug_mutex_task_blocked_by(struct task_struct *task,
+				struct callerinfo *owner_info)
+{
+	struct mutex_waiter *waiter;
+	struct callerinfo *oi;
+	unsigned long flags;
+	struct mutex *lock;
+	int ret;
+
+	/*
+	 * The locking procedure here is a bit delicate, since the waiter
+	 * lives on the stack of the waiting task, and basically the general
+	 * assumption is that only the waiting task may mess with it.
+	 *
+	 * But here, we're accessing the waiter out-of-context by following
+	 * the task's .blocked_on pointer. Dereferencing it thus requires
+	 * protection by holding an extra pointer lock.
+	 *
+	 * Since we're out-of-context, we also have to store a lock pointer in
+	 * the waiter data, and make sure that the waiting task doesn't change
+	 * it while we hold the lock.
+	 */
+
+	ret = spin_trylock_irqsave(&task->blocked_on_ptrlock, flags);
+	if (!ret)
+		/*
+		 * Blocking or unblocking in progress;
+		 * consider this "not blocked", at least not
+		 * in a critical way.
+		 */
+		goto notblocking;
+
+	waiter = task->blocked_on;
+	if (!waiter) {
+		spin_unlock_irqrestore(&task->blocked_on_ptrlock, flags);
+		goto notblocking;
+	}
+
+	lock = waiter->lock;
+	oi = &lock->owner_info;
+
+	ret = spin_trylock(&lock->owner_lock);
+	if (!ret) {
+		spin_unlock_irqrestore(&task->blocked_on_ptrlock, flags);
+		return -EBUSY;
+	}
+
+	owner_info->pid = get_pid(oi->pid);
+	owner_info->ip = oi->ip;
+
+	spin_unlock(&lock->owner_lock);
+	spin_unlock_irqrestore(&task->blocked_on_ptrlock, flags);
+
+	return 0;
+
+notblocking:
+	owner_info->pid = NULL;
+	owner_info->ip = 0;
+	return 0;
+}
+
+void show_blocking_mutex_holder(struct task_struct *task)
+{
+	struct callerinfo mutex_owner;
+	struct task_struct *ownertask;
+	int ret;
+
+	if (!task)
+		return;
+
+	ret = debug_mutex_task_blocked_by(task, &mutex_owner);
+	if (ret == 0 && mutex_owner.pid) {
+		printk("Task %s[%u] is blocked on a mutex\n",
+		       task->comm,
+		       task_pid_nr(task));
+		printk("Blocking mutex is held by: PID %d, taken at %pS\n",
+		       pid_nr(mutex_owner.pid),
+		       (void *)mutex_owner.ip);
+
+		ownertask = get_pid_task(mutex_owner.pid, PIDTYPE_PID);
+		if (ownertask) {
+			printk("Stack dump of holding task %s[%u]:\n",
+			       ownertask->comm,
+			       task_pid_nr(ownertask));
+			show_stack(ownertask, NULL);
+			put_task_struct(ownertask);
+		} else {
+			printk("Task holding mutex not found\n");
+		}
+
+		put_pid(mutex_owner.pid);
+	}
+}