On Tue, Apr 07, 2020 at 07:04:10PM +0100, Chris Wilson wrote:
Quoting Linus Torvalds (2020-04-07 18:28:34)
On Tue, Apr 7, 2020 at 9:04 AM Chris Wilson <chris@chris-wilson.co.uk> wrote:
[ . . . ]
There's some more shutting up required for KCSAN to bring the noise down to usable levels, which I hope has already been done so that I don't have to argue for it, such as:
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 1e6650ed066d..c7c8dd89f279 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -164,7 +164,7 @@ static inline void destroy_timer_on_stack(struct timer_list *timer) { }
  */
 static inline int timer_pending(const struct timer_list * timer)
 {
-	return timer->entry.pprev != NULL;
+	return READ_ONCE(timer->entry.pprev) != NULL;
This one is in mainline, courtesy of Eric Dumazet, though in a different form.
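If I remember the mainline version correctly, it goes through hlist_unhashed_lockless() instead of open-coding the READ_ONCE(), so the pprev load ends up marked one level down, roughly like this (reconstructed from memory, so the exact shape may differ):

static inline int timer_pending(const struct timer_list * timer)
{
	return !hlist_unhashed_lockless(&timer->entry);
}

with hlist_unhashed_lockless() doing the marked load:

static inline int hlist_unhashed_lockless(const struct hlist_node *h)
{
	return !READ_ONCE(h->pprev);
}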
The rest are still TBD.
Thanx, Paul
 }
 
 extern void add_timer_on(struct timer_list *timer, int cpu);

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 5352ce50a97e..7461b3f33629 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -565,8 +565,9 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 		/*
 		 * Use vcpu_is_preempted to detect lock holder preemption issue.
 		 */
-		if (!owner->on_cpu || need_resched() ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		if (!READ_ONCE(owner->on_cpu) ||
+		    need_resched() ||
+		    vcpu_is_preempted(task_cpu(owner))) {
 			ret = false;
 			break;
@@ -602,7 +603,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 	 * on cpu or its cpu is preempted
 	 */
 	if (owner)
-		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+		retval = READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
 	rcu_read_unlock();
 
 	/*
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 1f7734949ac8..4a81fba4cf70 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -75,7 +75,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
 		 * wait for either @lock to point to us, through its Step-B, or
 		 * wait for a new @node->next from its Step-C.
 		 */
-		if (node->next) {
+		if (READ_ONCE(node->next)) {
 			next = xchg(&node->next, NULL);
 			if (next)
 				break;
@@ -154,7 +154,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 */
 	for (;;) {
-		if (prev->next == node &&
+		if (READ_ONCE(prev->next) == node &&
 		    cmpxchg(&prev->next, node, NULL) == node)
 			break;
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 0d9b6be9ecc8..eef4835cecf2 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -650,7 +650,7 @@ static inline bool owner_on_cpu(struct task_struct *owner)
 	 * As lock holder preemption issue, we both skip spinning if
 	 * task is not on cpu or its cpu is preempted
 	 */
-	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+	return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
 }
 
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
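For what it's worth, all of the hunks above are instances of the same pattern: give the lockless reader a marked load so the tooling can tell an intentional race from an accidental one. Below is a minimal, stand-alone userspace sketch of that pattern (pthreads, with simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(); the owner_on_cpu flag and spinner() thread are made up for illustration, loosely modelled on mutex_spin_on_owner()):

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros. */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

static int owner_on_cpu = 1;	/* hypothetical stand-in for owner->on_cpu */

/* Like mutex_spin_on_owner(): spin only while the owner is believed to be
 * running; the load is marked because the writer below races with it by
 * design. */
static void *spinner(void *arg)
{
	(void)arg;
	while (READ_ONCE(owner_on_cpu))
		;
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, spinner, NULL);
	WRITE_ONCE(owner_on_cpu, 0);	/* the "owner" schedules out */
	pthread_join(t, NULL);
	printf("spinner stopped\n");
	return 0;
}

Both the load and the store are marked in the sketch; the hunks above only touch the read side.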