[PATCH] sched_clock: Disable seqlock lockdep usage in sched_clock
John Stultz
john.stultz at linaro.org
Thu Jan 2 16:54:46 EST 2014
Unfortunately the seqlock lockdep enablement can't be used
in sched_clock, since the lockdep infrastructure eventually
calls into sched_clock, which causes a deadlock.

Thus, this patch adds _no_lockdep() variants of the seqcount
write methods and switches all generic sched_clock usage over
to the _no_lockdep methods.

This solves the issue I was able to reproduce, but it would
be good to have Krzysztof confirm it also solves the problem
he reported.
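For reference, a writer that can itself be reached from inside lockdep
would use the new helpers roughly as sketched below. This is an
illustrative example only, not part of the diff; my_seq, my_ns,
my_update() and my_read() are made-up names, and my_seq is assumed to
be set up with seqcount_init() during initialization:

	#include <linux/seqlock.h>
	#include <linux/irqflags.h>
	#include <linux/types.h>

	static seqcount_t my_seq;	/* seqcount_init(&my_seq) at init time */
	static u64 my_ns;

	/* Writer: skip the lockdep annotation, but still provide our own
	 * exclusion (single writer, IRQs off) and rely on the smp_wmb()
	 * ordering inside the _no_lockdep helpers.
	 */
	static void notrace my_update(u64 ns)
	{
		unsigned long flags;

		raw_local_irq_save(flags);
		write_seqcount_begin_no_lockdep(&my_seq);
		my_ns = ns;
		write_seqcount_end_no_lockdep(&my_seq);
		raw_local_irq_restore(flags);
	}

	/* Reader: the usual retry loop, also without touching lockdep */
	static u64 notrace my_read(void)
	{
		unsigned long seq;
		u64 ns;

		do {
			seq = read_seqcount_begin_no_lockdep(&my_seq);
			ns = my_ns;
		} while (read_seqcount_retry(&my_seq, seq));

		return ns;
	}

The sched_clock changes in the diff below follow exactly this pattern.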
Cc: Krzysztof Hałasa <khalasa at piap.pl>
Cc: Uwe Kleine-König <u.kleine-koenig at pengutronix.de>
Cc: Willy Tarreau <w at 1wt.eu>
Cc: Ingo Molnar <mingo at kernel.org>
Cc: Peter Zijlstra <peterz at infradead.org>
Cc: Stephen Boyd <sboyd at codeaurora.org>
Cc: Linus Torvalds <torvalds at linux-foundation.org>
Cc: linux-arm-kernel at lists.infradead.org
Reported-by: Krzysztof Hałasa <khalasa at piap.pl>
Signed-off-by: John Stultz <john.stultz at linaro.org>
---
include/linux/seqlock.h | 19 +++++++++++++++----
kernel/time/sched_clock.c | 6 +++---
2 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index cf87a24..7664f68 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -206,14 +206,26 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
}
+
+static inline void write_seqcount_begin_no_lockdep(seqcount_t *s)
+{
+ s->sequence++;
+ smp_wmb();
+}
+
+static inline void write_seqcount_end_no_lockdep(seqcount_t *s)
+{
+ smp_wmb();
+ s->sequence++;
+}
+
/*
* Sequence counter only version assumes that callers are using their
* own mutexing.
*/
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
- s->sequence++;
- smp_wmb();
+ write_seqcount_begin_no_lockdep(s);
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}
@@ -225,8 +237,7 @@ static inline void write_seqcount_begin(seqcount_t *s)
static inline void write_seqcount_end(seqcount_t *s)
{
seqcount_release(&s->dep_map, 1, _RET_IP_);
- smp_wmb();
- s->sequence++;
+ write_seqcount_end_no_lockdep(s);
}
/**
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 68b7993..13561a0 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -74,7 +74,7 @@ unsigned long long notrace sched_clock(void)
return cd.epoch_ns;
do {
- seq = read_seqcount_begin(&cd.seq);
+ seq = read_seqcount_begin_no_lockdep(&cd.seq);
epoch_cyc = cd.epoch_cyc;
epoch_ns = cd.epoch_ns;
} while (read_seqcount_retry(&cd.seq, seq));
@@ -99,10 +99,10 @@ static void notrace update_sched_clock(void)
cd.mult, cd.shift);
raw_local_irq_save(flags);
- write_seqcount_begin(&cd.seq);
+ write_seqcount_begin_no_lockdep(&cd.seq);
cd.epoch_ns = ns;
cd.epoch_cyc = cyc;
- write_seqcount_end(&cd.seq);
+ write_seqcount_end_no_lockdep(&cd.seq);
raw_local_irq_restore(flags);
}
--
1.8.3.2