seqlock/latch: Provide raw_read_seqcount_latch_retry()
[ Upstream commit d16317de9b ]

The read side of seqcount_latch consists of:

  do {
    seq = raw_read_seqcount_latch(&latch->seq);
    ...
  } while (read_seqcount_latch_retry(&latch->seq, seq));

which is asymmetric in the raw_ department, and sure enough,
read_seqcount_latch_retry() includes (explicit) instrumentation where
raw_read_seqcount_latch() does not.

This inconsistency becomes a problem when trying to use it from noinstr
code. As such, fix it by renaming and re-implementing
raw_read_seqcount_latch_retry() without the instrumentation.

Specifically the instrumentation in question is kcsan_atomic_next(0) in
do___read_seqcount_retry(). Losing this annotation is not a problem
because raw_read_seqcount_latch() does not pass through
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Tested-by: Michael Kelley <mikelley@microsoft.com>  # Hyper-V
Link: https://lore.kernel.org/r/20230519102715.233598176@infradead.org
Stable-dep-of: 5c1806c41c ("kcsan, seqlock: Support seqcount_latch_t")
Signed-off-by: Sasha Levin <sashal@kernel.org>
committed by Greg Kroah-Hartman
parent 6c1edccbc9
commit 2f7e3eac5f
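For readers following along, the latch pattern the changelog describes can be modeled in ordinary userspace C. The sketch below uses C11 atomics to stand in for the kernel's READ_ONCE()/smp_rmb(); all names (model_latch, model_query, ...) are illustrative, not the kernel API, and acquire ordering is the portable substitute for the address-dependency ordering the kernel relies on.

  #include <stdatomic.h>
  #include <stdbool.h>

  struct model_latch {
  	atomic_uint sequence;            /* latch counter; low bit picks a copy */
  	_Atomic unsigned long data[2];   /* two copies of the protected value */
  };

  /* ~ raw_read_seqcount_latch(): the kernel uses a plain READ_ONCE() and
   * relies on the dependent "seq & 1" load below; acquire is the portable
   * C11 stand-in for that dependency. No KCSAN annotations anywhere. */
  static unsigned model_read_latch(struct model_latch *l)
  {
  	return atomic_load_explicit(&l->sequence, memory_order_acquire);
  }

  /* ~ raw_read_seqcount_latch_retry(): an rmb, then a plain re-read. */
  static bool model_read_latch_retry(struct model_latch *l, unsigned start)
  {
  	atomic_thread_fence(memory_order_acquire);       /* ~ smp_rmb() */
  	return atomic_load_explicit(&l->sequence, memory_order_relaxed) != start;
  }

  /* The now-symmetric read loop from the changelog. */
  static unsigned long model_query(struct model_latch *l)
  {
  	unsigned seq;
  	unsigned long val;

  	do {
  		seq = model_read_latch(l);
  		val = atomic_load_explicit(&l->data[seq & 1],
  					   memory_order_relaxed);
  	} while (model_read_latch_retry(l, seq));

  	return val;
  }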
include/linux/rbtree_latch.h
@@ -206,7 +206,7 @@ latch_tree_find(void *key, struct latch_tree_root *root,
 	do {
 		seq = raw_read_seqcount_latch(&root->seq);
 		node = __lt_find(key, root, seq & 1, ops->comp);
-	} while (read_seqcount_latch_retry(&root->seq, seq));
+	} while (raw_read_seqcount_latch_retry(&root->seq, seq));
 
 	return node;
 }
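For context on why the reader's "seq & 1" is safe here: the tree writers flip the latch between updating the two tree copies, so the copy the low bit selects is never the one being modified. A sketch of the write side in include/linux/rbtree_latch.h of this era (paraphrased from memory, not verbatim; writers must be serialized externally):

  static __always_inline void
  latch_tree_insert(struct latch_tree_node *node,
  		  struct latch_tree_root *root,
  		  const struct latch_tree_ops *ops)
  {
  	raw_write_seqcount_latch(&root->seq);	/* readers move to copy 1 */
  	__lt_insert(node, root, 0, ops->less);	/* update copy 0 */
  	raw_write_seqcount_latch(&root->seq);	/* readers move to copy 0 */
  	__lt_insert(node, root, 1, ops->less);	/* update copy 1 */
  }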
include/linux/seqlock.h
@@ -675,9 +675,9 @@ typedef struct {
  *
  * Return: sequence counter raw value. Use the lowest bit as an index for
  * picking which data copy to read. The full counter must then be checked
- * with read_seqcount_latch_retry().
+ * with raw_read_seqcount_latch_retry().
  */
-static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
+static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
 {
 	/*
 	 * Pairs with the first smp_wmb() in raw_write_seqcount_latch().
@@ -687,16 +687,17 @@ static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
 }
 
 /**
- * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
  * @s: Pointer to seqcount_latch_t
  * @start: count, from raw_read_seqcount_latch()
  *
  * Return: true if a read section retry is required, else false
  */
-static inline int
-read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+static __always_inline int
+raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
 {
-	return read_seqcount_retry(&s->seqcount, start);
+	smp_rmb();
+	return unlikely(READ_ONCE(s->seqcount.sequence) != start);
 }
 
 /**
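The smp_rmb() in the new open-coded implementation pairs with the smp_wmb()s issued on the write side by raw_write_seqcount_latch(), which the docstring above references. Continuing the userspace model from earlier (release fences stand in for smp_wmb(); names remain illustrative, and writers are assumed to be serialized externally):

  /* Writer for the model latch: flip the counter, then update the copy
   * readers just stopped using, twice, so both copies end up current. */
  static void model_write(struct model_latch *l, unsigned long val)
  {
  	atomic_thread_fence(memory_order_release);            /* ~ smp_wmb() */
  	atomic_fetch_add_explicit(&l->sequence, 1, memory_order_relaxed);
  	atomic_thread_fence(memory_order_release);            /* ~ smp_wmb() */
  	atomic_store_explicit(&l->data[0], val, memory_order_relaxed);

  	atomic_thread_fence(memory_order_release);
  	atomic_fetch_add_explicit(&l->sequence, 1, memory_order_relaxed);
  	atomic_thread_fence(memory_order_release);
  	atomic_store_explicit(&l->data[1], val, memory_order_relaxed);
  }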
@@ -756,7 +757,7 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
  *		entry = data_query(latch->data[idx], ...);
  *
  *		// This includes needed smp_rmb()
- *	} while (read_seqcount_latch_retry(&latch->seq, seq));
+ *	} while (raw_read_seqcount_latch_retry(&latch->seq, seq));
  *
  *	return entry;
  * }
kernel/printk/printk.c
@@ -457,7 +457,7 @@ static u64 latched_seq_read_nolock(struct latched_seq *ls)
 		seq = raw_read_seqcount_latch(&ls->latch);
 		idx = seq & 0x1;
 		val = ls->val[idx];
-	} while (read_seqcount_latch_retry(&ls->latch, seq));
+	} while (raw_read_seqcount_latch_retry(&ls->latch, seq));
 
 	return val;
 }
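latched_seq is a concrete instance of the latch scheme: two u64 copies of a counter, read above without any lock. For reference, printk's matching writer follows the canonical latch pattern; a sketch of latched_seq_write() as it existed around this commit (paraphrased from memory, not verbatim, and relying on printk's own write-side serialization):

  static void latched_seq_write(struct latched_seq *ls, u64 val)
  {
  	raw_write_seqcount_latch(&ls->latch);	/* readers move to val[1] */
  	ls->val[0] = val;
  	raw_write_seqcount_latch(&ls->latch);	/* readers move to val[0] */
  	ls->val[1] = val;
  }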
kernel/time/sched_clock.c
@@ -76,7 +76,7 @@ notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
 
 notrace int sched_clock_read_retry(unsigned int seq)
 {
-	return read_seqcount_latch_retry(&cd.seq, seq);
+	return raw_read_seqcount_latch_retry(&cd.seq, seq);
 }
 
 unsigned long long notrace sched_clock(void)
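sched_clock_read_retry() is the loop-termination half of a begin/retry pair, which is why it must stay uninstrumented on this noinstr-adjacent path. A simplified sketch of how a caller like sched_clock() consumes the pair (not verbatim kernel code; the cycle-counter arithmetic is elided):

  unsigned long long notrace sched_clock(void)
  {
  	u64 ns;
  	unsigned int seq;
  	struct clock_read_data *rd;

  	do {
  		rd = sched_clock_read_begin(&seq);
  		ns = rd->epoch_ns;	/* + cycle-counter delta in the real code */
  	} while (sched_clock_read_retry(seq));

  	return ns;
  }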
kernel/time/timekeeping.c
@@ -450,7 +450,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base);
 		now += fast_tk_get_delta_ns(tkr);
-	} while (read_seqcount_latch_retry(&tkf->seq, seq));
+	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
 
 	return now;
 }
@@ -549,7 +549,7 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
 		basem = ktime_to_ns(tkr->base);
 		baser = ktime_to_ns(tkr->base_real);
 		delta = fast_tk_get_delta_ns(tkr);
-	} while (read_seqcount_latch_retry(&tkf->seq, seq));
+	} while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
 
 	if (mono)
 		*mono = basem + delta;