// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In the batching mode, entries should also be sorted by the code
	 * inside the already sorted list of entries, enabling a bsearch in
	 * the vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}

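/*
 * With CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE, the code/target/key members
 * are stored as offsets relative to the address of the jump_entry itself,
 * so sort()'s default byte-wise swap would corrupt them; the swap below
 * rebiases every field by the distance between the two slots.
 *
 * Worked sketch (hypothetical addresses, for illustration only): if the
 * entry at 0x1000 holds code = 0x500 (i.e. an absolute 0x1500) and moves
 * to the slot at 0x1040, its offset must become 0x500 - 0x40 = 0x4c0 so
 * that it still resolves to 0x1500 from its new location.
 */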
static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's
 * ok to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
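
/*
 * Illustrative reading of the counter (hypothetical key with two users,
 * for example only):
 *
 *	static_key_slow_inc(&key);	// enabled goes 0 -> -1 -> 1
 *	static_key_slow_inc(&key);	// enabled goes 1 -> 2
 *	static_key_count(&key);		// returns 2
 *	static_key_slow_dec(&key);	// enabled goes 2 -> 1
 *	static_key_slow_dec(&key);	// enabled goes 1 -> 0, patches text
 */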

/*
 * static_key_fast_inc_not_disabled - adds a user for a static key
 * @key: static key that must be already enabled
 *
 * The caller must make sure that the static key can't get disabled while
 * in this function. It doesn't patch jump labels, only adds a user to
 * an already enabled static key.
 *
 * Returns true if the increment was done. Unlike refcount_t the ref counter
 * is not saturated, but will fail to increment on overflow.
 */
bool static_key_fast_inc_not_disabled(struct static_key *key)
{
	int v;

	STATIC_KEY_CHECK_USE(key);
	/*
	 * Negative key->enabled has a special meaning: it sends
	 * static_key_slow_inc/dec() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().
	 *
	 * The INT_MAX overflow condition is either used by the networking
	 * code to reset or detected in the slow path of
	 * static_key_slow_inc_cpuslocked().
	 */
	v = atomic_read(&key->enabled);
	do {
		if (v <= 0 || v == INT_MAX)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));

	return true;
}
EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled);
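
/*
 * Minimal usage sketch (hypothetical caller, for illustration only): take
 * an extra reference on a key that is known to be held enabled, e.g. while
 * the caller already owns one reference of its own:
 *
 *	if (!static_key_fast_inc_not_disabled(&key))
 *		return -EBUSY;		// disabled or refcount overflow
 *	...
 *	static_key_slow_dec(&key);	// drop the reference again
 */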

bool static_key_slow_inc_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc/dec() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 */
	if (static_key_fast_inc_not_disabled(key))
		return true;

	guard(mutex)(&jump_label_mutex);
	/* Try to mark it as 'enabling in progress'. */
	if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
		jump_label_update(key);
		/*
		 * Ensure that when static_key_fast_inc_not_disabled() or
		 * static_key_dec_not_one() observe the positive value,
		 * they must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		/*
		 * While holding the mutex this should never observe
		 * anything other than a value >= 1 and succeed.
		 */
		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
			return false;
	}
	return true;
}

bool static_key_slow_inc(struct static_key *key)
{
	bool ret;

	cpus_read_lock();
	ret = static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);

static bool static_key_dec_not_one(struct static_key *key)
{
	int v;

	/*
	 * Go into the slow path if key::enabled is less than or equal to
	 * one. One is valid to shut down the key, anything less than one
	 * is an imbalance, which is handled at the call site.
	 *
	 * That includes the special case of '-1' which is set in
	 * static_key_slow_inc_cpuslocked(), but that's harmless as it is
	 * fully serialized in the slow path below. By the time this task
	 * acquires the jump label lock the value is back to one and the
	 * retry under the lock must succeed.
	 */
	v = atomic_read(&key->enabled);
	do {
		/*
		 * Warn about the '-1' case though, since that means a
		 * decrement is concurrent with a first (0->1) increment. IOW
		 * people are trying to disable something that wasn't yet fully
		 * enabled. This suggests an ordering problem on the user side.
		 */
		WARN_ON_ONCE(v < 0);

		/*
		 * Warn about underflow, and lie about success in an attempt to
		 * not make things worse.
		 */
		if (WARN_ON_ONCE(v == 0))
			return true;

		if (v <= 1)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));

	return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();
	int val;

	if (static_key_dec_not_one(key))
		return;

	guard(mutex)(&jump_label_mutex);
	val = atomic_read(&key->enabled);
	/*
	 * It should be impossible to observe -1 with jump_label_mutex held,
	 * see static_key_slow_inc_cpuslocked().
	 */
	if (WARN_ON_ONCE(val == -1))
		return;
	/*
	 * Cannot already be 0, something went sideways.
	 */
	if (WARN_ON_ONCE(val == 0))
		return;

	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_dec_not_one(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
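
/*
 * Illustrative usage sketch ('my_key' is a hypothetical name): a deferred
 * key whose disable side is rate limited, so rapid enable/disable cycles
 * don't cause a storm of text patching. Assumes the
 * static_key_slow_dec_deferred() wrapper from jump_label_ratelimit.h:
 *
 *	static struct static_key_deferred my_key;
 *
 *	jump_label_rate_limit(&my_key, HZ);	// defer decrements by ~1s
 *	static_key_slow_inc(&my_key.key);	// enable immediately
 *	static_key_slow_dec_deferred(&my_key);	// disable via delayed work
 */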

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end, bool init)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (init || !jump_entry_is_init(iter)) {
			if (addr_conflict(iter, start, end))
				return 1;
		}
		iter++;
	}

	return 0;
}

#ifndef arch_jump_label_transform_static
static void arch_jump_label_transform_static(struct jump_entry *entry,
					     enum jump_label_type type)
{
	/* nothing to do on most architectures */
}
#endif

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
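
/*
 * Illustrative bit layout (assuming 8-byte aligned pointers, which leaves
 * the two low bits free for flags; the address is an example only):
 *
 *	key->type = 0x...e8 | JUMP_TYPE_TRUE	-> entries[] at 0x...e8,
 *						   initial state 'true'
 *	key->type = 0x...e8 | JUMP_TYPE_LINKED	-> key->next points to a
 *						   static_key_mod list
 *
 * Writing key->entries (or key->next) clobbers the flag bits through the
 * union, which is why the setters re-OR the saved type bits afterwards.
 */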

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
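
/*
 * Condensed form of the truth table in linux/jump_label.h, for reference
 * (branch == 1 means the "likely" variant was used at the branch site;
 * JUMP_LABEL_NOP == 0, JUMP_LABEL_JMP == 1):
 *
 *	enabled	branch	->	instruction
 *	   0	   0		NOP
 *	   1	   0		JMP
 *	   0	   1		JMP
 *	   1	   1		NOP
 */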

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		/*
		 * This skips patching built-in __exit, which
		 * is part of init_section_contains() but is
		 * not part of kernel_text_address().
		 *
		 * Skipping built-in __exit is fine since it
		 * will never be executed.
		 */
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}

#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {

		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and try again.
			 */
			arch_jump_label_transform_apply();
			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
		}
	}
	arch_jump_label_transform_apply();
}
#endif

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		in_init = init_section_contains((void *)jump_entry_code(iter), 1);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

static inline bool static_key_sealed(struct static_key *key)
{
	return (key->type & JUMP_TYPE_LINKED) && !(key->type & ~JUMP_TYPE_MASK);
}

static inline void static_key_seal(struct static_key *key)
{
	unsigned long type = key->type & JUMP_TYPE_TRUE;
	key->type = JUMP_TYPE_LINKED | type;
}
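
/*
 * Note (summarizing the two helpers above): a sealed key is encoded as
 * JUMP_TYPE_LINKED with a NULL pointer payload, i.e. the LINKED flag set
 * but no static_key_mod list attached. This encoding is unambiguous
 * because a genuinely linked key always carries a non-NULL key->next.
 */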

void jump_label_init_ro(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_entry *iter;

	if (WARN_ON_ONCE(!static_key_initialized))
		return;

	cpus_read_lock();
	jump_label_lock();

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk = jump_entry_key(iter);

		if (!is_kernel_ro_after_init((unsigned long)iterk))
			continue;

		if (static_key_sealed(iterk))
			continue;

		static_key_seal(iterk);
	}

	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		in_init = within_module_init(jump_entry_code(iter), mod);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}

		/*
		 * If the key was sealed at init, then there's no need to keep a
		 * reference to its module entries - just patch them now and be
		 * done with it.
		 */
		if (static_key_sealed(key))
			goto do_poke;

		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
do_poke:
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No @jlm allocated because key was sealed at init. */
		if (static_key_sealed(key))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end, init);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	bool init = system_state < SYSTEM_RUNNING;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod) {
		stop = mod->jump_entries + mod->num_jump_entries;
		init = mod->state == MODULE_STATE_COMING;
	}
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, init);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */