/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_KPROBES_H
#define _LINUX_KPROBES_H
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2005-May	Hien Nguyen <hien@us.ibm.com> and Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
#include <linux/objpool.h>
#include <linux/rethook.h>
#include <asm/kprobes.h>

#ifdef CONFIG_KPROBES

/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE	0x00000001
#define KPROBE_HIT_SS		0x00000002
#define KPROBE_REENTER		0x00000004
#define KPROBE_HIT_SSDONE	0x00000008

#else /* !CONFIG_KPROBES */
#include <asm-generic/kprobes.h>
typedef int kprobe_opcode_t;
struct arch_specific_insn {
	int dummy;
};
#endif /* CONFIG_KPROBES */

struct kprobe;
struct pt_regs;
struct kretprobe;
struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
				       unsigned long flags);
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
				    struct pt_regs *);
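/*
 * Example handlers matching the typedefs above (a minimal sketch; the
 * handler names and messages are illustrative only, not part of this API):
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre: hit at %px\n", p->addr);
 *		return 0;	// 0: continue with the probed instruction
 *	}
 *
 *	static void my_post(struct kprobe *p, struct pt_regs *regs,
 *			    unsigned long flags)
 *	{
 *		pr_info("post: probe at %px completed\n", p->addr);
 *	}
 *
 * A non-zero return from the pre-handler tells the kprobes core that the
 * handler has changed the execution path itself (e.g. by modifying 'regs').
 */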

struct kprobe {
	struct hlist_node hlist;

	/* list of kprobes for multi-handler support */
	struct list_head list;

	/* count the number of times this probe was temporarily disarmed */
	unsigned long nmissed;

	/* location of the probe point */
	kprobe_opcode_t *addr;

	/* Allow user to indicate symbol name of the probe point */
	const char *symbol_name;

	/* Offset into the symbol */
	unsigned int offset;

	/* Called before addr is executed. */
	kprobe_pre_handler_t pre_handler;

	/* Called after addr is executed, unless... */
	kprobe_post_handler_t post_handler;

	/* Saved opcode (which has been replaced with breakpoint) */
	kprobe_opcode_t opcode;

	/* copy of the original instruction */
	struct arch_specific_insn ainsn;

	/*
	 * Indicates various status flags.
	 * Protected by kprobe_mutex after this kprobe is registered.
	 */
	u32 flags;
};

/* Kprobe status flags */
#define KPROBE_FLAG_GONE	1 /* breakpoint has already gone */
#define KPROBE_FLAG_DISABLED	2 /* probe is temporarily disabled */
#define KPROBE_FLAG_OPTIMIZED	4 /*
				   * probe is really optimized.
				   * NOTE:
				   * this flag is only for optimized_kprobe.
				   */
#define KPROBE_FLAG_FTRACE	8 /* probe is using ftrace */
#define KPROBE_FLAG_ON_FUNC_ENTRY	16 /* probe is on the function entry */

/* Has this kprobe gone ? */
static inline bool kprobe_gone(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_GONE;
}

/* Is this kprobe disabled ? */
static inline bool kprobe_disabled(struct kprobe *p)
{
	return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
}

/* Is this kprobe really running the optimized path ? */
static inline bool kprobe_optimized(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_OPTIMIZED;
}

/* Does this kprobe use ftrace ? */
static inline bool kprobe_ftrace(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_FTRACE;
}

/*
 * Function-return probe -
 * Note:
 * The user needs to provide a handler function and initialize maxactive.
 * maxactive - The maximum number of instances of the probed function that
 * can be active concurrently.
 * nmissed - tracks the number of times the probed function's return was
 * ignored, due to maxactive being too low.
 */
struct kretprobe_holder {
	struct kretprobe __rcu *rp;
	struct objpool_head	pool;
};

struct kretprobe {
	struct kprobe kp;
	kretprobe_handler_t handler;
	kretprobe_handler_t entry_handler;
	int maxactive;
	int nmissed;
	size_t data_size;
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
	struct rethook *rh;
#else
	struct kretprobe_holder *rph;
#endif
};
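/*
 * Example usage (a minimal sketch; the probed symbol, handler names and
 * 'struct my_data' are illustrative): time a function by pairing an
 * entry_handler with a return handler via per-instance data.
 *
 *	struct my_data { ktime_t entry_stamp; };
 *
 *	static int my_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		((struct my_data *)ri->data)->entry_stamp = ktime_get();
 *		return 0;	// 0: track this instance; non-zero: skip it
 *	}
 *
 *	static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		struct my_data *d = (struct my_data *)ri->data;
 *
 *		pr_info("returned to %lx after %lld ns\n",
 *			get_kretprobe_retaddr(ri),
 *			ktime_to_ns(ktime_sub(ktime_get(), d->entry_stamp)));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.kp.symbol_name	= "kernel_clone",
 *		.entry_handler	= my_entry,
 *		.handler	= my_ret,
 *		.data_size	= sizeof(struct my_data),
 *		.maxactive	= 20,
 *	};
 */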

#define KRETPROBE_MAX_DATA_SIZE	4096

struct kretprobe_instance {
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
	struct rethook_node node;
#else
	struct rcu_head rcu;
	struct llist_node llist;
	struct kretprobe_holder *rph;
	kprobe_opcode_t *ret_addr;
	void *fp;
#endif
	char data[];
};

struct kretprobe_blackpoint {
	const char *name;
	void *addr;
};

struct kprobe_blacklist_entry {
	struct list_head list;
	unsigned long start_addr;
	unsigned long end_addr;
};

#ifdef CONFIG_KPROBES
DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

extern void kprobe_busy_begin(void);
extern void kprobe_busy_end(void);

#ifdef CONFIG_KRETPROBES
/* Check whether @p is used for implementing a trampoline. */
extern int arch_trampoline_kprobe(struct kprobe *p);

#ifdef CONFIG_KRETPROBE_ON_RETHOOK
static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
	/* 'rethook::data' is an immutable field, so it can be accessed freely. */
	return (struct kretprobe *)ri->node.rethook->data;
}
static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
{
	return ri->node.ret_addr;
}
#else
extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
				   struct pt_regs *regs);
void arch_kretprobe_fixup_return(struct pt_regs *regs,
				 kprobe_opcode_t *correct_ret_addr);

void __kretprobe_trampoline(void);
/*
 * Since some architectures use structured function pointers (function
 * descriptors), use dereference_kernel_function_descriptor() to get the
 * real function address.
 */
static nokprobe_inline void *kretprobe_trampoline_addr(void)
{
	return dereference_kernel_function_descriptor(__kretprobe_trampoline);
}

/* If the trampoline handler is called from a kprobe, use this version */
unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
					     void *frame_pointer);

static nokprobe_inline
unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
					   void *frame_pointer)
{
	unsigned long ret;
	/*
	 * Set a dummy kprobe to avoid kretprobe recursion. Since a
	 * kretprobe never runs in a kprobe handler, no kprobe should
	 * be running at this point.
	 */
	kprobe_busy_begin();
	ret = __kretprobe_trampoline_handler(regs, frame_pointer);
	kprobe_busy_end();

	return ret;
}

static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
	return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held());
}

static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
{
	return (unsigned long)ri->ret_addr;
}
#endif /* CONFIG_KRETPROBE_ON_RETHOOK */

#else /* !CONFIG_KRETPROBES */
static inline void arch_prepare_kretprobe(struct kretprobe *rp,
					struct pt_regs *regs)
{
}
static inline int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
#endif /* CONFIG_KRETPROBES */

/* Markers of '_kprobe_blacklist' section */
extern unsigned long __start_kprobe_blacklist[];
extern unsigned long __stop_kprobe_blacklist[];

extern struct kretprobe_blackpoint kretprobe_blacklist[];

extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern int arch_populate_kprobe_blacklist(void);
extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);

extern bool within_kprobe_blacklist(unsigned long addr);
extern int kprobe_add_ksym_blacklist(unsigned long entry);
extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end);

struct kprobe_insn_cache {
	struct mutex mutex;
	void *(*alloc)(void);	/* allocate insn page */
	void (*free)(void *);	/* free insn page */
	const char *sym;	/* symbol for insn pages */
	struct list_head pages; /* list of kprobe_insn_page */
	size_t insn_size;	/* size of instruction slot */
	int nr_garbage;
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
extern void __free_insn_slot(struct kprobe_insn_cache *c,
			     kprobe_opcode_t *slot, int dirty);
/* sleep-less address checking routine */
extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c,
				unsigned long addr);

#define DEFINE_INSN_CACHE_OPS(__name)					\
extern struct kprobe_insn_cache kprobe_##__name##_slots;		\
									\
static inline kprobe_opcode_t *get_##__name##_slot(void)		\
{									\
	return __get_insn_slot(&kprobe_##__name##_slots);		\
}									\
									\
static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
{									\
	__free_insn_slot(&kprobe_##__name##_slots, slot, dirty);	\
}									\
									\
static inline bool is_kprobe_##__name##_slot(unsigned long addr)	\
{									\
	return __is_insn_slot_addr(&kprobe_##__name##_slots, addr);	\
}
#define KPROBE_INSN_PAGE_SYM		"kprobe_insn_page"
#define KPROBE_OPTINSN_PAGE_SYM		"kprobe_optinsn_page"
int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
			     unsigned long *value, char *type, char *sym);
#else /* !__ARCH_WANT_KPROBES_INSN_SLOT */
#define DEFINE_INSN_CACHE_OPS(__name)					\
static inline bool is_kprobe_##__name##_slot(unsigned long addr)	\
{									\
	return false;							\
}
#endif

DEFINE_INSN_CACHE_OPS(insn);
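/*
 * With __ARCH_WANT_KPROBES_INSN_SLOT, the DEFINE_INSN_CACHE_OPS(insn)
 * invocation above expands to the following wrappers around the global
 * 'kprobe_insn_slots' cache:
 *
 *	kprobe_opcode_t *get_insn_slot(void);
 *	void free_insn_slot(kprobe_opcode_t *slot, int dirty);
 *	bool is_kprobe_insn_slot(unsigned long addr);
 *
 * Without it, only is_kprobe_insn_slot() is generated, and it always
 * returns false.
 */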

#ifdef CONFIG_OPTPROBES
/*
 * Internal structure for direct jump optimized probe
 */
struct optimized_kprobe {
	struct kprobe kp;
	struct list_head list;	/* list for optimizing queue */
	struct arch_optimized_insn optinsn;
};

/* Architecture dependent functions for direct jump optimization */
extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
					 struct kprobe *orig);
extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
extern void arch_optimize_kprobes(struct list_head *oplist);
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
					kprobe_opcode_t *addr);

extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);

DEFINE_INSN_CACHE_OPS(optinsn);

extern void wait_for_kprobe_optimizer(void);
bool optprobe_queued_unopt(struct optimized_kprobe *op);
bool kprobe_disarmed(struct kprobe *p);
#else /* !CONFIG_OPTPROBES */
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *ops, struct ftrace_regs *fregs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
/* Set when ftrace has been killed: kprobes on ftrace must be disabled for safety */
extern bool kprobe_ftrace_disabled __read_mostly;
extern void kprobe_ftrace_kill(void);
#else
static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	return -EINVAL;
}
static inline void kprobe_ftrace_kill(void) {}
#endif /* CONFIG_KPROBES_ON_FTRACE */

/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);

/* kprobe_running() will just return the current_kprobe on this CPU */
static inline struct kprobe *kprobe_running(void)
{
	return __this_cpu_read(current_kprobe);
}

static inline void reset_current_kprobe(void)
{
	__this_cpu_write(current_kprobe, NULL);
}

static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
{
	return this_cpu_ptr(&kprobe_ctlblk);
}

kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry);

int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);

int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
int register_kretprobes(struct kretprobe **rps, int num);
void unregister_kretprobes(struct kretprobe **rps, int num);
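/*
 * Typical registration flow (a minimal sketch; the probed symbol and the
 * 'my_pre' handler are illustrative):
 *
 *	static struct kprobe my_kprobe = {
 *		.symbol_name	= "do_sys_open",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	ret = register_kprobe(&my_kprobe);	// 0 on success, -errno on failure
 *	...
 *	unregister_kprobe(&my_kprobe);
 *
 * The _kprobes/_kretprobes variants register or unregister an array of
 * 'num' probes in one call.
 */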

#if defined(CONFIG_KRETPROBE_ON_RETHOOK) || !defined(CONFIG_KRETPROBES)
#define kprobe_flush_task(tk)	do {} while (0)
#else
void kprobe_flush_task(struct task_struct *tk);
#endif

void kprobe_free_init_mem(void);

int disable_kprobe(struct kprobe *kp);
int enable_kprobe(struct kprobe *kp);
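/*
 * A registered probe can be disarmed and re-armed without unregistering
 * it, e.g. (sketch, reusing 'my_kprobe' from above):
 *
 *	disable_kprobe(&my_kprobe);	// stays registered, no longer fires
 *	...
 *	enable_kprobe(&my_kprobe);	// arm it again
 */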

void dump_kprobe(struct kprobe *kp);

void *alloc_insn_page(void);

void *alloc_optinsn_page(void);
void free_optinsn_page(void *page);

int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *sym);

int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
			    char *type, char *sym);

int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data);

#else /* !CONFIG_KPROBES: */

static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	return 0;
}
static inline struct kprobe *get_kprobe(void *addr)
{
	return NULL;
}
static inline struct kprobe *kprobe_running(void)
{
	return NULL;
}
#define kprobe_busy_begin()	do {} while (0)
#define kprobe_busy_end()	do {} while (0)

static inline int register_kprobe(struct kprobe *p)
{
	return -EOPNOTSUPP;
}
static inline int register_kprobes(struct kprobe **kps, int num)
{
	return -EOPNOTSUPP;
}
static inline void unregister_kprobe(struct kprobe *p)
{
}
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
static inline int register_kretprobe(struct kretprobe *rp)
{
	return -EOPNOTSUPP;
}
static inline int register_kretprobes(struct kretprobe **rps, int num)
{
	return -EOPNOTSUPP;
}
static inline void unregister_kretprobe(struct kretprobe *rp)
{
}
static inline void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
static inline void kprobe_free_init_mem(void)
{
}
static inline void kprobe_ftrace_kill(void)
{
}
static inline int disable_kprobe(struct kprobe *kp)
{
	return -EOPNOTSUPP;
}
static inline int enable_kprobe(struct kprobe *kp)
{
	return -EOPNOTSUPP;
}

static inline bool within_kprobe_blacklist(unsigned long addr)
{
	return true;
}
static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
				     char *type, char *sym)
{
	return -ERANGE;
}
#endif /* CONFIG_KPROBES */

static inline int disable_kretprobe(struct kretprobe *rp)
{
	return disable_kprobe(&rp->kp);
}
static inline int enable_kretprobe(struct kretprobe *rp)
{
	return enable_kprobe(&rp->kp);
}

#ifndef CONFIG_KPROBES
static inline bool is_kprobe_insn_slot(unsigned long addr)
{
	return false;
}
#endif /* !CONFIG_KPROBES */

#ifndef CONFIG_OPTPROBES
static inline bool is_kprobe_optinsn_slot(unsigned long addr)
{
	return false;
}
#endif /* !CONFIG_OPTPROBES */

#ifdef CONFIG_KRETPROBES
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
{
	return is_rethook_trampoline(addr);
}

static nokprobe_inline
unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
				      struct llist_node **cur)
{
	return rethook_find_ret_addr(tsk, (unsigned long)fp, cur);
}
#else
static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
{
	return (void *)addr == kretprobe_trampoline_addr();
}

unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
				      struct llist_node **cur);
#endif
#else
static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
{
	return false;
}

static nokprobe_inline
unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
				      struct llist_node **cur)
{
	return 0;
}
#endif

/* Returns true if kprobes handled the fault */
static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
					      unsigned int trap)
{
	if (!IS_ENABLED(CONFIG_KPROBES))
		return false;
	if (user_mode(regs))
		return false;
	/*
	 * To be potentially processing a kprobe fault and to be allowed
	 * to call kprobe_running(), we have to be non-preemptible.
	 */
	if (preemptible())
		return false;
	if (!kprobe_running())
		return false;
	return kprobe_fault_handler(regs, trap);
}

#endif /* _LINUX_KPROBES_H */