1 /*
2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
4 */
5
6 #ifndef __SYSDEP_STUB_H
7 #define __SYSDEP_STUB_H
8
9 #include <stddef.h>
10 #include <sysdep/ptrace_user.h>
11 #include <generated/asm-offsets.h>
12 #include <linux/stddef.h>
13
14 #define STUB_MMAP_NR __NR_mmap
15 #define MMAP_OFFSET(o) (o)
16
17 #define __syscall_clobber "r11","rcx","memory"
18 #define __syscall "syscall"
19
/*
 * Issue a zero-argument system call from the stub.
 *
 * Linux x86-64 ABI: the syscall number goes in rax ("a"/"0"), the return
 * value comes back in rax.  The syscall instruction itself clobbers rcx
 * and r11, hence __syscall_clobber; "memory" keeps the compiler from
 * caching memory values across the call.
 */
static __always_inline long stub_syscall0(long syscall)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall) : __syscall_clobber );

	return ret;
}
30
/*
 * Issue a two-argument system call: number in rax, arg1 in rdi ("D"),
 * arg2 in rsi ("S"); result returned in rax.  rcx/r11 are clobbered by
 * the syscall instruction (__syscall_clobber).
 */
static __always_inline long stub_syscall2(long syscall, long arg1, long arg2)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber );

	return ret;
}
41
/*
 * Issue a three-argument system call: number in rax, args in
 * rdi ("D"), rsi ("S"), rdx ("d"); result returned in rax.
 */
static __always_inline long stub_syscall3(long syscall, long arg1, long arg2,
					  long arg3)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3)
		: __syscall_clobber );

	return ret;
}
54
/*
 * Issue a four-argument system call.  The fourth syscall argument lives
 * in r10, for which GCC has no constraint letter, so it is loaded
 * manually with movq from a "g" (any register/memory) operand.  Listing
 * r10 in the clobbers prevents the compiler from placing any input
 * operand there.
 */
static __always_inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
					  long arg4)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; " __syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4)
		: __syscall_clobber, "r10" );

	return ret;
}
68
/*
 * Issue a five-argument system call.  Args 4 and 5 go in r10 and r8,
 * neither of which has a GCC constraint letter, so both are loaded
 * manually and declared as clobbers (see stub_syscall4).
 */
static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
					  long arg3, long arg4, long arg5)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4), "g" (arg5)
		: __syscall_clobber, "r10", "r8" );

	return ret;
}
82
/*
 * Issue a six-argument system call.  Args 4-6 go in r10, r8 and r9;
 * all three lack GCC constraint letters, so they are loaded manually
 * and declared as clobbers (see stub_syscall4).
 */
static __always_inline long stub_syscall6(long syscall, long arg1, long arg2,
					  long arg3, long arg4, long arg5,
					  long arg6)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; "
		__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4), "g" (arg5), "g" (arg6)
		: __syscall_clobber, "r10", "r8", "r9");

	return ret;
}
98
trap_myself(void)99 static __always_inline void trap_myself(void)
100 {
101 __asm("int3");
102 }
103
/*
 * Return the base address of the stub data area by rounding the current
 * stack pointer down to a STUB_DATA_PAGES * UM_KERN_PAGE_SIZE boundary.
 *
 * Assumes the stub's stack lives inside the stub data region and that
 * the region is aligned to its own size — TODO(review): confirm against
 * the stub mapping setup.  The result is computed in rax ("=a").
 */
static __always_inline void *get_stub_data(void)
{
	unsigned long ret;

	asm volatile (
		"movq %%rsp,%0 ;"
		"andq %1,%0"
		: "=a" (ret)
		: "g" (~(STUB_DATA_PAGES * UM_KERN_PAGE_SIZE - 1)));

	return (void *)ret;
}
116 #endif
117