Lines Matching +full:reserved +full:- +full:cpu +full:- +full:vectors

1 /* SPDX-License-Identifier: GPL-2.0 */
14 ((((base) & _AC(0xff000000,ULL)) << (56-24)) | \
16 (((limit) & _AC(0x000f0000,ULL)) << (48-16)) | \
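
The two matched lines above (file lines 14 and 16) come from the GDT_ENTRY() macro that packs a conventional 8-byte segment descriptor out of flags, base and limit. A minimal userspace sketch of the same packing, assuming the non-matched lines of the macro handle the flags, base[23:0] and limit[15:0] fields:

#include <stdint.h>
#include <stdio.h>

/* Pack an 8-byte descriptor the way GDT_ENTRY() does (sketch, not kernel source). */
static uint64_t gdt_entry(uint64_t flags, uint64_t base, uint64_t limit)
{
	return ((base  & 0xff000000ULL) << (56 - 24)) |	/* base[31:24]  -> bits 63:56 */
	       ((flags & 0x0000f0ffULL) << 40)        |	/* access byte + G/D/L/AVL    */
	       ((limit & 0x000f0000ULL) << (48 - 16)) |	/* limit[19:16] -> bits 51:48 */
	       ((base  & 0x00ffffffULL) << 16)        |	/* base[23:0]   -> bits 39:16 */
	        (limit & 0x0000ffffULL);		/* limit[15:0]  -> bits 15:0  */
}

int main(void)
{
	/* Flat 4 GiB ring-0 code segment: flags 0xc09b, base 0, limit 0xfffff. */
	printf("%#llx\n", (unsigned long long)gdt_entry(0xc09b, 0, 0xfffff));
	/* Prints 0xcf9b000000ffff, the classic flat code descriptor. */
	return 0;
}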
61 * The layout of the per-CPU GDT under Linux:
63 * 0 - null <=== cacheline #1
64 * 1 - reserved
65 * 2 - reserved
66 * 3 - reserved
68 * 4 - unused <=== cacheline #2
69 * 5 - unused
71 * ------- start of TLS (Thread-Local Storage) segments:
73 * 6 - TLS segment #1 [ glibc's TLS segment ]
74 * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
75 * 8 - TLS segment #3 <=== cacheline #3
76 * 9 - reserved
77 * 10 - reserved
78 * 11 - reserved
80 * ------- start of kernel segments:
82 * 12 - kernel code segment <=== cacheline #4
83 * 13 - kernel data segment
84 * 14 - default user CS
85 * 15 - default user DS
86 * 16 - TSS <=== cacheline #5
87 * 17 - LDT
88 * 18 - PNPBIOS support (16->32 gate)
89 * 19 - PNPBIOS support
90 * 20 - PNPBIOS support <=== cacheline #6
91 * 21 - PNPBIOS support
92 * 22 - PNPBIOS support
93 * 23 - APM BIOS support
94 * 24 - APM BIOS support <=== cacheline #7
95 * 25 - APM BIOS support
97 * 26 - ESPFIX small SS
98 * 27 - per-cpu [ offset to per-cpu data area ]
99 * 28 - VDSO getcpu
100 * 29 - unused
101 * 30 - unused
102 * 31 - TSS for double fault handler
105 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
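
A GDT index maps to a selector value as (index << 3) | RPL, with RPL 3 for everything handed to user space and the TI bit (bit 2) clear for GDT selectors. A small sketch of that arithmetic, assuming the 32-bit values GDT_ENTRY_TLS_MIN = 6 and GDT_ENTRY_TLS_ENTRIES = 3 implied by the layout above:

#include <stdio.h>

#define GDT_ENTRY_TLS_MIN	6
#define GDT_ENTRY_TLS_ENTRIES	3
#define GDT_ENTRY_TLS_MAX	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)

/* Selector = GDT index * 8, OR'ed with the requested privilege level. */
static unsigned int selector(unsigned int gdt_index, unsigned int rpl)
{
	return (gdt_index << 3) | rpl;
}

int main(void)
{
	/* Prints selectors 0x33, 0x3b and 0x43 for the three TLS entries. */
	for (int i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
		printf("TLS entry %d -> selector %#x\n", i, selector(i, 3));
	return 0;
}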
165 #else /* 64-bit: */
177 * GDT layout to get 64-bit SYSCALL/SYSRET support right. SYSRET hardcodes
180 * if returning to 32-bit userspace: cs = STAR.SYSRET_CS,
181 * if returning to 64-bit userspace: cs = STAR.SYSRET_CS+16,
185 * thus USER_DS should be between 32-bit and 64-bit code selectors:
210 * expressed with the +3 value for user-space selectors:
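
A sketch of the selector arithmetic SYSRET performs, assuming the usual 64-bit selector values __USER32_CS = 0x23 (GDT entry 4), __USER_DS = 0x2b (entry 5) and __USER_CS = 0x33 (entry 6), with MSR_STAR's SYSRET_CS field programmed to __USER32_CS; the +3 RPL mentioned above is already folded into these values:

#include <assert.h>

#define __USER32_CS	0x23	/* GDT entry 4, RPL 3 */
#define __USER_DS	0x2b	/* GDT entry 5, RPL 3 */
#define __USER_CS	0x33	/* GDT entry 6, RPL 3 */

int main(void)
{
	unsigned int star_sysret_cs = __USER32_CS;

	/* SYSRET to 32-bit user space: cs = SYSRET_CS, ss = SYSRET_CS + 8. */
	assert(star_sysret_cs == __USER32_CS);
	assert(star_sysret_cs + 8 == __USER_DS);

	/* SYSRET to 64-bit user space: cs = SYSRET_CS + 16, ss = SYSRET_CS + 8. */
	assert(star_sysret_cs + 16 == __USER_CS);
	assert(star_sysret_cs + 8 == __USER_DS);

	return 0;
}

This is why USER_DS has to sit between the 32-bit and 64-bit user code descriptors: SYSRET derives all three selectors from one MSR field by fixed offsets.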
225 /* Bitmask of exception vectors which push an error code on the stack: */
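
A sketch of how such a bitmask is built from the vector numbers, using only the classical architectural vectors that push an error code: #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14) and #AC(17). Newer vectors such as #CP and #VC also push an error code; whether the kernel's actual mask includes them depends on the version, so they are left out of this illustration:

#include <stdio.h>

int main(void)
{
	static const int errcode_vectors[] = { 8, 10, 11, 12, 13, 14, 17 };
	unsigned int mask = 0;

	for (size_t i = 0; i < sizeof(errcode_vectors) / sizeof(errcode_vectors[0]); i++)
		mask |= 1u << errcode_vectors[i];

	printf("%#x\n", mask);	/* prints 0x27d00 */
	return 0;
}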
232 /* Bit size and mask of CPU number stored in the per CPU data (and TSC_AUX) */
238 /* Helper functions to store/load CPU and node numbers */
240 static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node)
242 return (node << VDSO_CPUNODE_BITS) | cpu;
245 static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
250 * Load CPU and node number from the GDT. LSL is faster than RDTSCP
262 if (cpu)
263 *cpu = (p & VDSO_CPUNODE_MASK);
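
The matched lines above are the CPU/node packing helpers. A hedged userspace sketch of the same encode/decode, assuming VDSO_CPUNODE_BITS = 12 and VDSO_CPUNODE_MASK = 0xfff as defined in this header; the kernel stores the packed value in the limit field of the "VDSO getcpu" / per-CPU GDT entry shown in the layouts above (and in TSC_AUX) so LSL or RDPID can recover it cheaply, but here only the arithmetic is round-tripped:

#include <assert.h>

#define VDSO_CPUNODE_BITS	12
#define VDSO_CPUNODE_MASK	0xfff

/* Pack node into the high bits, cpu into the low VDSO_CPUNODE_BITS bits. */
static unsigned long encode_cpunode(int cpu, unsigned long node)
{
	return (node << VDSO_CPUNODE_BITS) | cpu;
}

/* Split a packed value back into cpu and node; either pointer may be NULL. */
static void decode_cpunode(unsigned long p, unsigned int *cpu, unsigned int *node)
{
	if (cpu)
		*cpu = p & VDSO_CPUNODE_MASK;
	if (node)
		*node = p >> VDSO_CPUNODE_BITS;
}

int main(void)
{
	unsigned int cpu, node;

	decode_cpunode(encode_cpunode(42, 3), &cpu, &node);
	assert(cpu == 42 && node == 3);
	return 0;
}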
301 * This is always the case on Intel CPUs and, even on 64-bit AMD CPUs, any
322 * On 32-bit systems, the hidden parts of FS and GS are unobservable if
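
The two comment fragments above concern how much of FS/GS state is observable: the visible selector and the hidden base are separate pieces of state on 64-bit. A hedged userspace sketch (x86-64 Linux only, not kernel source) that reads the FS selector with a plain MOV and the FS base via arch_prctl(ARCH_GET_FS); glibc normally points FS at the thread control block, so the selector is typically 0 while the base is non-zero:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef ARCH_GET_FS
#define ARCH_GET_FS 0x1003	/* from asm/prctl.h */
#endif

int main(void)
{
	unsigned short fs_sel;
	unsigned long fs_base = 0;

	/* The visible selector is readable with an ordinary MOV from %fs. */
	asm volatile("mov %%fs, %0" : "=r" (fs_sel));

	/* The hidden base is only reachable via arch_prctl() (or RDFSBASE where enabled). */
	syscall(SYS_arch_prctl, ARCH_GET_FS, &fs_base);

	printf("FS selector = %#hx, FS base = %#lx\n", fs_sel, fs_base);
	return 0;
}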