Commit bd3239d2 (MF) — preprocessed source dump: Linux 3.0.4 x86 headers (types, bitops, byte-swap helpers), one table row per expanded line.
/*
 * Fixed-width integer primitives.
 * __sNN/__uNN are the exported (userspace-visible) spellings; the bare
 * sNN/uNN forms are kernel-internal.  Widths assume the i386 ABI
 * (short=16, int=32, long long=64 bits).
 */
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
/* __extension__ silences -pedantic about long long under C89 modes. */
__extension__ typedef __signed__ long long __s64;
__extension__ typedef unsigned long long __u64;

/* Kernel-internal aliases of the same widths. */
typedef signed char s8;
typedef unsigned char u8;
typedef signed short s16;
typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;

/* File mode bits (type/permissions) as stored in inodes. */
typedef unsigned short umode_t;
/*
 * Per-call-site record for ftrace branch profiling.  One static instance
 * is emitted (in section "_ftrace_branch") for each instrumented branch;
 * the union lets the same pair of counters be read either as
 * correct/incorrect, miss/hit, or as a two-element array.
 */
struct ftrace_branch_data {
	const char *func;	/* function containing the branch */
	const char *file;	/* source file of the branch */
	unsigned line;		/* line number of the branch */
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];	/* array view of the two counters */
	};
};

void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

/* Boolean constants, pre-<stdbool.h> kernel style (paired with bool below). */
enum {
	false = 0,
	true = 1
};
/*
 * __kernel_* ABI types for i386.  These fix the userspace-visible layout
 * of syscall arguments and structures, independent of any libc.
 */

/* select()-style fd bitmap: 1024 bits packed into unsigned longs. */
typedef struct {
	unsigned long fds_bits [(1024/(8 * sizeof(unsigned long)))];
} __kernel_fd_set;

/* Classic signal-handler function pointer. */
typedef void (*__kernel_sighandler_t)(int);

/* SysV IPC and message-queue handles. */
typedef int __kernel_key_t;
typedef int __kernel_mqd_t;

/* Filesystem / process identity scalars (16-bit legacy IDs on i386). */
typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;

/* Size/pointer-difference types for the 32-bit ABI. */
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;

/* Time-keeping scalars. */
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;

typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;

/* Explicit-width UID/GID variants plus the legacy "old" spellings. */
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;

/* 64-bit file offset (llseek and friends). */
typedef long long __kernel_loff_t;

/* Filesystem identifier returned by statfs(). */
typedef struct {
	int val[2];
} __kernel_fsid_t;
74 | typedef __u32 __kernel_dev_t; | |
75 | typedef __kernel_fd_set fd_set; | |
76 | typedef __kernel_dev_t dev_t; | |
77 | typedef __kernel_ino_t ino_t; | |
78 | typedef __kernel_mode_t mode_t; | |
79 | typedef __kernel_nlink_t nlink_t; | |
80 | typedef __kernel_off_t off_t; | |
81 | typedef __kernel_pid_t pid_t; | |
82 | typedef __kernel_daddr_t daddr_t; | |
83 | typedef __kernel_key_t key_t; | |
84 | typedef __kernel_suseconds_t suseconds_t; | |
85 | typedef __kernel_timer_t timer_t; | |
86 | typedef __kernel_clockid_t clockid_t; | |
87 | typedef __kernel_mqd_t mqd_t; | |
88 | typedef _Bool bool; | |
89 | typedef __kernel_uid32_t uid_t; | |
90 | typedef __kernel_gid32_t gid_t; | |
91 | typedef __kernel_uid16_t uid16_t; | |
92 | typedef __kernel_gid16_t gid16_t; | |
93 | typedef unsigned long uintptr_t; | |
94 | typedef __kernel_old_uid_t old_uid_t; | |
95 | typedef __kernel_old_gid_t old_gid_t; | |
96 | typedef __kernel_loff_t loff_t; | |
97 | typedef __kernel_size_t size_t; | |
98 | typedef __kernel_ssize_t ssize_t; | |
99 | typedef __kernel_ptrdiff_t ptrdiff_t; | |
100 | typedef __kernel_time_t time_t; | |
101 | typedef __kernel_clock_t clock_t; | |
102 | typedef __kernel_caddr_t caddr_t; | |
103 | typedef unsigned char u_char; | |
104 | typedef unsigned short u_short; | |
105 | typedef unsigned int u_int; | |
106 | typedef unsigned long u_long; | |
107 | typedef unsigned char unchar; | |
108 | typedef unsigned short ushort; | |
109 | typedef unsigned int uint; | |
110 | typedef unsigned long ulong; | |
111 | typedef __u8 u_int8_t; | |
112 | typedef __s8 int8_t; | |
113 | typedef __u16 u_int16_t; | |
114 | typedef __s16 int16_t; | |
115 | typedef __u32 u_int32_t; | |
116 | typedef __s32 int32_t; | |
117 | typedef __u8 uint8_t; | |
118 | typedef __u16 uint16_t; | |
119 | typedef __u32 uint32_t; | |
120 | typedef __u64 uint64_t; | |
121 | typedef __u64 u_int64_t; | |
122 | typedef __s64 int64_t; | |
123 | typedef u64 sector_t; | |
124 | typedef u64 blkcnt_t; | |
125 | typedef u64 dma_addr_t; | |
126 | typedef __u16 __le16; | |
127 | typedef __u16 __be16; | |
128 | typedef __u32 __le32; | |
129 | typedef __u32 __be32; | |
130 | typedef __u64 __le64; | |
131 | typedef __u64 __be64; | |
132 | typedef __u16 __sum16; | |
133 | typedef __u32 __wsum; | |
134 | typedef unsigned gfp_t; | |
135 | typedef unsigned fmode_t; | |
136 | typedef u64 phys_addr_t; | |
137 | typedef phys_addr_t resource_size_t; | |
/* Plain-int atomic counter; manipulated via the atomic_* helpers. */
typedef struct {
	int counter;
} atomic_t;

/* Circular doubly-linked list node, embedded inside containing structs. */
struct list_head {
	struct list_head *next, *prev;
};

/* Hash-list head: a single pointer keeps bucket arrays half the size. */
struct hlist_head {
	struct hlist_node *first;
};

/* Hash-list node; pprev points at the previous node's next pointer. */
struct hlist_node {
	struct hlist_node *next, **pprev;
};
150 | struct ustat { | |
151 | __kernel_daddr_t f_tfree; | |
152 | __kernel_ino_t f_tinode; | |
153 | char f_fname[6]; | |
154 | char f_fpack[6]; | |
155 | }; | |
156 | struct timespec; | |
157 | struct compat_timespec; | |
158 | struct restart_block { | |
159 | long (*fn)(struct restart_block *); | |
160 | union { | |
161 | struct { | |
162 | u32 *uaddr; | |
163 | u32 val; | |
164 | u32 flags; | |
165 | u32 bitset; | |
166 | u64 time; | |
167 | u32 *uaddr2; | |
168 | } futex; | |
169 | struct { | |
170 | clockid_t clockid; | |
171 | struct timespec *rmtp; | |
172 | u64 expires; | |
173 | } nanosleep; | |
174 | struct { | |
175 | struct pollfd *ufds; | |
176 | int nfds; | |
177 | int has_timeout; | |
178 | unsigned long tv_sec; | |
179 | unsigned long tv_nsec; | |
180 | } poll; | |
181 | }; | |
182 | }; | |
183 | extern long do_no_restart_syscall(struct restart_block *parm); | |
184 | extern unsigned int __sw_hweight8(unsigned int w); | |
185 | extern unsigned int __sw_hweight16(unsigned int w); | |
186 | extern unsigned int __sw_hweight32(unsigned int w); | |
187 | extern unsigned long __sw_hweight64(__u64 w); | |
188 | struct alt_instr { | |
189 | u8 *instr; | |
190 | u8 *replacement; | |
191 | u16 cpuid; | |
192 | u8 instrlen; | |
193 | u8 replacementlen; | |
194 | }; | |
195 | extern void alternative_instructions(void); | |
196 | extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); | |
197 | struct module; | |
198 | extern void alternatives_smp_module_add(struct module *mod, char *name, | |
199 | void *locks, void *locks_end, | |
200 | void *text, void *text_end); | |
201 | extern void alternatives_smp_module_del(struct module *mod); | |
202 | extern void alternatives_smp_switch(int smp); | |
203 | extern int alternatives_text_reserved(void *start, void *end); | |
204 | extern bool skip_smp_alternatives; | |
205 | extern const char * const x86_cap_flags[10*32]; | |
206 | extern const char * const x86_power_flags[32]; | |
207 | static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) bool __static_cpu_has(u16 bit) | |
208 | { | |
209 | asm goto("1: jmp %l[t_no]\n" | |
210 | "2:\n" | |
211 | ".section .altinstructions,\"a\"\n" | |
212 | " " ".balign 4" " " "\n" | |
213 | " " ".long" " " "1b\n" | |
214 | " " ".long" " " "0\n" | |
215 | " .word %P0\n" | |
216 | " .byte 2b - 1b\n" | |
217 | " .byte 0\n" | |
218 | ".previous\n" | |
219 | : : "i" (bit) : : t_no); | |
220 | return true; | |
221 | t_no: | |
222 | return false; | |
223 | } | |
struct paravirt_patch_site;
/* Apply paravirt patches over the given site table. */
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);

/* Boot-time code patching (before other CPUs run). */
extern void *text_poke_early(void *addr, const void *opcode, size_t len);

/* One kernel-text patch request for the batched SMP-safe poker. */
struct text_poke_param {
	void *addr;		/* target address in kernel text */
	const void *opcode;	/* bytes to write */
	size_t len;		/* number of bytes */
};

extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
extern void text_poke_smp_batch(struct text_poke_param *params, int n);
236 | static inline __attribute__((always_inline)) __attribute__((always_inline)) void | |
237 | set_bit(unsigned int nr, volatile unsigned long *addr) | |
238 | { | |
239 | if (__builtin_constant_p((((__builtin_constant_p(nr))))) ? !!(((__builtin_constant_p(nr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/bitops.h", .line = 62, }; ______r = !!(((__builtin_constant_p(nr)))); ______f.miss_hit[______r]++; ______r; })) { | |
240 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "orb %1,%0" | |
241 | : "+m" (*(volatile long *) ((void *)(addr) + ((nr)>>3))) | |
242 | : "iq" ((u8)(1 << ((nr) & 7))) | |
243 | : "memory"); | |
244 | } else { | |
245 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "bts %1,%0" | |
246 | : "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory"); | |
247 | } | |
248 | } | |
249 | static inline __attribute__((always_inline)) void __set_bit(int nr, volatile unsigned long *addr) | |
250 | { | |
251 | asm volatile("bts %1,%0" : "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory"); | |
252 | } | |
253 | static inline __attribute__((always_inline)) __attribute__((always_inline)) void | |
254 | clear_bit(int nr, volatile unsigned long *addr) | |
255 | { | |
256 | if (__builtin_constant_p((((__builtin_constant_p(nr))))) ? !!(((__builtin_constant_p(nr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/bitops.h", .line = 100, }; ______r = !!(((__builtin_constant_p(nr)))); ______f.miss_hit[______r]++; ______r; })) { | |
257 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "andb %1,%0" | |
258 | : "+m" (*(volatile long *) ((void *)(addr) + ((nr)>>3))) | |
259 | : "iq" ((u8)~(1 << ((nr) & 7)))); | |
260 | } else { | |
261 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "btr %1,%0" | |
262 | : "+m" (*(volatile long *) (addr)) | |
263 | : "Ir" (nr)); | |
264 | } | |
265 | } | |
/* clear_bit_unlock - clear bit @nr with release semantics: a compiler
 * barrier keeps prior stores from sinking past the atomic clear. */
static inline __attribute__((always_inline)) void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
	__asm__ __volatile__("": : :"memory");	/* compiler barrier */
	clear_bit(nr, addr);
}
271 | static inline __attribute__((always_inline)) void __clear_bit(int nr, volatile unsigned long *addr) | |
272 | { | |
273 | asm volatile("btr %1,%0" : "+m" (*(volatile long *) (addr)) : "Ir" (nr)); | |
274 | } | |
/* __clear_bit_unlock - non-atomic clear with a preceding compiler
 * barrier (release ordering against the compiler only). */
static inline __attribute__((always_inline)) void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
	__asm__ __volatile__("": : :"memory");	/* compiler barrier */
	__clear_bit(nr, addr);
}
280 | static inline __attribute__((always_inline)) void __change_bit(int nr, volatile unsigned long *addr) | |
281 | { | |
282 | asm volatile("btc %1,%0" : "+m" (*(volatile long *) (addr)) : "Ir" (nr)); | |
283 | } | |
284 | static inline __attribute__((always_inline)) void change_bit(int nr, volatile unsigned long *addr) | |
285 | { | |
286 | if (__builtin_constant_p((((__builtin_constant_p(nr))))) ? !!(((__builtin_constant_p(nr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/bitops.h", .line = 176, }; ______r = !!(((__builtin_constant_p(nr)))); ______f.miss_hit[______r]++; ______r; })) { | |
287 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "xorb %1,%0" | |
288 | : "+m" (*(volatile long *) ((void *)(addr) + ((nr)>>3))) | |
289 | : "iq" ((u8)(1 << ((nr) & 7)))); | |
290 | } else { | |
291 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "btc %1,%0" | |
292 | : "+m" (*(volatile long *) (addr)) | |
293 | : "Ir" (nr)); | |
294 | } | |
295 | } | |
296 | static inline __attribute__((always_inline)) int test_and_set_bit(int nr, volatile unsigned long *addr) | |
297 | { | |
298 | int oldbit; | |
299 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "bts %2,%1\n\t" | |
300 | "sbb %0,%0" : "=r" (oldbit), "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory"); | |
301 | return oldbit; | |
302 | } | |
/* test_and_set_bit_lock - acquire-flavoured variant; the locked RMW in
 * test_and_set_bit already supplies the required ordering on x86, so
 * this simply forwards. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) int
test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
308 | static inline __attribute__((always_inline)) int __test_and_set_bit(int nr, volatile unsigned long *addr) | |
309 | { | |
310 | int oldbit; | |
311 | asm("bts %2,%1\n\t" | |
312 | "sbb %0,%0" | |
313 | : "=r" (oldbit), "+m" (*(volatile long *) (addr)) | |
314 | : "Ir" (nr)); | |
315 | return oldbit; | |
316 | } | |
317 | static inline __attribute__((always_inline)) int test_and_clear_bit(int nr, volatile unsigned long *addr) | |
318 | { | |
319 | int oldbit; | |
320 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "btr %2,%1\n\t" | |
321 | "sbb %0,%0" | |
322 | : "=r" (oldbit), "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory"); | |
323 | return oldbit; | |
324 | } | |
325 | static inline __attribute__((always_inline)) int __test_and_clear_bit(int nr, volatile unsigned long *addr) | |
326 | { | |
327 | int oldbit; | |
328 | asm volatile("btr %2,%1\n\t" | |
329 | "sbb %0,%0" | |
330 | : "=r" (oldbit), "+m" (*(volatile long *) (addr)) | |
331 | : "Ir" (nr)); | |
332 | return oldbit; | |
333 | } | |
334 | static inline __attribute__((always_inline)) int __test_and_change_bit(int nr, volatile unsigned long *addr) | |
335 | { | |
336 | int oldbit; | |
337 | asm volatile("btc %2,%1\n\t" | |
338 | "sbb %0,%0" | |
339 | : "=r" (oldbit), "+m" (*(volatile long *) (addr)) | |
340 | : "Ir" (nr) : "memory"); | |
341 | return oldbit; | |
342 | } | |
343 | static inline __attribute__((always_inline)) int test_and_change_bit(int nr, volatile unsigned long *addr) | |
344 | { | |
345 | int oldbit; | |
346 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "btc %2,%1\n\t" | |
347 | "sbb %0,%0" | |
348 | : "=r" (oldbit), "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory"); | |
349 | return oldbit; | |
350 | } | |
/* constant_test_bit - C-level bit test for compile-time-constant @nr.
 * Indexes with a fixed 32-bit word size (i386 unsigned long layout).
 * Returns 1 if the bit is set, 0 otherwise. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	const unsigned long word = addr[nr / 32];

	return (int)((word >> (nr % 32)) & 1UL);
}
356 | static inline __attribute__((always_inline)) int variable_test_bit(int nr, volatile const unsigned long *addr) | |
357 | { | |
358 | int oldbit; | |
359 | asm volatile("bt %2,%1\n\t" | |
360 | "sbb %0,%0" | |
361 | : "=r" (oldbit) | |
362 | : "m" (*(unsigned long *)addr), "Ir" (nr)); | |
363 | return oldbit; | |
364 | } | |
365 | static inline __attribute__((always_inline)) unsigned long __ffs(unsigned long word) | |
366 | { | |
367 | asm("bsf %1,%0" | |
368 | : "=r" (word) | |
369 | : "rm" (word)); | |
370 | return word; | |
371 | } | |
372 | static inline __attribute__((always_inline)) unsigned long ffz(unsigned long word) | |
373 | { | |
374 | asm("bsf %1,%0" | |
375 | : "=r" (word) | |
376 | : "r" (~word)); | |
377 | return word; | |
378 | } | |
379 | static inline __attribute__((always_inline)) unsigned long __fls(unsigned long word) | |
380 | { | |
381 | asm("bsr %1,%0" | |
382 | : "=r" (word) | |
383 | : "rm" (word)); | |
384 | return word; | |
385 | } | |
386 | static inline __attribute__((always_inline)) int ffs(int x) | |
387 | { | |
388 | int r; | |
389 | asm("bsfl %1,%0\n\t" | |
390 | "cmovzl %2,%0" | |
391 | : "=r" (r) : "rm" (x), "r" (-1)); | |
392 | return r + 1; | |
393 | } | |
394 | static inline __attribute__((always_inline)) int fls(int x) | |
395 | { | |
396 | int r; | |
397 | asm("bsrl %1,%0\n\t" | |
398 | "cmovzl %2,%0" | |
399 | : "=&r" (r) : "rm" (x), "rm" (-1)); | |
400 | return r + 1; | |
401 | } | |
/* Bitmap searching primitives (implemented in lib/find_bit); all take a
 * bitmap, its size in bits, and (for the _next variants) a start offset,
 * returning the bit index found or >= size when none. */
extern unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset);
extern unsigned long find_next_zero_bit(const unsigned long *addr,
					unsigned long size, unsigned long offset);
extern unsigned long find_first_bit(const unsigned long *addr,
				    unsigned long size);
extern unsigned long find_first_zero_bit(const unsigned long *addr,
					 unsigned long size);
410 | static inline __attribute__((always_inline)) int sched_find_first_bit(const unsigned long *b) | |
411 | { | |
412 | if (__builtin_constant_p(((b[0]))) ? !!((b[0])) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/bitops/sched.h", .line = 19, }; ______r = !!((b[0])); ______f.miss_hit[______r]++; ______r; })) | |
413 | return __ffs(b[0]); | |
414 | if (__builtin_constant_p(((b[1]))) ? !!((b[1])) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/bitops/sched.h", .line = 21, }; ______r = !!((b[1])); ______f.miss_hit[______r]++; ______r; })) | |
415 | return __ffs(b[1]) + 32; | |
416 | if (__builtin_constant_p(((b[2]))) ? !!((b[2])) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/bitops/sched.h", .line = 23, }; ______r = !!((b[2])); ______f.miss_hit[______r]++; ______r; })) | |
417 | return __ffs(b[2]) + 64; | |
418 | return __ffs(b[3]) + 96; | |
419 | } | |
420 | static inline __attribute__((always_inline)) unsigned int __arch_hweight32(unsigned int w) | |
421 | { | |
422 | unsigned int res = 0; | |
423 | asm ("661:\n\t" "call __sw_hweight32" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(4*32+23)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" ".byte 0xf3,0x0f,0xb8,0xc0" "\n664:\n" ".previous" | |
424 | : "=""a" (res) | |
425 | : "a" (w)); | |
426 | return res; | |
427 | } | |
/* __arch_hweight16 / __arch_hweight8 - popcount of the low 16/8 bits,
 * implemented by masking and delegating to the 32-bit version. */
static inline __attribute__((always_inline)) unsigned int __arch_hweight16(unsigned int w)
{
	return __arch_hweight32(w & 0xffff);
}

static inline __attribute__((always_inline)) unsigned int __arch_hweight8(unsigned int w)
{
	return __arch_hweight32(w & 0xff);
}
436 | static inline __attribute__((always_inline)) unsigned long __arch_hweight64(__u64 w) | |
437 | { | |
438 | unsigned long res = 0; | |
439 | return __arch_hweight32((u32)w) + | |
440 | __arch_hweight32((u32)(w >> 32)); | |
441 | return res; | |
442 | } | |
443 | static inline __attribute__((always_inline)) __attribute__((always_inline)) int fls64(__u64 x) | |
444 | { | |
445 | __u32 h = x >> 32; | |
446 | if (__builtin_constant_p(((h))) ? !!((h)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/bitops/fls64.h", .line = 21, }; ______r = !!((h)); ______f.miss_hit[______r]++; ______r; })) | |
447 | return fls(h) + 32; | |
448 | return fls(x); | |
449 | } | |
450 | static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __arch_swab32(__u32 val) | |
451 | { | |
452 | asm("bswap %0" : "=r" (val) : "0" (val)); | |
453 | return val; | |
454 | } | |
455 | static inline __attribute__((always_inline)) __attribute__((__const__)) __u64 __arch_swab64(__u64 val) | |
456 | { | |
457 | union { | |
458 | struct { | |
459 | __u32 a; | |
460 | __u32 b; | |
461 | } s; | |
462 | __u64 u; | |
463 | } v; | |
464 | v.u = val; | |
465 | asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" | |
466 | : "=r" (v.s.a), "=r" (v.s.b) | |
467 | : "0" (v.s.a), "1" (v.s.b)); | |
468 | return v.u; | |
469 | } | |
470 | static inline __attribute__((always_inline)) __attribute__((__const__)) __u16 __fswab16(__u16 val) | |
471 | { | |
472 | return ((__u16)( (((__u16)(val) & (__u16)0x00ffU) << 8) | (((__u16)(val) & (__u16)0xff00U) >> 8))); | |
473 | } | |
474 | static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswab32(__u32 val) | |
475 | { | |
476 | return __arch_swab32(val); | |
477 | } | |
478 | static inline __attribute__((always_inline)) __attribute__((__const__)) __u64 __fswab64(__u64 val) | |
479 | { | |
480 | return __arch_swab64(val); | |
481 | } | |
482 | static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswahw32(__u32 val) | |
483 | { | |
484 | return ((__u32)( (((__u32)(val) & (__u32)0x0000ffffUL) << 16) | (((__u32)(val) & (__u32)0xffff0000UL) >> 16))); | |
485 | } | |
486 | static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswahb32(__u32 val) | |
487 | { | |
488 | return ((__u32)( (((__u32)(val) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(val) & (__u32)0xff00ff00UL) >> 8))); | |
489 | } | |
490 | static inline __attribute__((always_inline)) __u16 __swab16p(const __u16 *p) | |
491 | { | |
492 | return (__builtin_constant_p((__u16)(*p)) ? ((__u16)( (((__u16)(*p) & (__u16)0x00ffU) << 8) | (((__u16)(*p) & (__u16)0xff00U) >> 8))) : __fswab16(*p)); | |
493 | } | |
494 | static inline __attribute__((always_inline)) __u32 __swab32p(const __u32 *p) | |
495 | { | |
496 | return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x000000ffUL) << 24) | (((__u32)(*p) & (__u32)0x0000ff00UL) << 8) | (((__u32)(*p) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(*p) & (__u32)0xff000000UL) >> 24))) : __fswab32(*p)); | |
497 | } | |
498 | static inline __attribute__((always_inline)) __u64 __swab64p(const __u64 *p) | |
499 | { | |
500 | return (__builtin_constant_p((__u64)(*p)) ? ((__u64)( (((__u64)(*p) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(*p) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(*p) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(*p) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(*p) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(*p) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(*p) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(*p) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(*p)); | |
501 | } | |
502 | static inline __attribute__((always_inline)) __u32 __swahw32p(const __u32 *p) | |
503 | { | |
504 | return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x0000ffffUL) << 16) | (((__u32)(*p) & (__u32)0xffff0000UL) >> 16))) : __fswahw32(*p)); | |
505 | } | |
506 | static inline __attribute__((always_inline)) __u32 __swahb32p(const __u32 *p) | |
507 | { | |
508 | return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(*p) & (__u32)0xff00ff00UL) >> 8))) : __fswahb32(*p)); | |
509 | } | |
510 | static inline __attribute__((always_inline)) void __swab16s(__u16 *p) | |
511 | { | |
512 | *p = __swab16p(p); | |
513 | } | |
514 | static inline __attribute__((always_inline)) void __swab32s(__u32 *p) | |
515 | { | |
516 | *p = __swab32p(p); | |
517 | } | |
518 | static inline __attribute__((always_inline)) void __swab64s(__u64 *p) | |
519 | { | |
520 | *p = __swab64p(p); | |
521 | } | |
522 | static inline __attribute__((always_inline)) void __swahw32s(__u32 *p) | |
523 | { | |
524 | *p = __swahw32p(p); | |
525 | } | |
526 | static inline __attribute__((always_inline)) void __swahb32s(__u32 *p) | |
527 | { | |
528 | *p = __swahb32p(p); | |
529 | } | |
530 | static inline __attribute__((always_inline)) __le64 __cpu_to_le64p(const __u64 *p) | |
531 | { | |
532 | return ( __le64)*p; | |
533 | } | |
534 | static inline __attribute__((always_inline)) __u64 __le64_to_cpup(const __le64 *p) | |
535 | { | |
536 | return ( __u64)*p; | |
537 | } | |
538 | static inline __attribute__((always_inline)) __le32 __cpu_to_le32p(const __u32 *p) | |
539 | { | |
540 | return ( __le32)*p; | |
541 | } | |
542 | static inline __attribute__((always_inline)) __u32 __le32_to_cpup(const __le32 *p) | |
543 | { | |
544 | return ( __u32)*p; | |
545 | } | |
546 | static inline __attribute__((always_inline)) __le16 __cpu_to_le16p(const __u16 *p) | |
547 | { | |
548 | return ( __le16)*p; | |
549 | } | |
550 | static inline __attribute__((always_inline)) __u16 __le16_to_cpup(const __le16 *p) | |
551 | { | |
552 | return ( __u16)*p; | |
553 | } | |
554 | static inline __attribute__((always_inline)) __be64 __cpu_to_be64p(const __u64 *p) | |
555 | { | |
556 | return ( __be64)__swab64p(p); | |
557 | } | |
558 | static inline __attribute__((always_inline)) __u64 __be64_to_cpup(const __be64 *p) | |
559 | { | |
560 | return __swab64p((__u64 *)p); | |
561 | } | |
562 | static inline __attribute__((always_inline)) __be32 __cpu_to_be32p(const __u32 *p) | |
563 | { | |
564 | return ( __be32)__swab32p(p); | |
565 | } | |
566 | static inline __attribute__((always_inline)) __u32 __be32_to_cpup(const __be32 *p) | |
567 | { | |
568 | return __swab32p((__u32 *)p); | |
569 | } | |
570 | static inline __attribute__((always_inline)) __be16 __cpu_to_be16p(const __u16 *p) | |
571 | { | |
572 | return ( __be16)__swab16p(p); | |
573 | } | |
574 | static inline __attribute__((always_inline)) __u16 __be16_to_cpup(const __be16 *p) | |
575 | { | |
576 | return __swab16p((__u16 *)p); | |
577 | } | |
578 | static inline __attribute__((always_inline)) void le16_add_cpu(__le16 *var, u16 val) | |
579 | { | |
580 | *var = (( __le16)(__u16)((( __u16)(__le16)(*var)) + val)); | |
581 | } | |
582 | static inline __attribute__((always_inline)) void le32_add_cpu(__le32 *var, u32 val) | |
583 | { | |
584 | *var = (( __le32)(__u32)((( __u32)(__le32)(*var)) + val)); | |
585 | } | |
586 | static inline __attribute__((always_inline)) void le64_add_cpu(__le64 *var, u64 val) | |
587 | { | |
588 | *var = (( __le64)(__u64)((( __u64)(__le64)(*var)) + val)); | |
589 | } | |
590 | static inline __attribute__((always_inline)) void be16_add_cpu(__be16 *var, u16 val) | |
591 | { | |
592 | *var = (( __be16)(__builtin_constant_p((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val))) ? ((__u16)( (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0x00ffU) << 8) | (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0xff00U) >> 8))) : __fswab16(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)))); | |
593 | } | |
594 | static inline __attribute__((always_inline)) void be32_add_cpu(__be32 *var, u32 val) | |
595 | { | |
596 | *var = (( __be32)(__builtin_constant_p((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val))) ? ((__u32)( (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? 
((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)))); | |
597 | } | |
598 | static inline __attribute__((always_inline)) void be64_add_cpu(__be64 *var, u64 val) | |
599 | { | |
600 | *var = (( __be64)(__builtin_constant_p((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val))) ? ((__u64)( (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)))); | |
601 | } | |
static inline __attribute__((always_inline)) unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	/* Little-endian bitmap search is the native search on x86. */
	return find_next_zero_bit(addr, size, offset);
}
static inline __attribute__((always_inline)) unsigned long find_next_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	/* LE bit ordering equals native ordering on x86; plain pass-through. */
	return find_next_bit(addr, size, offset);
}
static inline __attribute__((always_inline)) unsigned long find_first_zero_bit_le(const void *addr,
		unsigned long size)
{
	/* LE bitmap layout is the native layout here; delegate directly. */
	return find_first_zero_bit(addr, size);
}
static inline __attribute__((always_inline)) int test_bit_le(int nr, const void *addr)
{
	/*
	 * LE bit numbering is native on little-endian x86 (the swizzle is
	 * "^ 0", an identity, dropped here).  Keep the constant/variable
	 * dispatch so a compile-time-constant nr stays on the fast path.
	 */
	if (__builtin_constant_p(nr))
		return constant_test_bit(nr, addr);
	return variable_test_bit(nr, addr);
}
static inline __attribute__((always_inline)) void __set_bit_le(int nr, void *addr)
{
	/* Non-atomic set of LE-numbered bit; "^ 0" swizzle is a no-op here. */
	__set_bit(nr, addr);
}
static inline __attribute__((always_inline)) void __clear_bit_le(int nr, void *addr)
{
	/* Non-atomic clear of LE-numbered bit; numbering is native on x86. */
	__clear_bit(nr, addr);
}
static inline __attribute__((always_inline)) int test_and_set_bit_le(int nr, void *addr)
{
	/* Atomic test-and-set of LE-numbered bit (native numbering on x86). */
	return test_and_set_bit(nr, addr);
}
static inline __attribute__((always_inline)) int test_and_clear_bit_le(int nr, void *addr)
{
	/* Atomic test-and-clear of LE-numbered bit (native numbering on x86). */
	return test_and_clear_bit(nr, addr);
}
static inline __attribute__((always_inline)) int __test_and_set_bit_le(int nr, void *addr)
{
	/* Non-atomic test-and-set of LE-numbered bit. */
	return __test_and_set_bit(nr, addr);
}
static inline __attribute__((always_inline)) int __test_and_clear_bit_le(int nr, void *addr)
{
	/* Non-atomic test-and-clear of LE-numbered bit. */
	return __test_and_clear_bit(nr, addr);
}
static __inline__ __attribute__((always_inline)) int get_bitmask_order(unsigned int count)
{
	/*
	 * Index of the highest set bit plus one: a mask of this many bits
	 * covers count.  fls(0) == 0, so count == 0 yields 0.
	 */
	return fls(count);
}
/*
 * get_count_order - log2 of count, rounded up.
 *
 * Returns the smallest order such that (1 << order) >= count,
 * e.g. 1 -> 0, 3 -> 2, 4 -> 2.  count == 0 yields -1 (fls(0) == 0).
 *
 * The (count & (count - 1)) test means "count is not a power of two".
 * The ({ ... ftrace_branch_data ... }) wrapper around the if condition
 * is the preprocessed ftrace branch-profiling instrumentation from
 * include/linux/bitops.h, not hand-written logic; it must stay intact
 * so the per-branch hit counters keep their section entry.
 */
static __inline__ __attribute__((always_inline)) int get_count_order(unsigned int count)
{
	int order;
	order = fls(count) - 1;
	/* round up when count is not an exact power of two */
	if (__builtin_constant_p(((count & (count - 1)))) ? !!((count & (count - 1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitops.h", .line = 42, }; ______r = !!((count & (count - 1))); ______f.miss_hit[______r]++; ______r; }))
		order++;
	return order;
}
static inline __attribute__((always_inline)) unsigned long hweight_long(unsigned long w)
{
	/*
	 * Population count (number of set bits) of w.  The original
	 * open-codes a compile-time popcount for constant w; that arm is a
	 * pure constant-folding optimization returning the same value the
	 * arch helpers compute, so only the width dispatch matters.
	 */
	if (sizeof(w) == 4)
		return __arch_hweight32(w);
	return __arch_hweight64(w);
}
663 | static inline __attribute__((always_inline)) __u32 rol32(__u32 word, unsigned int shift) | |
664 | { | |
665 | return (word << shift) | (word >> (32 - shift)); | |
666 | } | |
667 | static inline __attribute__((always_inline)) __u32 ror32(__u32 word, unsigned int shift) | |
668 | { | |
669 | return (word >> shift) | (word << (32 - shift)); | |
670 | } | |
671 | static inline __attribute__((always_inline)) __u16 rol16(__u16 word, unsigned int shift) | |
672 | { | |
673 | return (word << shift) | (word >> (16 - shift)); | |
674 | } | |
675 | static inline __attribute__((always_inline)) __u16 ror16(__u16 word, unsigned int shift) | |
676 | { | |
677 | return (word >> shift) | (word << (16 - shift)); | |
678 | } | |
679 | static inline __attribute__((always_inline)) __u8 rol8(__u8 word, unsigned int shift) | |
680 | { | |
681 | return (word << shift) | (word >> (8 - shift)); | |
682 | } | |
683 | static inline __attribute__((always_inline)) __u8 ror8(__u8 word, unsigned int shift) | |
684 | { | |
685 | return (word >> shift) | (word << (8 - shift)); | |
686 | } | |
687 | static inline __attribute__((always_inline)) __s32 sign_extend32(__u32 value, int index) | |
688 | { | |
689 | __u8 shift = 31 - index; | |
690 | return (__s32)(value << shift) >> shift; | |
691 | } | |
/*
 * fls_long - find last (most significant) set bit in an unsigned long.
 *
 * Dispatches on sizeof(l): 32-bit longs use fls(), wider ones fls64().
 * The condition is a compile-time constant, so only one arm survives.
 * The ({ ... ftrace_branch_data ... }) wrapper is the preprocessed
 * ftrace branch-profiling instrumentation of the plain if () from
 * include/linux/bitops.h; keep it byte-identical so the profiling
 * section entry is preserved.
 */
static inline __attribute__((always_inline)) unsigned fls_long(unsigned long l)
{
	if (__builtin_constant_p(((sizeof(l) == 4))) ? !!((sizeof(l) == 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitops.h", .line = 125, }; ______r = !!((sizeof(l) == 4)); ______f.miss_hit[______r]++; ______r; }))
		return fls(l);
	return fls64(l);
}
/*
 * __ffs64 - find first (least significant) set bit in a 64-bit word.
 *
 * Result is undefined when word == 0 (same contract as __ffs).  If the
 * low 32 bits are all clear, search the high half and bias the result
 * by 32; otherwise search the low half directly.  The ({ ... }) block
 * is the ftrace branch-profiling expansion of the if () condition.
 */
static inline __attribute__((always_inline)) unsigned long __ffs64(u64 word)
{
	if (__builtin_constant_p(((((u32)word) == 0UL))) ? !!((((u32)word) == 0UL)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitops.h", .line = 141, }; ______r = !!((((u32)word) == 0UL)); ______f.miss_hit[______r]++; ______r; }))
		return __ffs((u32)(word >> 32)) + 32;
	return __ffs((unsigned long)word);
}
704 | extern unsigned long find_last_bit(const unsigned long *addr, | |
705 | unsigned long size); | |
706 | extern unsigned int __VMALLOC_RESERVE; | |
707 | extern int sysctl_legacy_va_layout; | |
708 | extern void find_low_pfn_range(void); | |
709 | extern void setup_bootmem_allocator(void); | |
710 | extern int devmem_is_allowed(unsigned long pagenr); | |
711 | extern unsigned long max_low_pfn_mapped; | |
712 | extern unsigned long max_pfn_mapped; | |
713 | static inline __attribute__((always_inline)) phys_addr_t get_max_mapped(void) | |
714 | { | |
715 | return (phys_addr_t)max_pfn_mapped << 12; | |
716 | } | |
717 | extern unsigned long init_memory_mapping(unsigned long start, | |
718 | unsigned long end); | |
719 | extern void initmem_init(void); | |
720 | extern void free_initmem(void); | |
721 | typedef __builtin_va_list __gnuc_va_list; | |
722 | typedef __gnuc_va_list va_list; | |
723 | extern char *strndup_user(const char *, long); | |
724 | extern void *memdup_user(const void *, size_t); | |
725 | extern char *strcpy(char *dest, const char *src); | |
726 | extern char *strncpy(char *dest, const char *src, size_t count); | |
727 | extern char *strcat(char *dest, const char *src); | |
728 | extern char *strncat(char *dest, const char *src, size_t count); | |
729 | extern int strcmp(const char *cs, const char *ct); | |
730 | extern int strncmp(const char *cs, const char *ct, size_t count); | |
731 | extern char *strchr(const char *s, int c); | |
732 | extern size_t strlen(const char *s); | |
/*
 * __memcpy - memcpy for sizes not known at compile time (x86-32).
 *
 * Copies n/4 dwords with "rep movsl", then reloads n, masks the low
 * two bits, and copies the remaining 0-3 bytes with "rep movsb".
 * Returns the destination pointer, like memcpy.
 *
 * d0/d1/d2 are dummy outputs telling the compiler that ecx/edi/esi
 * are consumed; the "0"/"1"/"2" input constraints pre-load them with
 * the dword count, destination, and source respectively.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void *__memcpy(void *to, const void *from, size_t n)
{
	int d0, d1, d2;
	asm volatile("rep ; movsl\n\t"
		     "movl %4,%%ecx\n\t"
		     "andl $3,%%ecx\n\t"
		     "jz 1f\n\t"
		     "rep ; movsb\n\t"
		     "1:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
/*
 * __constant_memcpy - memcpy specialized for a compile-time-constant n
 * (x86-32).  The compiler evaluates every size test at compile time and
 * keeps exactly one code path.
 *
 * Sizes 0-6 and 8 are open-coded as direct typed loads/stores; larger
 * sizes copy dwords — a "rep movsl" loop for n >= 20, otherwise up to
 * four unrolled "movsl" instructions — and finish the 0-3 tail bytes
 * with movsb/movsw.  Returns the destination.
 *
 * Every ({ ... ftrace_branch_data ... }) block below is the
 * preprocessed ftrace branch-profiling wrapper around a plain if ()
 * condition (original source: arch/x86/include/asm/string_32.h); it is
 * instrumentation, not logic, and must stay intact.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void *__constant_memcpy(void *to, const void *from,
		size_t n)
{
	long esi, edi;
	/* n == 0: nothing to do */
	if (__builtin_constant_p(((!n))) ? !!((!n)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 55, }; ______r = !!((!n)); ______f.miss_hit[______r]++; ______r; }))
		return to;
	/* tiny sizes: direct loads/stores, no string instructions */
	switch (n) {
	case 1:
		*(char *)to = *(char *)from;
		return to;
	case 2:
		*(short *)to = *(short *)from;
		return to;
	case 4:
		*(int *)to = *(int *)from;
		return to;
	case 3:
		*(short *)to = *(short *)from;
		*((char *)to + 2) = *((char *)from + 2);
		return to;
	case 5:
		*(int *)to = *(int *)from;
		*((char *)to + 4) = *((char *)from + 4);
		return to;
	case 6:
		*(int *)to = *(int *)from;
		*((short *)to + 2) = *((short *)from + 2);
		return to;
	case 8:
		*(int *)to = *(int *)from;
		*((int *)to + 1) = *((int *)from + 1);
		return to;
	}
	esi = (long)from;
	edi = (long)to;
	/* n >= 20 bytes: one rep-prefixed dword copy */
	if (__builtin_constant_p(((n >= 5 * 4))) ? !!((n >= 5 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 88, }; ______r = !!((n >= 5 * 4)); ______f.miss_hit[______r]++; ______r; })) {
		int ecx;
		asm volatile("rep ; movsl"
			     : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
			     : "0" (n / 4), "1" (edi), "2" (esi)
			     : "memory"
		);
	} else {
		/* n < 20: unrolled movsl chain; each test emits at most one
		   movsl, and esi/edi thread through the constraint pairs */
		if (__builtin_constant_p(((n >= 4 * 4))) ? !!((n >= 4 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 98, }; ______r = !!((n >= 4 * 4)); ______f.miss_hit[______r]++; ______r; }))
			asm volatile("movsl"
				     : "=&D"(edi), "=&S"(esi)
				     : "0"(edi), "1"(esi)
				     : "memory");
		if (__builtin_constant_p(((n >= 3 * 4))) ? !!((n >= 3 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 103, }; ______r = !!((n >= 3 * 4)); ______f.miss_hit[______r]++; ______r; }))
			asm volatile("movsl"
				     : "=&D"(edi), "=&S"(esi)
				     : "0"(edi), "1"(esi)
				     : "memory");
		if (__builtin_constant_p(((n >= 2 * 4))) ? !!((n >= 2 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 108, }; ______r = !!((n >= 2 * 4)); ______f.miss_hit[______r]++; ______r; }))
			asm volatile("movsl"
				     : "=&D"(edi), "=&S"(esi)
				     : "0"(edi), "1"(esi)
				     : "memory");
		if (__builtin_constant_p(((n >= 1 * 4))) ? !!((n >= 1 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 113, }; ______r = !!((n >= 1 * 4)); ______f.miss_hit[______r]++; ______r; }))
			asm volatile("movsl"
				     : "=&D"(edi), "=&S"(esi)
				     : "0"(edi), "1"(esi)
				     : "memory");
	}
	/* copy the remaining 0-3 tail bytes */
	switch (n % 4) {
	case 0:
		return to;
	case 1:
		asm volatile("movsb"
			     : "=&D"(edi), "=&S"(esi)
			     : "0"(edi), "1"(esi)
			     : "memory");
		return to;
	case 2:
		asm volatile("movsw"
			     : "=&D"(edi), "=&S"(esi)
			     : "0"(edi), "1"(esi)
			     : "memory");
		return to;
	default:
		asm volatile("movsw\n\tmovsb"
			     : "=&D"(edi), "=&S"(esi)
			     : "0"(edi), "1"(esi)
			     : "memory");
		return to;
	}
}
834 | void *memmove(void *dest, const void *src, size_t n); | |
835 | extern void *memchr(const void *cs, int c, size_t count); | |
/*
 * __memset_generic - memset fallback: store 'count' copies of byte c
 * with "rep stosb".  Returns s, like memset.
 *
 * d0/d1 are dummy outputs marking ecx/edi as clobbered; "0"/"1" inputs
 * pre-load them with count and the destination.
 */
static inline __attribute__((always_inline)) void *__memset_generic(void *s, char c, size_t count)
{
	int d0, d1;
	asm volatile("rep\n\t"
		     "stosb"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (c), "1" (s), "0" (count)
		     : "memory");
	return s;
}
/*
 * __constant_c_memset - memset whose fill byte is already replicated
 * into all four bytes of c (e.g. 0x01010101 for byte 0x01).
 *
 * Stores count/4 dwords with "rep stosl", then inspects bits 1 and 0
 * of count via "%b3" (the low byte of the "q"-constrained count
 * operand) to emit the trailing stosw and/or stosb.  Returns s.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline))
void *__constant_c_memset(void *s, unsigned long c, size_t count)
{
	int d0, d1;
	asm volatile("rep ; stosl\n\t"
		     "testb $2,%b3\n\t"
		     "je 1f\n\t"
		     "stosw\n"
		     "1:\ttestb $1,%b3\n\t"
		     "je 2f\n\t"
		     "stosb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
		     : "memory");
	return s;
}
863 | extern size_t strnlen(const char *s, size_t count); | |
864 | extern char *strstr(const char *cs, const char *ct); | |
/*
 * __constant_c_and_count_memset - memset when both the replicated fill
 * pattern and the count are compile-time constants.
 *
 * Counts 0-4 are open-coded as direct stores (masking the pattern to
 * the stored width).  For larger counts, count/4 dwords are written
 * with "rep stosl" and count%4 selects — at compile time — which tail
 * variant (none / stosb / stosw / stosw+stosb) is emitted.  Returns s.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline))
void *__constant_c_and_count_memset(void *s, unsigned long pattern,
				    size_t count)
{
	/* tiny constant counts: plain stores, no string instructions */
	switch (count) {
	case 0:
		return s;
	case 1:
		*(unsigned char *)s = pattern & 0xff;
		return s;
	case 2:
		*(unsigned short *)s = pattern & 0xffff;
		return s;
	case 3:
		*(unsigned short *)s = pattern & 0xffff;
		*((unsigned char *)s + 2) = pattern & 0xff;
		return s;
	case 4:
		*(unsigned long *)s = pattern;
		return s;
	}
	{
		int d0, d1;
		unsigned long eax = pattern;
		/* count%4 is constant: exactly one case is compiled in */
		switch (count % 4) {
		case 0:
			asm volatile("rep ; stosl" "" : "=&c" (d0), "=&D" (d1) : "a" (eax), "0" (count/4), "1" ((long)s) : "memory");
			return s;
		case 1:
			asm volatile("rep ; stosl" "\n\tstosb" : "=&c" (d0), "=&D" (d1) : "a" (eax), "0" (count/4), "1" ((long)s) : "memory");
			return s;
		case 2:
			asm volatile("rep ; stosl" "\n\tstosw" : "=&c" (d0), "=&D" (d1) : "a" (eax), "0" (count/4), "1" ((long)s) : "memory");
			return s;
		default:
			asm volatile("rep ; stosl" "\n\tstosw\n\tstosb" : "=&c" (d0), "=&D" (d1) : "a" (eax), "0" (count/4), "1" ((long)s) : "memory");
			return s;
		}
	}
}
905 | extern void *memscan(void *addr, int c, size_t size); | |
906 | size_t strlcpy(char *, const char *, size_t); | |
907 | extern size_t strlcat(char *, const char *, __kernel_size_t); | |
908 | extern int strnicmp(const char *, const char *, __kernel_size_t); | |
909 | extern int strcasecmp(const char *s1, const char *s2); | |
910 | extern int strncasecmp(const char *s1, const char *s2, size_t n); | |
911 | extern char * strnchr(const char *, size_t, int); | |
912 | extern char * strrchr(const char *,int); | |
913 | extern char * __attribute__((warn_unused_result)) skip_spaces(const char *); | |
914 | extern char *strim(char *); | |
static inline __attribute__((always_inline)) __attribute__((warn_unused_result)) char *strstrip(char *str)
{
	/* Legacy alias for strim(): strip leading/trailing whitespace in place. */
	return strim(str);
}
919 | extern char * strnstr(const char *, const char *, size_t); | |
920 | extern char * strpbrk(const char *,const char *); | |
921 | extern char * strsep(char **,const char *); | |
922 | extern __kernel_size_t strspn(const char *,const char *); | |
923 | extern __kernel_size_t strcspn(const char *,const char *); | |
924 | extern int __builtin_memcmp(const void *,const void *,__kernel_size_t); | |
925 | extern char *kstrdup(const char *s, gfp_t gfp); | |
926 | extern char *kstrndup(const char *s, size_t len, gfp_t gfp); | |
927 | extern void *kmemdup(const void *src, size_t len, gfp_t gfp); | |
928 | extern char **argv_split(gfp_t gfp, const char *str, int *argcp); | |
929 | extern void argv_free(char **argv); | |
930 | extern bool sysfs_streq(const char *s1, const char *s2); | |
931 | extern int strtobool(const char *s, bool *res); | |
932 | int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); | |
933 | int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf); | |
934 | int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __attribute__((format(printf,3,4))); | |
935 | extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, | |
936 | const void *from, size_t available); | |
static inline __attribute__((always_inline)) bool strstarts(const char *str, const char *prefix)
{
	/* True when str begins with prefix (an empty prefix always matches). */
	size_t prefix_len = strlen(prefix);
	return strncmp(str, prefix, prefix_len) == 0;
}
static inline __attribute__((always_inline)) void clear_page(void *page)
{
	/* Zero one page; (1UL << 12) == PAGE_SIZE == 4096 bytes. */
	__builtin_memset(page, 0, (1UL) << 12);
}
static inline __attribute__((always_inline)) void copy_page(void *to, void *from)
{
	/* Copy one whole page; (1UL << 12) == PAGE_SIZE == 4096 bytes. */
	__builtin_memcpy(to, from, (1UL) << 12);
}
949 | struct page; | |
static inline __attribute__((always_inline)) void clear_user_page(void *page, unsigned long vaddr,
		struct page *pg)
{
	/* x86 has no virtually-indexed cache aliasing, so vaddr/pg are unused. */
	clear_page(page);
}
static inline __attribute__((always_inline)) void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *topage)
{
	/* No cache-aliasing concerns on x86: plain page copy, extras unused. */
	copy_page(to, from);
}
960 | extern bool __virt_addr_valid(unsigned long kaddr); | |
static inline __attribute__((always_inline)) __attribute__((__const__)) int get_order(unsigned long size)
{
	/*
	 * Allocation order for 'size' bytes: smallest 'order' with
	 * (1 << order) pages >= size (PAGE_SHIFT == 12).  get_order(0)
	 * relies on unsigned wraparound of size - 1 and returns the
	 * maximum order, exactly as the original do/while did.
	 */
	unsigned long n = ((size - 1) >> (12 - 1)) >> 1;
	int order = 0;
	while (n) {
		order++;
		n >>= 1;
	}
	return order;
}
972 | struct task_struct; | |
973 | struct exec_domain; | |
974 | struct task_struct; | |
975 | struct mm_struct; | |
/*
 * Register image for a vm86 (virtual-8086 mode) task.  This layout is
 * ABI with user space (vm86(2)/vm86old(2)); do not reorder fields.
 * The __null_* slots keep the kernel frame shape for segment registers
 * that are zeroed on vm86 entry; the __*sh halves pad each 16-bit
 * segment selector out to 32 bits.
 */
struct vm86_regs {
	long ebx;
	long ecx;
	long edx;
	long esi;
	long edi;
	long ebp;
	long eax;
	long __null_ds;
	long __null_es;
	long __null_fs;
	long __null_gs;
	long orig_eax;
	long eip;
	unsigned short cs, __csh;
	long eflags;
	long esp;
	unsigned short ss, __ssh;
	unsigned short es, __esh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
};
/* 256-bit bitmap (8 * 32-bit words): one bit per interrupt vector. */
struct revectored_struct {
	unsigned long __map[8];
};
/* Classic vm86old(2) argument block. */
struct vm86_struct {
	struct vm86_regs regs;
	unsigned long flags;
	unsigned long screen_bitmap;
	unsigned long cpu_type;
	struct revectored_struct int_revectored;	/* vectors handled in protected mode */
	struct revectored_struct int21_revectored;	/* DOS int 21h AH-value revectoring */
};
/* Extra state for the extended vm86plus interface (dosemu support). */
struct vm86plus_info_struct {
	unsigned long force_return_for_pic:1;
	unsigned long vm86dbg_active:1;		/* debugger is active */
	unsigned long vm86dbg_TFpendig:1;	/* (sic) pending trap flag for debugger */
	unsigned long unused:28;
	unsigned long is_vm86pus:1;		/* vm86plus (not vm86old) semantics */
	unsigned char vm86dbg_intxxtab[32];	/* debugger: which INT xx to trap */
};
/* vm86(2) argument block: vm86_struct plus the vm86plus extensions. */
struct vm86plus_struct {
	struct vm86_regs regs;
	unsigned long flags;
	unsigned long screen_bitmap;
	unsigned long cpu_type;
	struct revectored_struct int_revectored;
	struct revectored_struct int21_revectored;
	struct vm86plus_info_struct vm86plus;
};
1027 | extern const char early_idt_handlers[32][10]; | |
/*
 * Saved register frame built on kernel entry (x86-32).  The field
 * order mirrors the push sequence in the entry code, so it must not
 * be reordered.  orig_ax holds the syscall number / original eax
 * before it is overwritten by the return value; sp and ss are only
 * pushed by the CPU when entering from a less-privileged ring, so
 * they are meaningful only for traps from user mode.
 */
struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	unsigned long gs;
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};
1047 | typedef int (*initcall_t)(void); | |
1048 | typedef void (*exitcall_t)(void); | |
1049 | extern initcall_t __con_initcall_start[], __con_initcall_end[]; | |
1050 | extern initcall_t __security_initcall_start[], __security_initcall_end[]; | |
1051 | typedef void (*ctor_fn_t)(void); | |
1052 | extern int do_one_initcall(initcall_t fn); | |
1053 | extern char __attribute__ ((__section__(".init.data"))) boot_command_line[]; | |
1054 | extern char *saved_command_line; | |
1055 | extern unsigned int reset_devices; | |
1056 | void setup_arch(char **); | |
1057 | void prepare_namespace(void); | |
1058 | extern void (*late_time_init)(void); | |
1059 | extern int initcall_debug; | |
1060 | struct cpuinfo_x86; | |
1061 | struct task_struct; | |
1062 | extern unsigned long profile_pc(struct pt_regs *regs); | |
1063 | extern unsigned long | |
1064 | convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); | |
1065 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, | |
1066 | int error_code, int si_code); | |
1067 | void signal_fault(struct pt_regs *regs, void *frame, char *where); | |
1068 | extern long syscall_trace_enter(struct pt_regs *); | |
1069 | extern void syscall_trace_leave(struct pt_regs *); | |
1070 | static inline __attribute__((always_inline)) unsigned long regs_return_value(struct pt_regs *regs) | |
1071 | { | |
1072 | return regs->ax; | |
1073 | } | |
1074 | static inline __attribute__((always_inline)) int user_mode(struct pt_regs *regs) | |
1075 | { | |
1076 | return (regs->cs & 0x3) == 0x3; | |
1077 | } | |
1078 | static inline __attribute__((always_inline)) int user_mode_vm(struct pt_regs *regs) | |
1079 | { | |
1080 | return ((regs->cs & 0x3) | (regs->flags & 0x00020000)) >= | |
1081 | 0x3; | |
1082 | } | |
1083 | static inline __attribute__((always_inline)) int v8086_mode(struct pt_regs *regs) | |
1084 | { | |
1085 | return (regs->flags & 0x00020000); | |
1086 | } | |
1087 | static inline __attribute__((always_inline)) unsigned long kernel_stack_pointer(struct pt_regs *regs) | |
1088 | { | |
1089 | return (unsigned long)(®s->sp); | |
1090 | } | |
1091 | static inline __attribute__((always_inline)) unsigned long instruction_pointer(struct pt_regs *regs) | |
1092 | { | |
1093 | return ((regs)->ip); | |
1094 | } | |
1095 | static inline __attribute__((always_inline)) void instruction_pointer_set(struct pt_regs *regs, | |
1096 | unsigned long val) | |
1097 | { | |
1098 | (((regs)->ip) = (val)); | |
1099 | } | |
1100 | static inline __attribute__((always_inline)) unsigned long user_stack_pointer(struct pt_regs *regs) | |
1101 | { | |
1102 | return ((regs)->sp); | |
1103 | } | |
1104 | static inline __attribute__((always_inline)) void user_stack_pointer_set(struct pt_regs *regs, | |
1105 | unsigned long val) | |
1106 | { | |
1107 | (((regs)->sp) = (val)); | |
1108 | } | |
1109 | static inline __attribute__((always_inline)) unsigned long frame_pointer(struct pt_regs *regs) | |
1110 | { | |
1111 | return ((regs)->bp); | |
1112 | } | |
1113 | static inline __attribute__((always_inline)) void frame_pointer_set(struct pt_regs *regs, | |
1114 | unsigned long val) | |
1115 | { | |
1116 | (((regs)->bp) = (val)); | |
1117 | } | |
1118 | extern int regs_query_register_offset(const char *name); | |
1119 | extern const char *regs_query_register_name(unsigned int offset); | |
/*
 * regs_get_register - read a saved register out of pt_regs by byte offset.
 *
 * Returns 0 for any offset past the last member (ss).  The huge conditional
 * below is simply "if (unlikely(offset > offsetof(struct pt_regs, ss)))"
 * after expansion of the ftrace branch-profiling likely()/unlikely() macros
 * (CONFIG_PROFILE_ALL_BRANCHES); the static ftrace_branch_data records are
 * instrumentation only and do not change the branch's outcome.
 */
static inline __attribute__((always_inline)) unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	if (__builtin_constant_p((((__builtin_constant_p(offset > (__builtin_offsetof(struct pt_regs,ss))) ? !!(offset > (__builtin_offsetof(struct pt_regs,ss))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 229, }; ______r = __builtin_expect(!!(offset > (__builtin_offsetof(struct pt_regs,ss))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(offset > (__builtin_offsetof(struct pt_regs,ss))) ? !!(offset > (__builtin_offsetof(struct pt_regs,ss))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 229, }; ______r = __builtin_expect(!!(offset > (__builtin_offsetof(struct pt_regs,ss))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 229, }; ______r = !!(((__builtin_constant_p(offset > (__builtin_offsetof(struct pt_regs,ss))) ? !!(offset > (__builtin_offsetof(struct pt_regs,ss))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 229, }; ______r = __builtin_expect(!!(offset > (__builtin_offsetof(struct pt_regs,ss))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
		return 0;
	/* pt_regs is a flat sequence of word-sized slots: index by raw offset. */
	return *(unsigned long *)((unsigned long)regs + offset);
}
/*
 * regs_within_kernel_stack - true if @addr lies on the same kernel stack
 * as the pt_regs frame.  The constant (1UL << 12) << 1 is the 8 KiB
 * thread-stack size (PAGE_SIZE << THREAD_ORDER) after macro expansion.
 */
static inline __attribute__((always_inline)) int regs_within_kernel_stack(struct pt_regs *regs,
					   unsigned long addr)
{
	unsigned long stack_mask = ~((((1UL) << 12) << 1) - 1);
	unsigned long stack_base = kernel_stack_pointer(regs) & stack_mask;

	return (addr & stack_mask) == stack_base;
}
/*
 * regs_get_kernel_stack_nth - fetch the n-th word on the kernel stack,
 * counting up from the stack pointer recorded in @regs.  Returns 0 if the
 * resulting address falls off the current thread stack.  The conditional is
 * just "if (regs_within_kernel_stack(regs, (unsigned long)addr))" wrapped
 * in the expanded ftrace branch-profiling instrumentation.
 */
static inline __attribute__((always_inline)) unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
	addr += n;
	if (__builtin_constant_p(((regs_within_kernel_stack(regs, (unsigned long)addr)))) ? !!((regs_within_kernel_stack(regs, (unsigned long)addr))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 263, }; ______r = !!((regs_within_kernel_stack(regs, (unsigned long)addr))); ______f.miss_hit[______r]++; ______r; }))
		return *addr;
	else
		return 0;
}
1143 | struct user_desc; | |
1144 | extern int do_get_thread_area(struct task_struct *p, int idx, | |
1145 | struct user_desc *info); | |
1146 | extern int do_set_thread_area(struct task_struct *p, int idx, | |
1147 | struct user_desc *info, int can_allocate); | |
/*
 * Register frame used while running a task in virtual-8086 mode.
 * Layout is ABI: a normal pt_regs followed by the vm86 segment registers,
 * each padded to 32 bits by a __*h half.  Do not reorder fields.
 */
struct kernel_vm86_regs {
	struct pt_regs pt;		/* normal trap frame comes first */
	unsigned short es, __esh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
};
/*
 * Kernel-side bookkeeping for a vm86() session; regs32 points back at the
 * 32-bit pt_regs of the task that entered vm86 mode.
 */
struct kernel_vm86_struct {
	struct kernel_vm86_regs regs;
	unsigned long flags;
	unsigned long screen_bitmap;
	unsigned long cpu_type;
	struct revectored_struct int_revectored;
	struct revectored_struct int21_revectored;
	struct vm86plus_info_struct vm86plus;
	struct pt_regs *regs32;
};
1165 | void handle_vm86_fault(struct kernel_vm86_regs *, long); | |
1166 | int handle_vm86_trap(struct kernel_vm86_regs *, long, int); | |
1167 | struct pt_regs *save_v86_state(struct kernel_vm86_regs *); | |
1168 | struct task_struct; | |
1169 | void release_vm86_irqs(struct task_struct *); | |
/* Context handed to the x87 math emulator: original EIP plus the frame. */
struct math_emu_info {
	long ___orig_eip;
	union {
		struct pt_regs *regs;
		struct kernel_vm86_regs *vm86;
	};
};
/*
 * Software-reserved bytes of the fxsave area; userspace checks magic1 to
 * discover an extended (xsave) context behind the legacy frame.
 */
struct _fpx_sw_bytes {
	__u32 magic1;
	__u32 extended_size;
	__u64 xstate_bv;
	__u32 xstate_size;
	__u32 padding[7];
};
/* One 80-bit x87 register in fsave layout (64-bit significand + exponent). */
struct _fpreg {
	unsigned short significand[4];
	unsigned short exponent;
};
/* x87 register in fxsave layout: padded to 16 bytes. */
struct _fpxreg {
	unsigned short significand[4];
	unsigned short exponent;
	unsigned short padding[3];
};
/* One 128-bit SSE register. */
struct _xmmreg {
	unsigned long element[4];
};
/*
 * Signal-frame FPU state (32-bit ABI): legacy fsave image followed by the
 * fxsave image.  Field order and sizes are userspace ABI — do not touch.
 */
struct _fpstate {
	unsigned long cw;
	unsigned long sw;
	unsigned long tag;
	unsigned long ipoff;
	unsigned long cssel;
	unsigned long dataoff;
	unsigned long datasel;
	struct _fpreg _st[8];
	unsigned short status;
	unsigned short magic;		/* 0xffff iff the FXSR part below is valid */
	unsigned long _fxsr_env[6];
	unsigned long mxcsr;
	unsigned long reserved;
	struct _fpxreg _fxsr_st[8];
	struct _xmmreg _xmm[8];
	unsigned long padding1[44];
	union {
		unsigned long padding2[12];
		struct _fpx_sw_bytes sw_reserved;
	};
};
/*
 * 32-bit signal context: the CPU state at signal delivery, restored by
 * sigreturn.  Layout is userspace ABI.
 */
struct sigcontext {
	unsigned short gs, __gsh;
	unsigned short fs, __fsh;
	unsigned short es, __esh;
	unsigned short ds, __dsh;
	unsigned long di;
	unsigned long si;
	unsigned long bp;
	unsigned long sp;
	unsigned long bx;
	unsigned long dx;
	unsigned long cx;
	unsigned long ax;
	unsigned long trapno;
	unsigned long err;
	unsigned long ip;
	unsigned short cs, __csh;
	unsigned long flags;
	unsigned long sp_at_signal;
	unsigned short ss, __ssh;
	void *fpstate;			/* NULL, or points at a struct _fpstate */
	unsigned long oldmask;
	unsigned long cr2;
};
/* Header of the xsave area: bitmap of which state components are present. */
struct _xsave_hdr {
	__u64 xstate_bv;
	__u64 reserved1[2];
	__u64 reserved2[5];
};
/* High 128 bits of the 16 YMM registers (AVX). */
struct _ymmh_state {
	__u32 ymmh_space[64];
};
/* Full extended state: legacy fpstate + xsave header + AVX high halves. */
struct _xstate {
	struct _fpstate fpstate;
	struct _xsave_hdr xstate_hdr;
	struct _ymmh_state ymmh;
};
1255 | extern __attribute__((const, noreturn)) | |
1256 | int ____ilog2_NaN(void); | |
1257 | static inline __attribute__((always_inline)) __attribute__((const)) | |
1258 | int __ilog2_u32(u32 n) | |
1259 | { | |
1260 | return fls(n) - 1; | |
1261 | } | |
1262 | static inline __attribute__((always_inline)) __attribute__((const)) | |
1263 | int __ilog2_u64(u64 n) | |
1264 | { | |
1265 | return fls64(n) - 1; | |
1266 | } | |
/*
 * is_power_of_2 - true iff exactly one bit of @n is set.
 * n & (n - 1) clears the lowest set bit, so it is zero only for powers
 * of two; zero itself is explicitly rejected.
 */
static inline __attribute__((always_inline)) __attribute__((const))
bool is_power_of_2(unsigned long n)
{
	if (n == 0)
		return false;
	return (n & (n - 1)) == 0;
}
/*
 * __roundup_pow_of_two - smallest power of two >= @n.
 * Using fls_long(n - 1) maps an exact power of two onto itself.
 */
static inline __attribute__((always_inline)) __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
{
	int top = fls_long(n - 1);
	return 1UL << top;
}
/*
 * __rounddown_pow_of_two - largest power of two <= @n.
 */
static inline __attribute__((always_inline)) __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
{
	int top = fls_long(n);
	return 1UL << (top - 1);
}
1282 | extern const char linux_banner[]; | |
1283 | extern const char linux_proc_banner[]; | |
1284 | extern int console_printk[]; | |
1285 | static inline __attribute__((always_inline)) void console_silent(void) | |
1286 | { | |
1287 | (console_printk[0]) = 0; | |
1288 | } | |
/*
 * console_verbose - raise console_loglevel to 15 (print everything), but
 * only if it is currently non-zero, so an explicitly silenced console
 * stays silent.  The conditional is "if (console_printk[0])" wrapped in
 * the expanded ftrace branch-profiling instrumentation.
 */
static inline __attribute__((always_inline)) void console_verbose(void)
{
	if (__builtin_constant_p((((console_printk[0])))) ? !!(((console_printk[0]))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/printk.h", .line = 41, }; ______r = !!(((console_printk[0]))); ______f.miss_hit[______r]++; ______r; }))
		(console_printk[0]) = 15;
}
/*
 * Deferred printf: a format string plus its captured va_list, expanded
 * later via the %pV printk extension.
 */
struct va_format {
	const char *fmt;
	va_list *va;
};
/*
 * no_printk - swallow a printk() call while still letting the compiler
 * type-check the format string against its arguments.  Always returns 0.
 */
static inline __attribute__((always_inline)) __attribute__ ((format (printf, 1, 2)))
int no_printk(const char *fmt, ...)
{
	(void)fmt;	/* everything is deliberately ignored */
	return 0;
}
1303 | extern __attribute__((regparm(0))) __attribute__ ((format (printf, 1, 2))) | |
1304 | void early_printk(const char *fmt, ...); | |
1305 | extern int printk_needs_cpu(int cpu); | |
1306 | extern void printk_tick(void); | |
1307 | __attribute__((regparm(0))) __attribute__ ((format (printf, 1, 0))) | |
1308 | int vprintk(const char *fmt, va_list args); | |
1309 | __attribute__((regparm(0))) __attribute__ ((format (printf, 1, 2))) __attribute__((__cold__)) | |
1310 | int printk(const char *fmt, ...); | |
1311 | extern int __printk_ratelimit(const char *func); | |
1312 | extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, | |
1313 | unsigned int interval_msec); | |
1314 | extern int printk_delay_msec; | |
1315 | extern int dmesg_restrict; | |
1316 | extern int kptr_restrict; | |
1317 | void log_buf_kexec_setup(void); | |
1318 | void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_log_buf(int early); | |
1319 | extern void dump_stack(void) __attribute__((__cold__)); | |
/* Prefix styles for print_hex_dump(): none, absolute address, or offset. */
enum {
	DUMP_PREFIX_NONE,
	DUMP_PREFIX_ADDRESS,
	DUMP_PREFIX_OFFSET
};
1325 | extern void hex_dump_to_buffer(const void *buf, size_t len, | |
1326 | int rowsize, int groupsize, | |
1327 | char *linebuf, size_t linebuflen, bool ascii); | |
1328 | extern void print_hex_dump(const char *level, const char *prefix_str, | |
1329 | int prefix_type, int rowsize, int groupsize, | |
1330 | const void *buf, size_t len, bool ascii); | |
1331 | extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, | |
1332 | const void *buf, size_t len); | |
1333 | extern long long dynamic_debug_enabled; | |
1334 | extern long long dynamic_debug_enabled2; | |
/*
 * One dynamic-debug (pr_debug) call site.  Instances live in a dedicated
 * section and are toggled at runtime via the dynamic_debug control file.
 */
struct _ddebug {
	const char *modname;
	const char *function;
	const char *filename;
	const char *format;
	unsigned int lineno:24;		/* source line of the call site */
	unsigned int flags:8;		/* _DPRINTK_FLAGS_* control bits */
	char enabled;
} __attribute__((aligned(8)));
1344 | int ddebug_add_module(struct _ddebug *tab, unsigned int n, | |
1345 | const char *modname); | |
/*
 * Stub used when dynamic debug is not compiled in: there is nothing to
 * unregister, so report success unconditionally.
 */
static inline __attribute__((always_inline)) int ddebug_remove_module(const char *mod)
{
	(void)mod;
	return 0;
}
/*
 * One BUG()/WARN() site, emitted into the __bug_table section so the trap
 * handler can map a faulting address back to file/line information.
 */
struct bug_entry {
	unsigned long bug_addr;		/* address of the trapping instruction */
	const char *file;
	unsigned short line;
	unsigned short flags;		/* e.g. whether this is a warning */
};
1356 | extern void warn_slowpath_fmt(const char *file, const int line, | |
1357 | const char *fmt, ...) __attribute__((format(printf, 3, 4))); | |
1358 | extern void warn_slowpath_fmt_taint(const char *file, const int line, | |
1359 | unsigned taint, const char *fmt, ...) | |
1360 | __attribute__((format(printf, 4, 5))); | |
1361 | extern void warn_slowpath_null(const char *file, const int line); | |
/*
 * div_u64_rem - 64/32 -> 64 division with remainder, for 32-bit x86 which
 * has no native 64-by-32 divide.  The upper word is divided first in C so
 * that the following "divl" (which requires quotient to fit in 32 bits)
 * cannot fault; the if() is "if (upper >= divisor)" behind the expanded
 * ftrace branch-profiling macros.
 */
static inline __attribute__((always_inline)) u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	/* Little-endian view: v32[0] is the low word, v32[1] the high word. */
	union {
		u64 v64;
		u32 v32[2];
	} d = { dividend };
	u32 upper;
	upper = d.v32[1];
	d.v32[1] = 0;
	if (__builtin_constant_p(((upper >= divisor))) ? !!((upper >= divisor)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/div64.h", .line = 46, }; ______r = !!((upper >= divisor)); ______f.miss_hit[______r]++; ______r; })) {
		d.v32[1] = upper / divisor;
		upper %= divisor;
	}
	/* divl: EDX:EAX / divisor -> quotient in EAX, remainder in EDX. */
	asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
		"rm" (divisor), "0" (d.v32[0]), "1" (upper));
	return d.v64;
}
1379 | struct completion; | |
1380 | struct pt_regs; | |
1381 | struct user; | |
1382 | void __might_sleep(const char *file, int line, int preempt_offset); | |
1383 | void might_fault(void); | |
1384 | extern struct atomic_notifier_head panic_notifier_list; | |
1385 | extern long (*panic_blink)(int state); | |
1386 | void panic(const char * fmt, ...) | |
1387 | __attribute__ ((noreturn, format (printf, 1, 2))) __attribute__((__cold__)); | |
1388 | extern void oops_enter(void); | |
1389 | extern void oops_exit(void); | |
1390 | void print_oops_end_marker(void); | |
1391 | extern int oops_may_print(void); | |
1392 | void do_exit(long error_code) | |
1393 | __attribute__((noreturn)); | |
1394 | void complete_and_exit(struct completion *, long) | |
1395 | __attribute__((noreturn)); | |
1396 | int __attribute__((warn_unused_result)) _kstrtoul(const char *s, unsigned int base, unsigned long *res); | |
1397 | int __attribute__((warn_unused_result)) _kstrtol(const char *s, unsigned int base, long *res); | |
1398 | int __attribute__((warn_unused_result)) kstrtoull(const char *s, unsigned int base, unsigned long long *res); | |
1399 | int __attribute__((warn_unused_result)) kstrtoll(const char *s, unsigned int base, long long *res); | |
/*
 * kstrtoul - parse an unsigned long.  When unsigned long and unsigned long
 * long have identical size/alignment (always true on this 32/64-bit split
 * only for 64-bit; the test is evaluated at compile time) the 64-bit parser
 * is reused directly; otherwise the dedicated _kstrtoul() runs.  The huge
 * conditional is just that sizeof/__alignof__ comparison wrapped in the
 * expanded ftrace branch-profiling macros.
 */
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
	if (__builtin_constant_p(((sizeof(unsigned long) == sizeof(unsigned long long) && __alignof__(unsigned long) == __alignof__(unsigned long long)))) ? !!((sizeof(unsigned long) == sizeof(unsigned long long) && __alignof__(unsigned long) == __alignof__(unsigned long long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
	"include/linux/kernel.h"
	, .line =
	204
	, }; ______r = !!((sizeof(unsigned long) == sizeof(unsigned long long) && __alignof__(unsigned long) == __alignof__(unsigned long long))); ______f.miss_hit[______r]++; ______r; }))
		return kstrtoull(s, base, (unsigned long long *)res);
	else
		return _kstrtoul(s, base, res);
}
/*
 * kstrtol - signed counterpart of kstrtoul; same compile-time dispatch
 * between kstrtoll() and _kstrtol().
 */
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtol(const char *s, unsigned int base, long *res)
{
	if (__builtin_constant_p(((sizeof(long) == sizeof(long long) && __alignof__(long) == __alignof__(long long)))) ? !!((sizeof(long) == sizeof(long long) && __alignof__(long) == __alignof__(long long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
	"include/linux/kernel.h"
	, .line =
	217
	, }; ______r = !!((sizeof(long) == sizeof(long long) && __alignof__(long) == __alignof__(long long))); ______f.miss_hit[______r]++; ______r; }))
		return kstrtoll(s, base, (long long *)res);
	else
		return _kstrtol(s, base, res);
}
1422 | int __attribute__((warn_unused_result)) kstrtouint(const char *s, unsigned int base, unsigned int *res); | |
1423 | int __attribute__((warn_unused_result)) kstrtoint(const char *s, unsigned int base, int *res); | |
1424 | static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou64(const char *s, unsigned int base, u64 *res) | |
1425 | { | |
1426 | return kstrtoull(s, base, res); | |
1427 | } | |
1428 | static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos64(const char *s, unsigned int base, s64 *res) | |
1429 | { | |
1430 | return kstrtoll(s, base, res); | |
1431 | } | |
1432 | static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou32(const char *s, unsigned int base, u32 *res) | |
1433 | { | |
1434 | return kstrtouint(s, base, res); | |
1435 | } | |
1436 | static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos32(const char *s, unsigned int base, s32 *res) | |
1437 | { | |
1438 | return kstrtoint(s, base, res); | |
1439 | } | |
1440 | int __attribute__((warn_unused_result)) kstrtou16(const char *s, unsigned int base, u16 *res); | |
1441 | int __attribute__((warn_unused_result)) kstrtos16(const char *s, unsigned int base, s16 *res); | |
1442 | int __attribute__((warn_unused_result)) kstrtou8(const char *s, unsigned int base, u8 *res); | |
1443 | int __attribute__((warn_unused_result)) kstrtos8(const char *s, unsigned int base, s8 *res); | |
1444 | int __attribute__((warn_unused_result)) kstrtoull_from_user(const char *s, size_t count, unsigned int base, unsigned long long *res); | |
1445 | int __attribute__((warn_unused_result)) kstrtoll_from_user(const char *s, size_t count, unsigned int base, long long *res); | |
1446 | int __attribute__((warn_unused_result)) kstrtoul_from_user(const char *s, size_t count, unsigned int base, unsigned long *res); | |
1447 | int __attribute__((warn_unused_result)) kstrtol_from_user(const char *s, size_t count, unsigned int base, long *res); | |
1448 | int __attribute__((warn_unused_result)) kstrtouint_from_user(const char *s, size_t count, unsigned int base, unsigned int *res); | |
1449 | int __attribute__((warn_unused_result)) kstrtoint_from_user(const char *s, size_t count, unsigned int base, int *res); | |
1450 | int __attribute__((warn_unused_result)) kstrtou16_from_user(const char *s, size_t count, unsigned int base, u16 *res); | |
1451 | int __attribute__((warn_unused_result)) kstrtos16_from_user(const char *s, size_t count, unsigned int base, s16 *res); | |
1452 | int __attribute__((warn_unused_result)) kstrtou8_from_user(const char *s, size_t count, unsigned int base, u8 *res); | |
1453 | int __attribute__((warn_unused_result)) kstrtos8_from_user(const char *s, size_t count, unsigned int base, s8 *res); | |
1454 | static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou64_from_user(const char *s, size_t count, unsigned int base, u64 *res) | |
1455 | { | |
1456 | return kstrtoull_from_user(s, count, base, res); | |
1457 | } | |
1458 | static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos64_from_user(const char *s, size_t count, unsigned int base, s64 *res) | |
1459 | { | |
1460 | return kstrtoll_from_user(s, count, base, res); | |
1461 | } | |
1462 | static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou32_from_user(const char *s, size_t count, unsigned int base, u32 *res) | |
1463 | { | |
1464 | return kstrtouint_from_user(s, count, base, res); | |
1465 | } | |
1466 | static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos32_from_user(const char *s, size_t count, unsigned int base, s32 *res) | |
1467 | { | |
1468 | return kstrtoint_from_user(s, count, base, res); | |
1469 | } | |
1470 | extern unsigned long simple_strtoul(const char *,char **,unsigned int); | |
1471 | extern long simple_strtol(const char *,char **,unsigned int); | |
1472 | extern unsigned long long simple_strtoull(const char *,char **,unsigned int); | |
1473 | extern long long simple_strtoll(const char *,char **,unsigned int); | |
1474 | extern int sprintf(char * buf, const char * fmt, ...) | |
1475 | __attribute__ ((format (printf, 2, 3))); | |
1476 | extern int vsprintf(char *buf, const char *, va_list) | |
1477 | __attribute__ ((format (printf, 2, 0))); | |
1478 | extern int snprintf(char * buf, size_t size, const char * fmt, ...) | |
1479 | __attribute__ ((format (printf, 3, 4))); | |
1480 | extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |
1481 | __attribute__ ((format (printf, 3, 0))); | |
1482 | extern int scnprintf(char * buf, size_t size, const char * fmt, ...) | |
1483 | __attribute__ ((format (printf, 3, 4))); | |
1484 | extern int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) | |
1485 | __attribute__ ((format (printf, 3, 0))); | |
1486 | extern char *kasprintf(gfp_t gfp, const char *fmt, ...) | |
1487 | __attribute__ ((format (printf, 2, 3))); | |
1488 | extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); | |
1489 | extern int sscanf(const char *, const char *, ...) | |
1490 | __attribute__ ((format (scanf, 2, 3))); | |
1491 | extern int vsscanf(const char *, const char *, va_list) | |
1492 | __attribute__ ((format (scanf, 2, 0))); | |
1493 | extern int get_option(char **str, int *pint); | |
1494 | extern char *get_options(const char *str, int nints, int *ints); | |
1495 | extern unsigned long long memparse(const char *ptr, char **retptr); | |
1496 | extern int core_kernel_text(unsigned long addr); | |
1497 | extern int core_kernel_data(unsigned long addr); | |
1498 | extern int __kernel_text_address(unsigned long addr); | |
1499 | extern int kernel_text_address(unsigned long addr); | |
1500 | extern int func_ptr_is_kernel_text(void *ptr); | |
1501 | struct pid; | |
1502 | extern struct pid *session_of_pgrp(struct pid *pgrp); | |
1503 | unsigned long int_sqrt(unsigned long); | |
1504 | extern void bust_spinlocks(int yes); | |
1505 | extern void wake_up_klogd(void); | |
1506 | extern int oops_in_progress; | |
1507 | extern int panic_timeout; | |
1508 | extern int panic_on_oops; | |
1509 | extern int panic_on_unrecovered_nmi; | |
1510 | extern int panic_on_io_nmi; | |
1511 | extern const char *print_tainted(void); | |
1512 | extern void add_taint(unsigned flag); | |
1513 | extern int test_taint(unsigned flag); | |
1514 | extern unsigned long get_taint(void); | |
1515 | extern int root_mountflags; | |
1516 | extern bool early_boot_irqs_disabled; | |
/* Global boot/shutdown phase of the kernel, exported as system_state. */
extern enum system_states {
	SYSTEM_BOOTING,
	SYSTEM_RUNNING,
	SYSTEM_HALT,
	SYSTEM_POWER_OFF,
	SYSTEM_RESTART,
	SYSTEM_SUSPEND_DISK,
} system_state;
1525 | extern const char hex_asc[]; | |
1526 | static inline __attribute__((always_inline)) char *pack_hex_byte(char *buf, u8 byte) | |
1527 | { | |
1528 | *buf++ = hex_asc[((byte) & 0xf0) >> 4]; | |
1529 | *buf++ = hex_asc[((byte) & 0x0f)]; | |
1530 | return buf; | |
1531 | } | |
1532 | extern int hex_to_bin(char ch); | |
1533 | extern void hex2bin(u8 *dst, const char *src, size_t count); | |
1534 | void tracing_on(void); | |
1535 | void tracing_off(void); | |
1536 | void tracing_off_permanent(void); | |
1537 | int tracing_is_on(void); | |
/* How much of the ftrace ring buffer ftrace_dump() should emit. */
enum ftrace_dump_mode {
	DUMP_NONE,
	DUMP_ALL,	/* buffers of all CPUs */
	DUMP_ORIG,	/* only the CPU that triggered the dump */
};
1543 | extern void tracing_start(void); | |
1544 | extern void tracing_stop(void); | |
1545 | extern void ftrace_off_permanent(void); | |
/*
 * Deliberately empty: exists only so the compiler applies printf-style
 * type checking to trace_printk() format strings.  Never does any work.
 */
static inline __attribute__((always_inline)) void __attribute__ ((format (printf, 1, 2)))
____trace_printk_check_format(const char *fmt, ...)
{
	(void)fmt;
}
1550 | extern int | |
1551 | __trace_bprintk(unsigned long ip, const char *fmt, ...) | |
1552 | __attribute__ ((format (printf, 2, 3))); | |
1553 | extern int | |
1554 | __trace_printk(unsigned long ip, const char *fmt, ...) | |
1555 | __attribute__ ((format (printf, 2, 3))); | |
1556 | extern void trace_dump_stack(void); | |
1557 | extern int | |
1558 | __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); | |
1559 | extern int | |
1560 | __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); | |
1561 | extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); | |
struct sysinfo;
extern int do_sysinfo(struct sysinfo *info);
/*
 * System statistics returned by the sysinfo(2) syscall.  Layout is
 * userspace ABI; memory fields are in units of mem_unit bytes and _f pads
 * the struct to a fixed 64 bytes.
 */
struct sysinfo {
	long uptime;			/* seconds since boot */
	unsigned long loads[3];		/* 1/5/15-minute load averages */
	unsigned long totalram;
	unsigned long freeram;
	unsigned long sharedram;
	unsigned long bufferram;
	unsigned long totalswap;
	unsigned long freeswap;
	unsigned short procs;		/* number of current processes */
	unsigned short pad;
	unsigned long totalhigh;
	unsigned long freehigh;
	unsigned int mem_unit;		/* size of a memory unit in bytes */
	char _f[20-2*sizeof(long)-sizeof(int)];	/* pad to 64 bytes */
};
1580 | extern int __build_bug_on_failed; | |
1581 | extern void __bad_percpu_size(void); | |
/*
 * Test bit @nr of a per-cpu bitmap when @nr is a compile-time constant.
 * The statement expression is the expanded percpu_from_op(): a single
 * %fs-relative mov of the containing word (this 32-bit build uses %fs as
 * the per-cpu segment), after which the bit is masked out in C.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) int x86_this_cpu_constant_test_bit(unsigned int nr,
                        const unsigned long *addr)
{
	unsigned long *a = (unsigned long *)addr + nr / 32;
	return ((1UL << (nr % 32)) & ({ typeof(*a) pfo_ret__; switch (sizeof(*a)) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m" (*a)); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; default: __bad_percpu_size(); } pfo_ret__; })) != 0;
}
/*
 * Runtime-@nr variant: "bt" against the %fs-relative bitmap, then "sbb"
 * broadcasts the carry flag into oldbit (0 or -1, i.e. non-zero if set).
 */
static inline __attribute__((always_inline)) int x86_this_cpu_variable_test_bit(int nr,
                        const unsigned long *addr)
{
	int oldbit;
	asm volatile("bt ""%%""fs"":" "%P" "2"",%1\n\t"
			"sbb %0,%0"
			: "=r" (oldbit)
			: "m" (*(unsigned long *)addr), "Ir" (nr));
	return oldbit;
}
1598 | extern unsigned long __per_cpu_offset[8]; | |
1599 | extern void setup_per_cpu_areas(void); | |
1600 | extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) this_cpu_off; | |
1601 | struct task_struct; | |
1602 | extern __attribute__((section(".data..percpu" ""))) __typeof__(struct task_struct *) current_task; | |
/*
 * get_current - return the task_struct of the running task, read from the
 * per-cpu variable current_task with a single %fs-relative mov (expanded
 * percpu_from_op(); only the pointer-sized case is ever taken).
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) struct task_struct *get_current(void)
{
	return ({ typeof(current_task) pfo_ret__; switch (sizeof(current_task)) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "p" (&(current_task))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; default: __bad_percpu_size(); } pfo_ret__; });
}
1607 | extern void __xchg_wrong_size(void); | |
/*
 * set_64bit - atomically store a 64-bit value on 32-bit x86.
 * Loops lock;cmpxchg8b until the compare succeeds, which it must
 * eventually since each failure reloads the current value into EDX:EAX
 * ("+A" prev).  The .smp_locks section entry lets the kernel patch the
 * lock prefix out on uniprocessor machines.
 */
static inline __attribute__((always_inline)) void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low = value;
	u32 high = value >> 32;
	u64 prev = *ptr;
	asm volatile("\n1:\t"
		     ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
extern void __cmpxchg_wrong_size(void);
/*
 * __cmpxchg64 - atomic 64-bit compare-and-exchange (locked cmpxchg8b).
 * Returns the value found at *ptr; the swap happened iff that equals @old.
 */
static inline __attribute__((always_inline)) u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
/*
 * __cmpxchg64_local - same operation without the lock prefix: atomic only
 * with respect to the local CPU (no cross-CPU guarantee).
 */
static inline __attribute__((always_inline)) u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
1645 | extern const unsigned char * const *ideal_nops; | |
1646 | extern void arch_init_ideal_nops(void); | |
/*
 * Native (non-paravirt) IRQ-flag primitives.  All are thin inline-asm
 * wrappers; the "memory" clobbers keep the compiler from moving memory
 * accesses across the interrupt-state changes.
 */
static inline __attribute__((always_inline)) unsigned long native_save_fl(void)
{
	unsigned long flags;
	/* Read EFLAGS by pushing it and popping into a register/memory slot. */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     :
		     : "memory");
	return flags;
}
static inline __attribute__((always_inline)) void native_restore_fl(unsigned long flags)
{
	/* Write EFLAGS back; "cc" because popf rewrites the condition codes. */
	asm volatile("push %0 ; popf"
		     :
		     :"g" (flags)
		     :"memory", "cc");
}
static inline __attribute__((always_inline)) void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
static inline __attribute__((always_inline)) void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
static inline __attribute__((always_inline)) void native_safe_halt(void)
{
	/* sti;hlt as a pair: the interrupt window opens only at the hlt. */
	asm volatile("sti; hlt": : :"memory");
}
static inline __attribute__((always_inline)) void native_halt(void)
{
	/* Halt with interrupts left as they are. */
	asm volatile("hlt": : :"memory");
}
/* PAE page-table entry scalar types: all levels hold 64-bit entries. */
typedef u64 pteval_t;
typedef u64 pmdval_t;
typedef u64 pudval_t;
typedef u64 pgdval_t;
typedef u64 pgprotval_t;
/* A PTE, viewable either as two 32-bit halves or one 64-bit value. */
typedef union {
	struct {
		unsigned long pte_low, pte_high;
	};
	pteval_t pte;
} pte_t;
extern bool __vmalloc_start_set;
/* Wrapper structs so protection bits and pgd entries are distinct types. */
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
typedef struct { pgdval_t pgd; } pgd_t;
1694 | static inline __attribute__((always_inline)) pgd_t native_make_pgd(pgdval_t val) | |
1695 | { | |
1696 | return (pgd_t) { val }; | |
1697 | } | |
1698 | static inline __attribute__((always_inline)) pgdval_t native_pgd_val(pgd_t pgd) | |
1699 | { | |
1700 | return pgd.pgd; | |
1701 | } | |
/*
 * Flag bits of a pgd entry.  The expanded constant is ~PTE_PFN_MASK:
 * it clears the page-frame bits (bits 12 up to the 44-bit physical
 * limit), leaving only the attribute flags.
 */
static inline __attribute__((always_inline)) pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))));
}
/*
 * The pud level is folded into the pgd here (pud_t just wraps pgd_t),
 * so a pgd entry is always treated as present and never none/bad,
 * and clearing it is a no-op.
 */
typedef struct { pgd_t pgd; } pud_t;
static inline __attribute__((always_inline)) int pgd_none(pgd_t pgd) { return 0; }
static inline __attribute__((always_inline)) int pgd_bad(pgd_t pgd) { return 0; }
static inline __attribute__((always_inline)) int pgd_present(pgd_t pgd) { return 1; }
static inline __attribute__((always_inline)) void pgd_clear(pgd_t *pgd) { }
1711 | static inline __attribute__((always_inline)) pud_t * pud_offset(pgd_t * pgd, unsigned long address) | |
1712 | { | |
1713 | return (pud_t *)pgd; | |
1714 | } | |
1715 | static inline __attribute__((always_inline)) pudval_t native_pud_val(pud_t pud) | |
1716 | { | |
1717 | return native_pgd_val(pud.pgd); | |
1718 | } | |
/* A pmd entry: a bare 64-bit value wrapped for type safety. */
typedef struct { pmdval_t pmd; } pmd_t;
/* Wrap a raw pmdval_t in the typed pmd_t container. */
static inline __attribute__((always_inline)) pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}
/* Unwrap a pmd_t back to its raw pmdval_t. */
static inline __attribute__((always_inline)) pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
/* Flag bits of a pud entry (raw value with the page-frame bits cleared). */
static inline __attribute__((always_inline)) pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))));
}
/* Flag bits of a pmd entry (raw value with the page-frame bits cleared). */
static inline __attribute__((always_inline)) pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))));
}
1736 | static inline __attribute__((always_inline)) pte_t native_make_pte(pteval_t val) | |
1737 | { | |
1738 | return (pte_t) { .pte = val }; | |
1739 | } | |
1740 | static inline __attribute__((always_inline)) pteval_t native_pte_val(pte_t pte) | |
1741 | { | |
1742 | return pte.pte; | |
1743 | } | |
1744 | static inline __attribute__((always_inline)) pteval_t pte_flags(pte_t pte) | |
1745 | { | |
1746 | return native_pte_val(pte) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1))))); | |
1747 | } | |
/* Opaque handle for a page used as a page table. */
typedef struct page *pgtable_t;
/* NX / pte-feature state and protection helpers (defined elsewhere). */
extern pteval_t __supported_pte_mask;
extern void set_nx(void);
extern int nx_enabled;
extern pgprot_t pgprot_writecombine(pgprot_t prot);
struct file;
/* Protections for mapping physical memory ranges into user space. */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
void set_pte_vaddr(unsigned long vaddr, pte_t pte);
/* Early page-table bring-up hooks provided by the native backend. */
extern void native_pagetable_reserve(u64 start, u64 end);
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);
/* Page-mapping levels, as reported via lookup_address()'s *level out-param. */
enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};
extern void update_page_count(int level, unsigned long pages);
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
/*
 * One 8-byte segment descriptor, viewable either as two raw 32-bit
 * words (a/b) or as the decoded descriptor bit-fields.
 */
struct desc_struct {
	union {
		struct {
			unsigned int a;
			unsigned int b;
		};
		struct {
			u16 limit0;
			u16 base0;
			unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
			unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
		};
	};
} __attribute__((packed));
/* x86 gate-descriptor type codes. */
enum {
	GATE_INTERRUPT = 0xE,
	GATE_TRAP = 0xF,
	GATE_CALL = 0xC,
	GATE_TASK = 0x5,
};
/* 16-byte 64-bit-mode gate descriptor (handler offset split in three parts). */
struct gate_struct64 {
	u16 offset_low;
	u16 segment;
	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
	u16 offset_middle;
	u32 offset_high;
	u32 zero1;
} __attribute__((packed));
/* System-descriptor type codes. */
enum {
	DESC_TSS = 0x9,
	DESC_LDT = 0x2,
	DESCTYPE_S = 0x10,
};
/* 16-byte 64-bit-mode LDT/TSS descriptor (base split in four parts). */
struct ldttss_desc64 {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));
/* In this configuration, gates and LDT/TSS descriptors share desc_struct. */
typedef struct desc_struct gate_desc;
typedef struct desc_struct ldt_desc;
typedef struct desc_struct tss_desc;
/* Descriptor-table pointer: 16-bit limit plus base address (lgdt/lidt form). */
struct desc_ptr {
	unsigned short size;
	unsigned long address;
} __attribute__((packed)) ;
/*
 * Fixed per-context atomic-mapping slot indices (one per usage class);
 * KM_TYPE_NR is the total number of slots.
 */
enum km_type {
	KM_BOUNCE_READ,
	KM_SKB_SUNRPC_DATA,
	KM_SKB_DATA_SOFTIRQ,
	KM_USER0,
	KM_USER1,
	KM_BIO_SRC_IRQ,
	KM_BIO_DST_IRQ,
	KM_PTE0,
	KM_PTE1,
	KM_IRQ0,
	KM_IRQ1,
	KM_SOFTIRQ0,
	KM_SOFTIRQ1,
	KM_SYNC_ICACHE,
	KM_SYNC_DCACHE,
	KM_UML_USERCOPY,
	KM_IRQ_PTE,
	KM_NMI,
	KM_NMI_PTE,
	KM_KDB,
	KM_TYPE_NR
};
/* Forward declarations for types referenced only through pointers below. */
struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
/*
 * Holds a paravirt op invoked with a special calling convention
 * (NOTE(review): presumably callee-saved registers — see the paravirt
 * patching machinery; only the raw function pointer is stored here).
 */
struct paravirt_callee_save {
	void *func;
};
/* General description of the active paravirt backend. */
struct pv_info {
	unsigned int kernel_rpl;      /* requested privilege level of kernel CS */
	int shared_kernel_pmd;
	int paravirt_enabled;
	const char *name;             /* backend name, for reporting */
};
/*
 * Boot-time patching hook: rewrite the instruction site at @addr/@insnbuf
 * for op @type; returns an unsigned length (cf. paravirt_patch_* below).
 */
struct pv_init_ops {
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);
};
/* Enter/leave hooks bracketing a lazy (batched) operation mode. */
struct pv_lazy_ops {
	void (*enter)(void);
	void (*leave)(void);
};
/* Timekeeping operations a hypervisor may override. */
struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
	unsigned long (*get_tsc_khz)(void);
};
/*
 * CPU-state paravirt operations: control/debug registers, descriptor
 * tables, MSR/TSC/PMC access and privileged entry/exit sequences.
 */
struct pv_cpu_ops {
	/* Debug and control registers. */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);
	void (*clts)(void);
	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);
	unsigned long (*read_cr4_safe)(void);
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);
	/* Descriptor tables: GDT/IDT/LDT/TR/TLS load, store and entry writes. */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*store_gdt)(struct desc_ptr *);
	void (*store_idt)(struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
	void (*set_iopl_mask)(unsigned mask);
	void (*wbinvd)(void);
	void (*io_delay)(void);
	/* CPUID and MSR/TSC/PMC access. */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*rdmsr_regs)(u32 *regs);
	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
	int (*wrmsr_regs)(u32 *regs);
	u64 (*read_tsc)(void);
	u64 (*read_pmc)(int counter);
	unsigned long long (*read_tscp)(unsigned int *aux);
	/* Kernel entry/exit and context-switch hooks. */
	void (*irq_enable_sysexit)(void);
	void (*usergs_sysret64)(void);
	void (*usergs_sysret32)(void);
	void (*iret)(void);
	void (*swapgs)(void);
	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
};
/* Interrupt-flag primitives, mirroring the native_* helpers above. */
struct pv_irq_ops {
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;
	void (*safe_halt)(void);
	void (*halt)(void);
};
/* Hook invoked around sending the secondary-CPU startup IPI. */
struct pv_apic_ops {
	void (*startup_ipi_hook)(int phys_apicid,
				 unsigned long start_eip,
				 unsigned long start_esp);
};
/*
 * MMU paravirt operations: CR2/CR3 access, address-space lifecycle,
 * TLB maintenance, page-table page accounting and pte/pmd/pud/pgd
 * read/write hooks.
 */
struct pv_mmu_ops {
	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);
	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);
	/* Address-space lifecycle. */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);
	/* TLB maintenance. */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 struct mm_struct *mm,
				 unsigned long va);
	/* Allocation/release notification for page-table pages. */
	int (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	/* Entry write/update hooks. */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
			   pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep);
	void (*pte_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep);
	void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
			   pmd_t *pmdp);
	void (*pmd_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pmd_t *pmdp);
	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);
	/* Value conversions between raw and typed entries. */
	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;
	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);
	void (*set_pud)(pud_t *pudp, pud_t pudval);
	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;
	/* Lazy MMU batching and fixmap installation. */
	struct pv_lazy_ops lazy_mode;
	void (*set_fixmap)(unsigned idx,
			   phys_addr_t phys, pgprot_t flags);
};
struct arch_spinlock;
/* Spinlock operations a hypervisor may override. */
struct pv_lock_ops {
	int (*spin_is_locked)(struct arch_spinlock *lock);
	int (*spin_is_contended)(struct arch_spinlock *lock);
	void (*spin_lock)(struct arch_spinlock *lock);
	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
	int (*spin_trylock)(struct arch_spinlock *lock);
	void (*spin_unlock)(struct arch_spinlock *lock);
};
/* Aggregate of all op groups, in the layout the patcher indexes into. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_apic_ops pv_apic_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};
/* The global paravirt op tables for the running backend. */
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;
/*
 * Helpers for rewriting patchable call sites; each returns an unsigned
 * length (matching the pv_init_ops.patch signature above).
 */
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);
unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);
int paravirt_disable_iospace(void);
/* Which class of operations is currently being batched, if any. */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};
/* Lazy-mode and context-switch entry points. */
enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
/* No-op and identity stubs used as default paravirt ops. */
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);
/* One patchable instruction site; the linker collects them into the
 * __parainstructions[] table below. */
struct paravirt_patch_site {
	u8 *instr;       /* address of the instruction to patch */
	u8 instrtype;    /* which paravirt op this site calls */
	u8 len;          /* space available at the site */
	u16 clobbers;    /* registers the site may clobber */
};
extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
/*
 * Out-of-line bitmap implementations; the inline wrappers below
 * dispatch to these whenever the bitmap is wider than one word.
 */
extern int __bitmap_empty(const unsigned long *bitmap, int bits);
extern int __bitmap_full(const unsigned long *bitmap, int bits);
extern int __bitmap_equal(const unsigned long *bitmap1,
			  const unsigned long *bitmap2, int bits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
				int bits);
extern void __bitmap_shift_right(unsigned long *dst,
				 const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst,
				const unsigned long *src, int shift, int bits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
			const unsigned long *bitmap2, int bits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
			const unsigned long *bitmap2, int bits);
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
			 const unsigned long *bitmap2, int bits);
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
			   const unsigned long *bitmap2, int bits);
extern int __bitmap_intersects(const unsigned long *bitmap1,
			       const unsigned long *bitmap2, int bits);
extern int __bitmap_subset(const unsigned long *bitmap1,
			   const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
/* Region and range helpers. */
extern void bitmap_set(unsigned long *map, int i, int len);
extern void bitmap_clear(unsigned long *map, int start, int nr);
extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
						unsigned long size,
						unsigned long start,
						unsigned int nr,
						unsigned long align_mask);
/* Text formatting/parsing of bitmaps. */
extern int bitmap_scnprintf(char *buf, unsigned int len,
			    const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
			  unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char *ubuf, unsigned int ulen,
			     unsigned long *dst, int nbits);
extern int bitmap_scnlistprintf(char *buf, unsigned int len,
				const unsigned long *src, int nbits);
extern int bitmap_parselist(const char *buf, unsigned long *maskp,
			    int nmaskbits);
extern int bitmap_parselist_user(const char *ubuf, unsigned int ulen,
				 unsigned long *dst, int nbits);
/* Remapping, folding and region allocation. */
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
			 const unsigned long *old, const unsigned long *new, int bits);
extern int bitmap_bitremap(int oldbit,
			   const unsigned long *old, const unsigned long *new, int bits);
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
			const unsigned long *relmap, int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
			int sz, int bits);
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
/*
 * Clear all @nbits bits of @dst.  The large conditional is the expanded,
 * branch-profiled small_const_nbits(nbits) test: single-word constant
 * sizes are zeroed directly, anything else via memset.
 */
static inline __attribute__((always_inline)) void bitmap_zero(unsigned long *dst, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 159, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		*dst = 0UL;
	else {
		int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long);
		__builtin_memset(dst, 0, len);
	}
}
/*
 * Set all @nbits bits of @dst: memset all-but-last words to 0xff when
 * the size is not a small constant, then write the last word through a
 * partial mask so bits beyond @nbits stay clear.
 */
static inline __attribute__((always_inline)) void bitmap_fill(unsigned long *dst, int nbits)
{
	size_t nlongs = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)));
	if (__builtin_constant_p(((!(__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!((!(__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 170, }; ______r = !!((!(__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; })) {
		int len = (nlongs - 1) * sizeof(unsigned long);
		__builtin_memset(dst, 0xff, len);
	}
	dst[nlongs - 1] = ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
}
/* Copy @nbits bits from @src to @dst (word assign or memcpy). */
static inline __attribute__((always_inline)) void bitmap_copy(unsigned long *dst, const unsigned long *src,
			int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 180, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		*dst = *src;
	else {
		int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long);
		__builtin_memcpy(dst, src, len);
	}
}
/* dst = src1 & src2; returns nonzero if the result is not empty. */
static inline __attribute__((always_inline)) int bitmap_and(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 191, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		return (*dst = *src1 & *src2) != 0;
	return __bitmap_and(dst, src1, src2, nbits);
}
/* dst = src1 | src2 over @nbits bits. */
static inline __attribute__((always_inline)) void bitmap_or(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 199, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		*dst = *src1 | *src2;
	else
		__bitmap_or(dst, src1, src2, nbits);
}
/* dst = src1 ^ src2 over @nbits bits. */
static inline __attribute__((always_inline)) void bitmap_xor(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 208, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		*dst = *src1 ^ *src2;
	else
		__bitmap_xor(dst, src1, src2, nbits);
}
/* dst = src1 & ~src2; returns nonzero if the result is not empty. */
static inline __attribute__((always_inline)) int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 217, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		return (*dst = *src1 & ~(*src2)) != 0;
	return __bitmap_andnot(dst, src1, src2, nbits);
}
/* dst = ~src, with bits beyond @nbits masked off in the one-word case. */
static inline __attribute__((always_inline)) void bitmap_complement(unsigned long *dst, const unsigned long *src,
			int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 225, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		*dst = ~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
	else
		__bitmap_complement(dst, src, nbits);
}
/* Nonzero if the first @nbits bits of @src1 and @src2 are equal. */
static inline __attribute__((always_inline)) int bitmap_equal(const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 234, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		return ! ((*src1 ^ *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
	else
		return __bitmap_equal(src1, src2, nbits);
}
/* Nonzero if @src1 and @src2 share at least one set bit within @nbits. */
static inline __attribute__((always_inline)) int bitmap_intersects(const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 243, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		return ((*src1 & *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )) != 0;
	else
		return __bitmap_intersects(src1, src2, nbits);
}
/* Nonzero if every set bit of @src1 (within @nbits) is also set in @src2. */
static inline __attribute__((always_inline)) int bitmap_subset(const unsigned long *src1,
			const unsigned long *src2, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 252, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		return ! ((*src1 & ~(*src2)) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
	else
		return __bitmap_subset(src1, src2, nbits);
}
/* Nonzero if no bit is set within the first @nbits bits of @src. */
static inline __attribute__((always_inline)) int bitmap_empty(const unsigned long *src, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 260, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		return ! (*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
	else
		return __bitmap_empty(src, nbits);
}
/* Nonzero if every bit is set within the first @nbits bits of @src. */
static inline __attribute__((always_inline)) int bitmap_full(const unsigned long *src, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 268, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		return ! (~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
	else
		return __bitmap_full(src, nbits);
}
/* Number of set bits within the first @nbits bits of @src. */
static inline __attribute__((always_inline)) int bitmap_weight(const unsigned long *src, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 276, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		return hweight_long(*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
	return __bitmap_weight(src, nbits);
}
/* dst = src >> n over a @nbits-wide bitmap. */
static inline __attribute__((always_inline)) void bitmap_shift_right(unsigned long *dst,
			const unsigned long *src, int n, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 284, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		*dst = *src >> n;
	else
		__bitmap_shift_right(dst, src, n, nbits);
}
/* dst = src << n over a @nbits-wide bitmap; overflow bits are masked off. */
static inline __attribute__((always_inline)) void bitmap_shift_left(unsigned long *dst,
			const unsigned long *src, int n, int nbits)
{
	if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 293, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
		*dst = (*src << n) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
	else
		__bitmap_shift_left(dst, src, n, nbits);
}
/* Parse a bitmap from an in-kernel buffer (is_user = 0 variant). */
static inline __attribute__((always_inline)) int bitmap_parse(const char *buf, unsigned int buflen,
			unsigned long *maskp, int nmaskbits)
{
	return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
/* Cpumask sized for 8 possible CPUs (fits in a single word here). */
typedef struct cpumask { unsigned long bits[(((8) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } cpumask_t;
extern int nr_cpu_ids;
/* The canonical system-wide CPU state masks. */
extern const struct cpumask *const cpu_possible_mask;
extern const struct cpumask *const cpu_online_mask;
extern const struct cpumask *const cpu_present_mask;
extern const struct cpumask *const cpu_active_mask;
/* Range-check hook for cpu numbers; compiled out in this configuration. */
static inline __attribute__((always_inline)) unsigned int cpumask_check(unsigned int cpu)
{
	return cpu;
}
/* Number of the first cpu set in @srcp (searched over all 8 bits). */
static inline __attribute__((always_inline)) unsigned int cpumask_first(const struct cpumask *srcp)
{
	return find_first_bit(((srcp)->bits), 8);
}
/*
 * cpumask_next - index of the next set bit in *srcp after position n.
 * n may be -1 to begin a scan at bit 0. The profiled conditional is the
 * expanded likely(n != -1) macro; it merely routes real cpu numbers
 * through cpumask_check() (a no-op here) while recording branch stats.
 * Scan width is 8 bits (the expanded nr_cpumask_bits).
 */
static inline __attribute__((always_inline)) unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
if (__builtin_constant_p(((n != -1))) ? !!((n != -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/cpumask.h", .line = 172, }; ______r = !!((n != -1)); ______f.miss_hit[______r]++; ______r; }))
cpumask_check(n);
return find_next_bit(((srcp)->bits), 8, n+1);
}
/*
 * cpumask_next_zero - like cpumask_next, but finds the next CLEAR bit
 * after position n. Same -1 convention and branch-profiling expansion.
 */
static inline __attribute__((always_inline)) unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
if (__builtin_constant_p(((n != -1))) ? !!((n != -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/cpumask.h", .line = 187, }; ______r = !!((n != -1)); ______f.miss_hit[______r]++; ______r; }))
cpumask_check(n);
return find_next_zero_bit(((srcp)->bits), 8, n+1);
}
/* Out-of-line search helpers, defined in lib/cpumask.c. */
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
/* cpumask_set_cpu - atomically set bit 'cpu' in *dstp (via set_bit). */
static inline __attribute__((always_inline)) void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), ((dstp)->bits));
}
/* cpumask_clear_cpu - atomically clear bit 'cpu' in *dstp. */
static inline __attribute__((always_inline)) void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
clear_bit(cpumask_check(cpu), ((dstp)->bits));
}
/* cpumask_test_and_set_cpu - atomically set bit 'cpu'; returns old value. */
static inline __attribute__((always_inline)) int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), ((cpumask)->bits));
}
/* cpumask_test_and_clear_cpu - atomically clear bit 'cpu'; returns old value. */
static inline __attribute__((always_inline)) int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), ((cpumask)->bits));
}
/* All wrappers below operate on the 8-bit mask width (expanded
 * nr_cpumask_bits) and delegate to the generic bitmap library. */
/* cpumask_setall - set every cpu bit in *dstp. */
static inline __attribute__((always_inline)) void cpumask_setall(struct cpumask *dstp)
{
bitmap_fill(((dstp)->bits), 8);
}
/* cpumask_clear - clear every cpu bit in *dstp. */
static inline __attribute__((always_inline)) void cpumask_clear(struct cpumask *dstp)
{
bitmap_zero(((dstp)->bits), 8);
}
/* cpumask_and - *dstp = *src1p & *src2p; returns bitmap_and's result
 * (nonzero when the destination is non-empty, per the bitmap API). */
static inline __attribute__((always_inline)) int cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_and(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), 8);
}
/* cpumask_or - *dstp = *src1p | *src2p. */
static inline __attribute__((always_inline)) void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
const struct cpumask *src2p)
{
bitmap_or(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), 8);
}
/* cpumask_xor - *dstp = *src1p ^ *src2p. */
static inline __attribute__((always_inline)) void cpumask_xor(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
bitmap_xor(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), 8);
}
/* cpumask_andnot - *dstp = *src1p & ~*src2p; returns bitmap_andnot's result. */
static inline __attribute__((always_inline)) int cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_andnot(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), 8);
}
/* cpumask_complement - *dstp = ~*srcp (within the 8-bit mask width). */
static inline __attribute__((always_inline)) void cpumask_complement(struct cpumask *dstp,
const struct cpumask *srcp)
{
bitmap_complement(((dstp)->bits), ((srcp)->bits),
8);
}
/* cpumask_equal - true when both masks have identical bits. */
static inline __attribute__((always_inline)) bool cpumask_equal(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_equal(((src1p)->bits), ((src2p)->bits),
8);
}
/* cpumask_intersects - true when the masks share at least one set bit. */
static inline __attribute__((always_inline)) bool cpumask_intersects(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_intersects(((src1p)->bits), ((src2p)->bits),
8);
}
/* cpumask_subset - nonzero when every bit of *src1p is also set in *src2p. */
static inline __attribute__((always_inline)) int cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_subset(((src1p)->bits), ((src2p)->bits),
8);
}
/* cpumask_empty - true when no bit is set. */
static inline __attribute__((always_inline)) bool cpumask_empty(const struct cpumask *srcp)
{
return bitmap_empty(((srcp)->bits), 8);
}
/* cpumask_full - true when all 8 mask bits are set. */
static inline __attribute__((always_inline)) bool cpumask_full(const struct cpumask *srcp)
{
return bitmap_full(((srcp)->bits), 8);
}
/* cpumask_weight - number of set bits (population count). */
static inline __attribute__((always_inline)) unsigned int cpumask_weight(const struct cpumask *srcp)
{
return bitmap_weight(((srcp)->bits), 8);
}
/* cpumask_shift_right - *dstp = *srcp >> n. */
static inline __attribute__((always_inline)) void cpumask_shift_right(struct cpumask *dstp,
const struct cpumask *srcp, int n)
{
bitmap_shift_right(((dstp)->bits), ((srcp)->bits), n,
8);
}
/* cpumask_shift_left - *dstp = *srcp << n (bits above the width drop off). */
static inline __attribute__((always_inline)) void cpumask_shift_left(struct cpumask *dstp,
const struct cpumask *srcp, int n)
{
bitmap_shift_left(((dstp)->bits), ((srcp)->bits), n,
8);
}
2365 | static inline __attribute__((always_inline)) void cpumask_copy(struct cpumask *dstp, | |
2366 | const struct cpumask *srcp) | |
2367 | { | |
2368 | bitmap_copy(((dstp)->bits), ((srcp)->bits), 8); | |
2369 | } | |
/* cpumask_scnprintf - format *srcp as hex into buf; returns chars written. */
static inline __attribute__((always_inline)) int cpumask_scnprintf(char *buf, int len,
const struct cpumask *srcp)
{
return bitmap_scnprintf(buf, len, ((srcp)->bits), 8);
}
/* cpumask_parse_user - parse a hex mask from a user-space buffer into *dstp. */
static inline __attribute__((always_inline)) int cpumask_parse_user(const char *buf, int len,
struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, ((dstp)->bits), 8);
}
/* cpumask_parselist_user - parse a cpu list (e.g. "0-3,5") from user space. */
static inline __attribute__((always_inline)) int cpumask_parselist_user(const char *buf, int len,
struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, ((dstp)->bits),
8);
}
/* cpulist_scnprintf - format *srcp as a human-readable cpu list into buf. */
static inline __attribute__((always_inline)) int cpulist_scnprintf(char *buf, int len,
const struct cpumask *srcp)
{
return bitmap_scnlistprintf(buf, len, ((srcp)->bits),
8);
}
/* cpulist_parse - parse a cpu-list string from a kernel buffer into *dstp. */
static inline __attribute__((always_inline)) int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, ((dstp)->bits), 8);
}
/*
 * cpumask_size - bytes occupied by a struct cpumask's bit array:
 * 8 mask bits (the expanded NR_CPUS) rounded up to whole unsigned longs.
 */
static inline __attribute__((always_inline)) size_t cpumask_size(void)
{
size_t bits_per_long = 8 * sizeof(long);
size_t nlongs = (8 + bits_per_long - 1) / bits_per_long;
return nlongs * sizeof(long);
}
/* cpumask_var_t: with small NR_CPUS the "variable" mask is just an
 * on-stack one-element array, so the alloc/free helpers below are no-ops
 * (the zalloc variants still clear the mask) and always report success. */
typedef struct cpumask cpumask_var_t[1];
/* alloc_cpumask_var - no allocation needed; always succeeds. */
static inline __attribute__((always_inline)) bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
}
/* alloc_cpumask_var_node - NUMA-node variant; also a no-op here. */
static inline __attribute__((always_inline)) bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
return true;
}
/* zalloc_cpumask_var - "allocate" and zero the mask. */
static inline __attribute__((always_inline)) bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}
/* zalloc_cpumask_var_node - NUMA-node variant of zalloc_cpumask_var. */
static inline __attribute__((always_inline)) bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}
/* alloc_bootmem_cpumask_var - nothing to do for array-based masks. */
static inline __attribute__((always_inline)) void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
/* free_cpumask_var - nothing to free. */
static inline __attribute__((always_inline)) void free_cpumask_var(cpumask_var_t mask)
{
}
/* free_bootmem_cpumask_var - nothing to free. */
static inline __attribute__((always_inline)) void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
/* All-ones template mask and CPU hotplug bookkeeping, defined elsewhere. */
extern const unsigned long cpu_all_bits[(((8) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
void set_cpu_possible(unsigned int cpu, bool possible);
void set_cpu_present(unsigned int cpu, bool present);
void set_cpu_online(unsigned int cpu, bool online);
void set_cpu_active(unsigned int cpu, bool active);
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
/*
 * __check_is_bitmap - compile-time type-check helper used by the
 * to_cpumask() idiom: accepting the argument proves it is an
 * unsigned long pointer. Always returns 1; the value itself is unused.
 */
static inline __attribute__((always_inline)) int __check_is_bitmap(const unsigned long *bitmap)
{
(void)bitmap;
return 1;
}
/* Shared table of single-bit masks used by get_cpu_mask(); row r+1 has
 * bit r set in its first word (see the definition for the exact layout). */
extern const unsigned long
cpu_bit_bitmap[32 +1][(((8) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
2444 | static inline __attribute__((always_inline)) const struct cpumask *get_cpu_mask(unsigned int cpu) | |
2445 | { | |
2446 | const unsigned long *p = cpu_bit_bitmap[1 + cpu % 32]; | |
2447 | p -= cpu / 32; | |
2448 | return ((struct cpumask *)(1 ? (p) : (void *)sizeof(__check_is_bitmap(p)))); | |
2449 | } | |
/* Legacy cpumask_t API (old-style helpers kept for compatibility).
 * Out-of-line search routines, defined in lib/cpumask.c. */
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
int __any_online_cpu(const cpumask_t *mask);
/* __cpu_set - atomically set bit 'cpu' in *dstp (no range check). */
static inline __attribute__((always_inline)) void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
set_bit(cpu, dstp->bits);
}
/* __cpu_clear - atomically clear bit 'cpu' in *dstp. */
static inline __attribute__((always_inline)) void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
clear_bit(cpu, dstp->bits);
}
/* __cpus_setall - set the first nbits bits of *dstp. */
static inline __attribute__((always_inline)) void __cpus_setall(cpumask_t *dstp, int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
/* __cpus_clear - clear the first nbits bits of *dstp. */
static inline __attribute__((always_inline)) void __cpus_clear(cpumask_t *dstp, int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
/* __cpu_test_and_set - atomically set bit 'cpu'; returns previous value. */
static inline __attribute__((always_inline)) int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
return test_and_set_bit(cpu, addr->bits);
}
/* __cpus_and - *dstp = *src1p & *src2p over nbits; returns bitmap_and's result. */
static inline __attribute__((always_inline)) int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
/* __cpus_or - *dstp = *src1p | *src2p over nbits. */
static inline __attribute__((always_inline)) void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
/* __cpus_xor - *dstp = *src1p ^ *src2p over nbits. */
static inline __attribute__((always_inline)) void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
/* __cpus_andnot - *dstp = *src1p & ~*src2p; returns bitmap_andnot's result. */
static inline __attribute__((always_inline)) int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
/* __cpus_equal - nonzero when the first nbits bits are identical. */
static inline __attribute__((always_inline)) int __cpus_equal(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
/* __cpus_intersects - nonzero when the masks share a set bit. */
static inline __attribute__((always_inline)) int __cpus_intersects(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
/* __cpus_subset - nonzero when src1p's bits are all set in src2p. */
static inline __attribute__((always_inline)) int __cpus_subset(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
/* __cpus_empty - nonzero when no bit is set in the first nbits. */
static inline __attribute__((always_inline)) int __cpus_empty(const cpumask_t *srcp, int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
/* __cpus_weight - number of set bits in the first nbits. */
static inline __attribute__((always_inline)) int __cpus_weight(const cpumask_t *srcp, int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
/* __cpus_shift_left - *dstp = *srcp << n within nbits. */
static inline __attribute__((always_inline)) void __cpus_shift_left(cpumask_t *dstp,
const cpumask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
/* x86 paravirt accessors. Each ({ ... }) statement expression below is the
 * fully expanded PVOP_CALL/PVOP_VCALL macro: an indirect call through the
 * corresponding pv_*_ops slot, with a .parainstructions record emitted so
 * the call site can be binary-patched at boot. The asm bodies only differ
 * in which ops slot they target and which arguments they marshal in
 * eax/edx/ecx; do not edit them by hand. */
/* paravirt_enabled - nonzero when running under a paravirt hypervisor. */
static inline __attribute__((always_inline)) int paravirt_enabled(void)
{
return pv_info.paravirt_enabled;
}
/* load_sp0 - install thread's stack pointer 0 into the TSS via
 * pv_cpu_ops.load_sp0 (args passed in eax/edx). */
static inline __attribute__((always_inline)) void load_sp0(struct tss_struct *tss,
struct thread_struct *thread)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_sp0); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_sp0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_sp0)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(tss)), "d" ((unsigned long)(thread)) : "memory", "cc" ); });
}
/* __cpuid - run CPUID through pv_cpu_ops.cpuid; the four pointers are
 * passed in registers plus one pushed fourth argument. */
static inline __attribute__((always_inline)) void __cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.cpuid); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.cpuid) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.cpuid)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(eax)), "1" ((u32)(ebx)), "2" ((u32)(ecx)), [_arg4] "mr" ((u32)(edx)) : "memory", "cc" ); });
}
/* paravirt_get_debugreg - read debug register 'reg' via
 * pv_cpu_ops.get_debugreg. The sizeof(long)>sizeof(long) branch is the
 * expanded 32-vs-64-bit return-width selector (always false on i386) plus
 * the usual branch-profiling wrapper. */
static inline __attribute__((always_inline)) unsigned long paravirt_get_debugreg(int reg)
{
return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.get_debugreg); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 39, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.get_debugreg) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.get_debugreg)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(reg)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.get_debugreg) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.get_debugreg)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(reg)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
}
/* set_debugreg - write val into debug register 'reg' via
 * pv_cpu_ops.set_debugreg (reg in eax, val in edx). */
static inline __attribute__((always_inline)) void set_debugreg(unsigned long val, int reg)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.set_debugreg); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.set_debugreg) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.set_debugreg)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(reg)), "d" ((unsigned long)(val)) : "memory", "cc" ); });
}
/* clts - clear CR0.TS via pv_cpu_ops.clts (expanded PVOP_VCALL0). */
static inline __attribute__((always_inline)) void clts(void)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.clts); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.clts) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.clts)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
}
/* read_cr0 - read control register CR0 via pv_cpu_ops.read_cr0; the
 * always-false sizeof comparison selects the 32-bit return path. */
static inline __attribute__((always_inline)) unsigned long read_cr0(void)
{
return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_cr0); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 54, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr0)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr0)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
}
/* write_cr0 - write x into CR0 via pv_cpu_ops.write_cr0 (x in eax). */
static inline __attribute__((always_inline)) void write_cr0(unsigned long x)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_cr0); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_cr0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_cr0)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(x)) : "memory", "cc" ); });
}
/* read_cr2 - read page-fault address register CR2 via pv_mmu_ops.read_cr2. */
static inline __attribute__((always_inline)) unsigned long read_cr2(void)
{
return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.read_cr2); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 64, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.read_cr2) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.read_cr2)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.read_cr2) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.read_cr2)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
}
/* write_cr2 - write x into CR2 via pv_mmu_ops.write_cr2. */
static inline __attribute__((always_inline)) void write_cr2(unsigned long x)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.write_cr2); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.write_cr2) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.write_cr2)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(x)) : "memory", "cc" ); });
}
/* read_cr3 - read page-table base register CR3 via pv_mmu_ops.read_cr3. */
static inline __attribute__((always_inline)) unsigned long read_cr3(void)
{
return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.read_cr3); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 74, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.read_cr3) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.read_cr3)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.read_cr3) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.read_cr3)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
}
/* write_cr3 - write x into CR3 via pv_mmu_ops.write_cr3. */
static inline __attribute__((always_inline)) void write_cr3(unsigned long x)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.write_cr3); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.write_cr3) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.write_cr3)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(x)) : "memory", "cc" ); });
}
/* read_cr4 - read control register CR4 via pv_cpu_ops.read_cr4. */
static inline __attribute__((always_inline)) unsigned long read_cr4(void)
{
return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_cr4); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 84, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr4) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr4)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr4) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr4)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
}
/* read_cr4_safe - CR4 read variant for CPUs that may lack CR4
 * (pv_cpu_ops.read_cr4_safe); same expanded call/return pattern. */
static inline __attribute__((always_inline)) unsigned long read_cr4_safe(void)
{
return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_cr4_safe); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 88, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr4_safe) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr4_safe)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr4_safe) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr4_safe)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
}
/* write_cr4 - write x into CR4 via pv_cpu_ops.write_cr4. */
static inline __attribute__((always_inline)) void write_cr4(unsigned long x)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_cr4); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_cr4) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_cr4)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(x)) : "memory", "cc" ); });
}
/* arch_safe_halt - enable interrupts and halt via pv_irq_ops.safe_halt. */
static inline __attribute__((always_inline)) void arch_safe_halt(void)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.safe_halt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.safe_halt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.safe_halt)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
}
/* halt - halt the CPU via pv_irq_ops.halt. */
static inline __attribute__((always_inline)) void halt(void)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.halt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.halt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.halt)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
}
/* wbinvd - write back and invalidate caches via pv_cpu_ops.wbinvd. */
static inline __attribute__((always_inline)) void wbinvd(void)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.wbinvd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.wbinvd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.wbinvd)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
}
2595 | static inline __attribute__((always_inline)) u64 paravirt_read_msr(unsigned msr, int *err) | |
2596 | { | |
2597 | return ({ u64 __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_msr); if (__builtin_constant_p(((sizeof(u64) > sizeof(unsigned long)))) ? !!((sizeof(u64) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 127, }; ______r = !!((sizeof(u64) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_msr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_msr)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(msr)), "d" ((unsigned long)(err)) : "memory", "cc" ); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_msr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_msr)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(msr)), "d" ((unsigned long)(err)) : "memory", "cc" ); __ret = (u64)__eax; } __ret; }); | |
2598 | } | |
2599 | static inline __attribute__((always_inline)) int paravirt_rdmsr_regs(u32 *regs) | |
2600 | { | |
2601 | return ({ int __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.rdmsr_regs); if (__builtin_constant_p(((sizeof(int) > sizeof(unsigned long)))) ? !!((sizeof(int) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 132, }; ______r = !!((sizeof(int) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.rdmsr_regs) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.rdmsr_regs)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(regs)) : "memory", "cc" ); __ret = (int)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.rdmsr_regs) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.rdmsr_regs)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(regs)) : "memory", "cc" ); __ret = (int)__eax; } __ret; }); | |
2602 | } | |
2603 | static inline __attribute__((always_inline)) int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) | |
2604 | { | |
2605 | return ({ int __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_msr); if (__builtin_constant_p(((sizeof(int) > sizeof(unsigned long)))) ? !!((sizeof(int) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 137, }; ______r = !!((sizeof(int) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_msr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_msr)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(msr)), "d" ((unsigned long)(low)), "c" ((unsigned long)(high)) : "memory", "cc" ); __ret = (int)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_msr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_msr)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(msr)), "d" ((unsigned long)(low)), "c" ((unsigned long)(high)) : "memory", "cc" ); __ret = (int)__eax; } __ret; }); | |
2606 | } | |
2607 | static inline __attribute__((always_inline)) int paravirt_wrmsr_regs(u32 *regs) | |
2608 | { | |
2609 | return ({ int __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.wrmsr_regs); if (__builtin_constant_p(((sizeof(int) > sizeof(unsigned long)))) ? !!((sizeof(int) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 142, }; ______r = !!((sizeof(int) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.wrmsr_regs) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.wrmsr_regs)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(regs)) : "memory", "cc" ); __ret = (int)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.wrmsr_regs) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.wrmsr_regs)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(regs)) : "memory", "cc" ); __ret = (int)__eax; } __ret; }); | |
2610 | } | |
2611 | static inline __attribute__((always_inline)) int rdmsrl_safe(unsigned msr, unsigned long long *p) | |
2612 | { | |
2613 | int err; | |
2614 | *p = paravirt_read_msr(msr, &err); | |
2615 | return err; | |
2616 | } | |
2617 | static inline __attribute__((always_inline)) int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) | |
2618 | { | |
2619 | u32 gprs[8] = { 0 }; | |
2620 | int err; | |
2621 | gprs[1] = msr; | |
2622 | gprs[7] = 0x9c5a203a; | |
2623 | err = paravirt_rdmsr_regs(gprs); | |
2624 | *p = gprs[0] | ((u64)gprs[2] << 32); | |
2625 | return err; | |
2626 | } | |
2627 | static inline __attribute__((always_inline)) int wrmsrl_amd_safe(unsigned msr, unsigned long long val) | |
2628 | { | |
2629 | u32 gprs[8] = { 0 }; | |
2630 | gprs[0] = (u32)val; | |
2631 | gprs[1] = msr; | |
2632 | gprs[2] = val >> 32; | |
2633 | gprs[7] = 0x9c5a203a; | |
2634 | return paravirt_wrmsr_regs(gprs); | |
2635 | } | |
2636 | static inline __attribute__((always_inline)) u64 paravirt_read_tsc(void) | |
2637 | { | |
2638 | return ({ u64 __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_tsc); if (__builtin_constant_p(((sizeof(u64) > sizeof(unsigned long)))) ? !!((sizeof(u64) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 217, }; ______r = !!((sizeof(u64) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_tsc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_tsc)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_tsc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_tsc)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (u64)__eax; } __ret; }); | |
2639 | } | |
2640 | static inline __attribute__((always_inline)) unsigned long long paravirt_sched_clock(void) | |
2641 | { | |
2642 | return ({ unsigned long long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_time_ops.sched_clock); if (__builtin_constant_p(((sizeof(unsigned long long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 230, }; ______r = !!((sizeof(unsigned long long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_time_ops.sched_clock) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_time_ops.sched_clock)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_time_ops.sched_clock) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_time_ops.sched_clock)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long long)__eax; } __ret; }); | |
2643 | } | |
2644 | static inline __attribute__((always_inline)) unsigned long long paravirt_read_pmc(int counter) | |
2645 | { | |
2646 | return ({ u64 __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_pmc); if (__builtin_constant_p(((sizeof(u64) > sizeof(unsigned long)))) ? !!((sizeof(u64) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 235, }; ______r = !!((sizeof(u64) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_pmc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_pmc)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(counter)) : "memory", "cc" ); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_pmc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_pmc)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(counter)) : "memory", "cc" ); __ret = (u64)__eax; } __ret; }); | |
2647 | } | |
2648 | static inline __attribute__((always_inline)) unsigned long long paravirt_rdtscp(unsigned int *aux) | |
2649 | { | |
2650 | return ({ u64 __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_tscp); if (__builtin_constant_p(((sizeof(u64) > sizeof(unsigned long)))) ? !!((sizeof(u64) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 247, }; ______r = !!((sizeof(u64) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_tscp) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_tscp)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(aux)) : "memory", "cc" ); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_tscp) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_tscp)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(aux)) : "memory", "cc" ); __ret = (u64)__eax; } __ret; }); | |
2651 | } | |
2652 | static inline __attribute__((always_inline)) void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries) | |
2653 | { | |
2654 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.alloc_ldt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.alloc_ldt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.alloc_ldt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ldt)), "d" ((unsigned long)(entries)) : "memory", "cc" ); }); | |
2655 | } | |
2656 | static inline __attribute__((always_inline)) void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) | |
2657 | { | |
2658 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.free_ldt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.free_ldt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.free_ldt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ldt)), "d" ((unsigned long)(entries)) : "memory", "cc" ); }); | |
2659 | } | |
2660 | static inline __attribute__((always_inline)) void load_TR_desc(void) | |
2661 | { | |
2662 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_tr_desc); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_tr_desc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_tr_desc)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); }); | |
2663 | } | |
2664 | static inline __attribute__((always_inline)) void load_gdt(const struct desc_ptr *dtr) | |
2665 | { | |
2666 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_gdt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_gdt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_gdt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dtr)) : "memory", "cc" ); }); | |
2667 | } | |
2668 | static inline __attribute__((always_inline)) void load_idt(const struct desc_ptr *dtr) | |
2669 | { | |
2670 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_idt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_idt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_idt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dtr)) : "memory", "cc" ); }); | |
2671 | } | |
2672 | static inline __attribute__((always_inline)) void set_ldt(const void *addr, unsigned entries) | |
2673 | { | |
2674 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.set_ldt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.set_ldt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.set_ldt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(addr)), "d" ((unsigned long)(entries)) : "memory", "cc" ); }); | |
2675 | } | |
2676 | static inline __attribute__((always_inline)) void store_gdt(struct desc_ptr *dtr) | |
2677 | { | |
2678 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.store_gdt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.store_gdt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.store_gdt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dtr)) : "memory", "cc" ); }); | |
2679 | } | |
2680 | static inline __attribute__((always_inline)) void store_idt(struct desc_ptr *dtr) | |
2681 | { | |
2682 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.store_idt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.store_idt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.store_idt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dtr)) : "memory", "cc" ); }); | |
2683 | } | |
2684 | static inline __attribute__((always_inline)) unsigned long paravirt_store_tr(void) | |
2685 | { | |
2686 | return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.store_tr); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 302, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.store_tr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.store_tr)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.store_tr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.store_tr)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; }); | |
2687 | } | |
2688 | static inline __attribute__((always_inline)) void load_TLS(struct thread_struct *t, unsigned cpu) | |
2689 | { | |
2690 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_tls); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_tls) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_tls)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(t)), "d" ((unsigned long)(cpu)) : "memory", "cc" ); }); | |
2691 | } | |
2692 | static inline __attribute__((always_inline)) void write_ldt_entry(struct desc_struct *dt, int entry, | |
2693 | const void *desc) | |
2694 | { | |
2695 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_ldt_entry); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_ldt_entry) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_ldt_entry)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dt)), "d" ((unsigned long)(entry)), "c" ((unsigned long)(desc)) : "memory", "cc" ); }); | |
2696 | } | |
2697 | static inline __attribute__((always_inline)) void write_gdt_entry(struct desc_struct *dt, int entry, | |
2698 | void *desc, int type) | |
2699 | { | |
2700 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_gdt_entry); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_gdt_entry) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_gdt_entry)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(dt)), "1" ((u32)(entry)), "2" ((u32)(desc)), [_arg4] "mr" ((u32)(type)) : "memory", "cc" ); }); | |
2701 | } | |
2702 | static inline __attribute__((always_inline)) void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g) | |
2703 | { | |
2704 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_idt_entry); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_idt_entry) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_idt_entry)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dt)), "d" ((unsigned long)(entry)), "c" ((unsigned long)(g)) : "memory", "cc" ); }); | |
2705 | } | |
2706 | static inline __attribute__((always_inline)) void set_iopl_mask(unsigned mask) | |
2707 | { | |
2708 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.set_iopl_mask); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.set_iopl_mask) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.set_iopl_mask)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mask)) : "memory", "cc" ); }); | |
2709 | } | |
2710 | static inline __attribute__((always_inline)) void slow_down_io(void) | |
2711 | { | |
2712 | pv_cpu_ops.io_delay(); | |
2713 | } | |
2714 | static inline __attribute__((always_inline)) void startup_ipi_hook(int phys_apicid, unsigned long start_eip, | |
2715 | unsigned long start_esp) | |
2716 | { | |
2717 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_apic_ops.startup_ipi_hook); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_apic_ops.startup_ipi_hook) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_apic_ops.startup_ipi_hook)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(phys_apicid)), "d" ((unsigned long)(start_eip)), "c" ((unsigned long)(start_esp)) : "memory", "cc" ); }) | |
2718 | ; | |
2719 | } | |
2720 | static inline __attribute__((always_inline)) void paravirt_activate_mm(struct mm_struct *prev, | |
2721 | struct mm_struct *next) | |
2722 | { | |
2723 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.activate_mm); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.activate_mm) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.activate_mm)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(prev)), "d" ((unsigned long)(next)) : "memory", "cc" ); }); | |
2724 | } | |
2725 | static inline __attribute__((always_inline)) void arch_dup_mmap(struct mm_struct *oldmm, | |
2726 | struct mm_struct *mm) | |
2727 | { | |
2728 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.dup_mmap); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.dup_mmap) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.dup_mmap)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(oldmm)), "d" ((unsigned long)(mm)) : "memory", "cc" ); }); | |
2729 | } | |
2730 | static inline __attribute__((always_inline)) void arch_exit_mmap(struct mm_struct *mm) | |
2731 | { | |
2732 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.exit_mmap); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.exit_mmap) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.exit_mmap)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)) : "memory", "cc" ); }); | |
2733 | } | |
2734 | static inline __attribute__((always_inline)) void __flush_tlb(void) | |
2735 | { | |
2736 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.flush_tlb_user); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.flush_tlb_user) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.flush_tlb_user)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); }); | |
2737 | } | |
2738 | static inline __attribute__((always_inline)) void __flush_tlb_global(void) | |
2739 | { | |
2740 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.flush_tlb_kernel); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.flush_tlb_kernel) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.flush_tlb_kernel)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); }); | |
2741 | } | |
2742 | static inline __attribute__((always_inline)) void __flush_tlb_single(unsigned long addr) | |
2743 | { | |
2744 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.flush_tlb_single); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.flush_tlb_single) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.flush_tlb_single)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(addr)) : "memory", "cc" ); }); | |
2745 | } | |
2746 | static inline __attribute__((always_inline)) void flush_tlb_others(const struct cpumask *cpumask, | |
2747 | struct mm_struct *mm, | |
2748 | unsigned long va) | |
2749 | { | |
2750 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.flush_tlb_others); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.flush_tlb_others) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.flush_tlb_others)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(cpumask)), "d" ((unsigned long)(mm)), "c" ((unsigned long)(va)) : "memory", "cc" ); }); | |
2751 | } | |
2752 | static inline __attribute__((always_inline)) int paravirt_pgd_alloc(struct mm_struct *mm) | |
2753 | { | |
2754 | return ({ int __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pgd_alloc); if (__builtin_constant_p(((sizeof(int) > sizeof(unsigned long)))) ? !!((sizeof(int) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 397, }; ______r = !!((sizeof(int) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_alloc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_alloc)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)) : "memory", "cc" ); __ret = (int)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_alloc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_alloc)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)) : "memory", "cc" ); __ret = (int)__eax; } __ret; }); | |
2755 | } | |
2756 | static inline __attribute__((always_inline)) void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) | |
2757 | { | |
2758 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pgd_free); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_free) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_free)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(pgd)) : "memory", "cc" ); }); | |
2759 | } | |
2760 | static inline __attribute__((always_inline)) void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) | |
2761 | { | |
2762 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.alloc_pte); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.alloc_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.alloc_pte)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(pfn)) : "memory", "cc" ); }); | |
2763 | } | |
2764 | static inline __attribute__((always_inline)) void paravirt_release_pte(unsigned long pfn) | |
2765 | { | |
2766 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.release_pte); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.release_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.release_pte)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pfn)) : "memory", "cc" ); }); | |
2767 | } | |
2768 | static inline __attribute__((always_inline)) void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) | |
2769 | { | |
2770 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.alloc_pmd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.alloc_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.alloc_pmd)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(pfn)) : "memory", "cc" ); }); | |
2771 | } | |
2772 | static inline __attribute__((always_inline)) void paravirt_release_pmd(unsigned long pfn) | |
2773 | { | |
2774 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.release_pmd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.release_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.release_pmd)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pfn)) : "memory", "cc" ); }); | |
2775 | } | |
2776 | static inline __attribute__((always_inline)) void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) | |
2777 | { | |
2778 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.alloc_pud); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.alloc_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.alloc_pud)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(pfn)) : "memory", "cc" ); }); | |
2779 | } | |
2780 | static inline __attribute__((always_inline)) void paravirt_release_pud(unsigned long pfn) | |
2781 | { | |
2782 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.release_pud); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.release_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.release_pud)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pfn)) : "memory", "cc" ); }); | |
2783 | } | |
2784 | static inline __attribute__((always_inline)) void pte_update(struct mm_struct *mm, unsigned long addr, | |
2785 | pte_t *ptep) | |
2786 | { | |
2787 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_update); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_update) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_update)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); }); | |
2788 | } | |
2789 | static inline __attribute__((always_inline)) void pmd_update(struct mm_struct *mm, unsigned long addr, | |
2790 | pmd_t *pmdp) | |
2791 | { | |
2792 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_update); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_update) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_update)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(pmdp)) : "memory", "cc" ); }); | |
2793 | } | |
2794 | static inline __attribute__((always_inline)) void pte_update_defer(struct mm_struct *mm, unsigned long addr, | |
2795 | pte_t *ptep) | |
2796 | { | |
2797 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_update_defer); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_update_defer) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_update_defer)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); }); | |
2798 | } | |
2799 | static inline __attribute__((always_inline)) void pmd_update_defer(struct mm_struct *mm, unsigned long addr, | |
2800 | pmd_t *pmdp) | |
2801 | { | |
2802 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_update_defer); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_update_defer) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_update_defer)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(pmdp)) : "memory", "cc" ); }); | |
2803 | } | |
2804 | static inline __attribute__((always_inline)) pte_t __pte(pteval_t val) | |
2805 | { | |
2806 | pteval_t ret; | |
2807 | if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 460, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; })) | |
2808 | ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pte.func); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2809 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2810 | , .line = | |
2811 | 463 | |
2812 | , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pte.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pte.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; }) | |
2813 | ; | |
2814 | else | |
2815 | ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pte.func); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2816 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2817 | , .line = | |
2818 | 467 | |
2819 | , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pte.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pte.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; }) | |
2820 | ; | |
2821 | return (pte_t) { .pte = ret }; | |
2822 | } | |
2823 | static inline __attribute__((always_inline)) pteval_t pte_val(pte_t pte) | |
2824 | { | |
2825 | pteval_t ret; | |
2826 | if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 476, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; })) | |
2827 | ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_val.func); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2828 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2829 | , .line = | |
2830 | 478 | |
2831 | , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pte.pte)), "d" ((unsigned long)((u64)pte.pte >> 32)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pte.pte)), "d" ((unsigned long)((u64)pte.pte >> 32)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; }) | |
2832 | ; | |
2833 | else | |
2834 | ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_val.func); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2835 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2836 | , .line = | |
2837 | 481 | |
2838 | , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pte.pte)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pte.pte)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; }) | |
2839 | ; | |
2840 | return ret; | |
2841 | } | |
2842 | static inline __attribute__((always_inline)) pgd_t __pgd(pgdval_t val) | |
2843 | { | |
2844 | pgdval_t ret; | |
2845 | if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(long)))) ? !!((sizeof(pgdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 490, }; ______r = !!((sizeof(pgdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; })) | |
2846 | ret = ({ pgdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pgd.func); if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pgdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2847 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2848 | , .line = | |
2849 | 492 | |
2850 | , }; ______r = !!((sizeof(pgdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pgdval_t)__eax; } __ret; }) | |
2851 | ; | |
2852 | else | |
2853 | ret = ({ pgdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pgd.func); if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pgdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2854 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2855 | , .line = | |
2856 | 495 | |
2857 | , }; ______r = !!((sizeof(pgdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pgdval_t)__eax; } __ret; }) | |
2858 | ; | |
2859 | return (pgd_t) { ret }; | |
2860 | } | |
2861 | static inline __attribute__((always_inline)) pgdval_t pgd_val(pgd_t pgd) | |
2862 | { | |
2863 | pgdval_t ret; | |
2864 | if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(long)))) ? !!((sizeof(pgdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 504, }; ______r = !!((sizeof(pgdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; })) | |
2865 | ret = ({ pgdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pgd_val.func); if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pgdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2866 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2867 | , .line = | |
2868 | 506 | |
2869 | , }; ______r = !!((sizeof(pgdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pgd.pgd)), "d" ((unsigned long)((u64)pgd.pgd >> 32)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pgd.pgd)), "d" ((unsigned long)((u64)pgd.pgd >> 32)) : "memory", "cc" ); __ret = (pgdval_t)__eax; } __ret; }) | |
2870 | ; | |
2871 | else | |
2872 | ret = ({ pgdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pgd_val.func); if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pgdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2873 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2874 | , .line = | |
2875 | 509 | |
2876 | , }; ______r = !!((sizeof(pgdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pgd.pgd)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pgd.pgd)) : "memory", "cc" ); __ret = (pgdval_t)__eax; } __ret; }) | |
2877 | ; | |
2878 | return ret; | |
2879 | } | |
2880 | static inline __attribute__((always_inline)) pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, | |
2881 | pte_t *ptep) | |
2882 | { | |
2883 | pteval_t ret; | |
2884 | ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.ptep_modify_prot_start); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2885 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2886 | , .line = | |
2887 | 521 | |
2888 | , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.ptep_modify_prot_start) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.ptep_modify_prot_start)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.ptep_modify_prot_start) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.ptep_modify_prot_start)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; }) | |
2889 | ; | |
2890 | return (pte_t) { .pte = ret }; | |
2891 | } | |
2892 | static inline __attribute__((always_inline)) void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, | |
2893 | pte_t *ptep, pte_t pte) | |
2894 | { | |
2895 | if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 529, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; })) | |
2896 | pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte); | |
2897 | else | |
2898 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.ptep_modify_prot_commit); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.ptep_modify_prot_commit) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.ptep_modify_prot_commit)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(mm)), "1" ((u32)(addr)), "2" ((u32)(ptep)), [_arg4] "mr" ((u32)(pte.pte)) : "memory", "cc" ); }) | |
2899 | ; | |
2900 | } | |
2901 | static inline __attribute__((always_inline)) void set_pte(pte_t *ptep, pte_t pte) | |
2902 | { | |
2903 | if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 539, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; })) | |
2904 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pte); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pte)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ptep)), "d" ((unsigned long)(pte.pte)), "c" ((unsigned long)((u64)pte.pte >> 32)) : "memory", "cc" ); }) | |
2905 | ; | |
2906 | else | |
2907 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pte); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pte)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ptep)), "d" ((unsigned long)(pte.pte)) : "memory", "cc" ); }) | |
2908 | ; | |
2909 | } | |
2910 | static inline __attribute__((always_inline)) void set_pte_at(struct mm_struct *mm, unsigned long addr, | |
2911 | pte_t *ptep, pte_t pte) | |
2912 | { | |
2913 | if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 550, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; })) | |
2914 | pv_mmu_ops.set_pte_at(mm, addr, ptep, pte); | |
2915 | else | |
2916 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pte_at); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pte_at) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pte_at)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(mm)), "1" ((u32)(addr)), "2" ((u32)(ptep)), [_arg4] "mr" ((u32)(pte.pte)) : "memory", "cc" ); }); | |
2917 | } | |
2918 | static inline __attribute__((always_inline)) void set_pmd_at(struct mm_struct *mm, unsigned long addr, | |
2919 | pmd_t *pmdp, pmd_t pmd) | |
2920 | { | |
2921 | if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(long)))) ? !!((sizeof(pmdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 561, }; ______r = !!((sizeof(pmdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; })) | |
2922 | pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd); | |
2923 | else | |
2924 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pmd_at); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pmd_at) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pmd_at)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(mm)), "1" ((u32)(addr)), "2" ((u32)(pmdp)), [_arg4] "mr" ((u32)(native_pmd_val(pmd))) : "memory", "cc" ); }) | |
2925 | ; | |
2926 | } | |
2927 | static inline __attribute__((always_inline)) void set_pmd(pmd_t *pmdp, pmd_t pmd) | |
2928 | { | |
2929 | pmdval_t val = native_pmd_val(pmd); | |
2930 | if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(long)))) ? !!((sizeof(pmdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 574, }; ______r = !!((sizeof(pmdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; })) | |
2931 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pmd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pmd)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pmdp)), "d" ((unsigned long)(val)), "c" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); }); | |
2932 | else | |
2933 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pmd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pmd)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pmdp)), "d" ((unsigned long)(val)) : "memory", "cc" ); }); | |
2934 | } | |
2935 | static inline __attribute__((always_inline)) pmd_t __pmd(pmdval_t val) | |
2936 | { | |
2937 | pmdval_t ret; | |
2938 | if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(long)))) ? !!((sizeof(pmdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 585, }; ______r = !!((sizeof(pmdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; })) | |
2939 | ret = ({ pmdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pmd.func); if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pmdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2940 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2941 | , .line = | |
2942 | 587 | |
2943 | , }; ______r = !!((sizeof(pmdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pmdval_t)__eax; } __ret; }) | |
2944 | ; | |
2945 | else | |
2946 | ret = ({ pmdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pmd.func); if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pmdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2947 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2948 | , .line = | |
2949 | 590 | |
2950 | , }; ______r = !!((sizeof(pmdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pmdval_t)__eax; } __ret; }) | |
2951 | ; | |
2952 | return (pmd_t) { ret }; | |
2953 | } | |
2954 | static inline __attribute__((always_inline)) pmdval_t pmd_val(pmd_t pmd) | |
2955 | { | |
2956 | pmdval_t ret; | |
2957 | if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(long)))) ? !!((sizeof(pmdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 599, }; ______r = !!((sizeof(pmdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; })) | |
2958 | ret = ({ pmdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_val.func); if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pmdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2959 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2960 | , .line = | |
2961 | 601 | |
2962 | , }; ______r = !!((sizeof(pmdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pmd.pmd)), "d" ((unsigned long)((u64)pmd.pmd >> 32)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pmd.pmd)), "d" ((unsigned long)((u64)pmd.pmd >> 32)) : "memory", "cc" ); __ret = (pmdval_t)__eax; } __ret; }) | |
2963 | ; | |
2964 | else | |
2965 | ret = ({ pmdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_val.func); if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pmdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
2966 | "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h" | |
2967 | , .line = | |
2968 | 604 | |
2969 | , }; ______r = !!((sizeof(pmdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pmd.pmd)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pmd.pmd)) : "memory", "cc" ); __ret = (pmdval_t)__eax; } __ret; }) | |
2970 | ; | |
2971 | return ret; | |
2972 | } | |
2973 | static inline __attribute__((always_inline)) void set_pud(pud_t *pudp, pud_t pud) | |
2974 | { | |
2975 | pudval_t val = native_pud_val(pud); | |
2976 | if (__builtin_constant_p(((sizeof(pudval_t) > sizeof(long)))) ? !!((sizeof(pudval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 613, }; ______r = !!((sizeof(pudval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; })) | |
2977 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pud); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pud)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pudp)), "d" ((unsigned long)(val)), "c" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); }) | |
2978 | ; | |
2979 | else | |
2980 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pud); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pud)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pudp)), "d" ((unsigned long)(val)) : "memory", "cc" ); }) | |
2981 | ; | |
2982 | } | |
2983 | static inline __attribute__((always_inline)) void set_pte_atomic(pte_t *ptep, pte_t pte) | |
2984 | { | |
2985 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pte_atomic); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pte_atomic) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pte_atomic)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ptep)), "d" ((unsigned long)(pte.pte)), "c" ((unsigned long)(pte.pte >> 32)) : "memory", "cc" ); }) | |
2986 | ; | |
2987 | } | |
2988 | static inline __attribute__((always_inline)) void pte_clear(struct mm_struct *mm, unsigned long addr, | |
2989 | pte_t *ptep) | |
2990 | { | |
2991 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_clear); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_clear) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_clear)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); }); | |
2992 | } | |
2993 | static inline __attribute__((always_inline)) void pmd_clear(pmd_t *pmdp) | |
2994 | { | |
2995 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_clear); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_clear) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_clear)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pmdp)) : "memory", "cc" ); }); | |
2996 | } | |
2997 | static inline __attribute__((always_inline)) void arch_start_context_switch(struct task_struct *prev) | |
2998 | { | |
2999 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.start_context_switch); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.start_context_switch) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.start_context_switch)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(prev)) : "memory", "cc" ); }); | |
3000 | } | |
3001 | static inline __attribute__((always_inline)) void arch_end_context_switch(struct task_struct *next) | |
3002 | { | |
3003 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.end_context_switch); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.end_context_switch) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.end_context_switch)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(next)) : "memory", "cc" ); }); | |
3004 | } | |
3005 | static inline __attribute__((always_inline)) void arch_enter_lazy_mmu_mode(void) | |
3006 | { | |
3007 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.lazy_mode.enter); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.lazy_mode.enter) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.lazy_mode.enter)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); }); | |
3008 | } | |
3009 | static inline __attribute__((always_inline)) void arch_leave_lazy_mmu_mode(void) | |
3010 | { | |
3011 | ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.lazy_mode.leave); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.lazy_mode.leave) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.lazy_mode.leave)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); }); | |
3012 | } | |
3013 | void arch_flush_lazy_mmu_mode(void); | |
3014 | static inline __attribute__((always_inline)) void __set_fixmap(unsigned idx, | |
3015 | phys_addr_t phys, pgprot_t flags) | |
3016 | { | |
3017 | pv_mmu_ops.set_fixmap(idx, phys, flags); | |
3018 | } | |
/*
 * arch_local_save_flags - return the current interrupt-flags word.
 *
 * This is the preprocessed expansion of the paravirt PVOP call to
 * pv_irq_ops.save_fl: an indirect "call" through the ops table, annotated
 * in .parainstructions so it can be binary-patched at boot.  The ftrace
 * branch-profiling ({...}) selects between the 64-bit (edx:eax) and 32-bit
 * (eax only) result forms; sizeof(long) > sizeof(long) is always false,
 * so the "else" arm (return in %eax) is the live path on this 32-bit build.
 */
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) unsigned long arch_local_save_flags(void)
{
return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.save_fl.func); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 853, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.save_fl.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.save_fl.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.save_fl.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.save_fl.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
}
/*
 * arch_local_irq_restore - restore a flags word previously obtained from
 * arch_local_save_flags()/arch_local_irq_save().
 *
 * Expanded paravirt call to pv_irq_ops.restore_fl; @f is passed in %eax
 * (the "a" input constraint) and the call site is recorded in
 * .parainstructions for boot-time patching.
 */
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void arch_local_irq_restore(unsigned long f)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.restore_fl.func); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.restore_fl.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.restore_fl.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(f)) : "memory", "cc" ); });
}
/*
 * arch_local_irq_disable - disable hardware interrupts on this CPU.
 *
 * Expanded paravirt call to pv_irq_ops.irq_disable (patched to "cli" when
 * running natively — TODO confirm against paravirt patch tables).
 */
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void arch_local_irq_disable(void)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.irq_disable.func); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.irq_disable.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.irq_disable.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))) : "memory", "cc" ); });
}
/*
 * arch_local_irq_enable - enable hardware interrupts on this CPU.
 *
 * Expanded paravirt call to pv_irq_ops.irq_enable; same patchable
 * call-site mechanism as the other pv_irq_ops wrappers above.
 */
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void arch_local_irq_enable(void)
{
({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.irq_enable.func); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.irq_enable.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.irq_enable.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))) : "memory", "cc" ); });
}
/*
 * arch_local_irq_save - capture the current flags word, then disable
 * interrupts.  Returns the captured flags so the caller can later hand
 * them back to arch_local_irq_restore().
 */
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) unsigned long arch_local_irq_save(void)
{
	unsigned long saved_flags;

	saved_flags = arch_local_save_flags();
	arch_local_irq_disable();

	return saved_flags;
}
3042 | extern void default_banner(void); | |
/*
 * arch_irqs_disabled_flags - decode a saved flags word.
 * Bit 0x200 is the EFLAGS interrupt-enable bit (IF); interrupts are
 * "disabled" exactly when that bit is clear.  Returns non-zero (1) when
 * disabled, 0 when enabled.
 */
static inline __attribute__((always_inline)) int arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & 0x00000200) == 0;
}
/*
 * arch_irqs_disabled - report whether interrupts are currently disabled
 * on this CPU, by sampling the live flags word and decoding its IF bit.
 */
static inline __attribute__((always_inline)) int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
3052 | extern void trace_softirqs_on(unsigned long ip); | |
3053 | extern void trace_softirqs_off(unsigned long ip); | |
3054 | extern void trace_hardirqs_on(void); | |
3055 | extern void trace_hardirqs_off(void); | |
3056 | extern void stop_critical_timings(void); | |
3057 | extern void start_critical_timings(void); | |
3058 | struct task_struct; | |
3059 | struct task_struct *__switch_to(struct task_struct *prev, | |
3060 | struct task_struct *next); | |
3061 | struct tss_struct; | |
3062 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |
3063 | struct tss_struct *tss); | |
3064 | extern void show_regs_common(void); | |
3065 | extern void native_load_gs_index(unsigned); | |
/*
 * get_limit - return the size in bytes of the segment named by @segment.
 *
 * Uses the LSL instruction to load the segment limit (highest valid
 * offset) and adds 1 to convert it to a byte count.
 * NOTE(review): if the selector is invalid LSL does not write its
 * destination, leaving __limit uninitialized — callers presumably pass
 * only valid selectors; confirm before reuse.
 */
static inline __attribute__((always_inline)) unsigned long get_limit(unsigned long segment)
{
unsigned long __limit;
asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
return __limit + 1;
}
/*
 * native_clts - clear the Task-Switched flag (CR0.TS) with the CLTS
 * instruction, re-enabling FPU/SSE use without a device-not-available trap.
 */
static inline __attribute__((always_inline)) void native_clts(void)
{
asm volatile("clts");
}
/*
 * __force_order is never actually written; it exists purely as a dummy
 * memory operand so the compiler keeps the control-register asm blocks
 * below ordered with respect to each other.
 */
static unsigned long __force_order;
/* native_read_cr0 - read control register CR0 directly (no paravirt). */
static inline __attribute__((always_inline)) unsigned long native_read_cr0(void)
{
unsigned long val;
asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
/* native_write_cr0 - write @val into CR0; __force_order serializes against
 * the other CR accessors at the compiler level. */
static inline __attribute__((always_inline)) void native_write_cr0(unsigned long val)
{
asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}
/* native_read_cr2 - read CR2 (the faulting address after a page fault). */
static inline __attribute__((always_inline)) unsigned long native_read_cr2(void)
{
unsigned long val;
asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
/* native_write_cr2 - write @val into CR2. */
static inline __attribute__((always_inline)) void native_write_cr2(unsigned long val)
{
asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}
/* native_read_cr3 - read CR3 (physical address of the page directory). */
static inline __attribute__((always_inline)) unsigned long native_read_cr3(void)
{
unsigned long val;
asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
/* native_write_cr3 - write @val into CR3 (switches the page tables and,
 * architecturally, flushes non-global TLB entries). */
static inline __attribute__((always_inline)) void native_write_cr3(unsigned long val)
{
asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}
/* native_read_cr4 - read CR4; faults on CPUs without CR4 (see the _safe
 * variant below for that case). */
static inline __attribute__((always_inline)) unsigned long native_read_cr4(void)
{
unsigned long val;
asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
/*
 * native_read_cr4_safe - read CR4 without oopsing on ancient CPUs.
 *
 * The __ex_table entry maps a fault at label 1 (the mov) to label 2, so a
 * #UD/#GP on a CR4-less CPU resumes after the instruction; the "0" (0)
 * input preloads val with 0, which is what gets returned in that case.
 */
static inline __attribute__((always_inline)) unsigned long native_read_cr4_safe(void)
{
unsigned long val;
asm volatile("1: mov %%cr4, %0\n"
"2:\n"
" .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "2b" "\n" " .previous\n"
: "=r" (val), "=m" (__force_order) : "0" (0));
return val;
}
/* native_write_cr4 - write @val into CR4. */
static inline __attribute__((always_inline)) void native_write_cr4(unsigned long val)
{
asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}
/* native_wbinvd - write back and invalidate all CPU caches (WBINVD). */
static inline __attribute__((always_inline)) void native_wbinvd(void)
{
asm volatile("wbinvd": : :"memory");
}
/* clflush - flush the cache line containing @__p from all cache levels.
 * The "+m" constraint makes the pointed-to byte an input AND output so the
 * compiler neither reorders nor drops the flush. */
static inline __attribute__((always_inline)) void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(volatile char *)__p));
}
3134 | void disable_hlt(void); | |
3135 | void enable_hlt(void); | |
3136 | void cpu_idle_wait(void); | |
3137 | extern unsigned long arch_align_stack(unsigned long sp); | |
3138 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); | |
3139 | void default_idle(void); | |
3140 | void stop_this_cpu(void *dummy); | |
/*
 * rdtsc_barrier - stop RDTSC from being speculated across this point.
 *
 * Each asm emits a 3-byte NOP (0x8d,0x76,0x00 = lea 0x0(%esi),%esi) plus
 * an .altinstructions record; at boot the NOP is patched to MFENCE or
 * LFENCE when CPU feature bits (3*32+17) / (3*32+18) are present
 * (presumably X86_FEATURE_MFENCE_RDTSC / X86_FEATURE_LFENCE_RDTSC —
 * confirm against cpufeature.h).
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void rdtsc_barrier(void)
{
asm volatile ("661:\n\t" ".byte " "0x8d,0x76,0x00" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(3*32+17)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "mfence" "\n664:\n" ".previous" : : : "memory");
asm volatile ("661:\n\t" ".byte " "0x8d,0x76,0x00" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(3*32+18)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "lfence" "\n664:\n" ".previous" : : : "memory");
}
3146 | extern unsigned int __invalid_size_argument_for_IOC; | |
3147 | extern cpumask_var_t cpu_callin_mask; | |
3148 | extern cpumask_var_t cpu_callout_mask; | |
3149 | extern cpumask_var_t cpu_initialized_mask; | |
3150 | extern cpumask_var_t cpu_sibling_setup_mask; | |
3151 | extern void setup_cpu_local_masks(void); | |
/*
 * struct msr - one 64-bit MSR value, accessible either as the whole
 * quadword (q) or as its low/high 32-bit halves (l/h) via the anonymous
 * union/struct.
 */
struct msr {
union {
struct {
u32 l; /* low 32 bits (EAX half) */
u32 h; /* high 32 bits (EDX half) */
};
u64 q; /* full 64-bit value */
};
};
/*
 * struct msr_info - argument bundle for cross-CPU MSR read/write helpers
 * (rdmsr_on_cpu() and friends below).
 */
struct msr_info {
u32 msr_no; /* which MSR to access */
struct msr reg; /* single in/out value */
struct msr *msrs; /* per-CPU value array for the *_on_cpus() variants */
int err; /* 0 or negative error from the remote access */
};
/*
 * struct msr_regs_info - argument bundle for the register-array based
 * safe MSR helpers (native_rdmsr_safe_regs() etc., which take u32 regs[8]).
 */
struct msr_regs_info {
u32 *regs; /* caller's 8-entry GP-register image */
int err; /* result of the remote operation */
};
/*
 * native_read_tscp - execute RDTSCP (raw opcode 0x0f,0x01,0xf9).
 * Returns the 64-bit TSC assembled from EDX:EAX and stores the
 * TSC_AUX value (ECX) into *@aux.
 */
static inline __attribute__((always_inline)) unsigned long long native_read_tscp(unsigned int *aux)
{
unsigned long low, high;
asm volatile(".byte 0x0f,0x01,0xf9"
: "=a" (low), "=d" (high), "=c" (*aux));
return low | ((u64)high << 32);
}
/*
 * native_read_msr - RDMSR of @msr; the "=A" constraint collects the
 * EDX:EAX pair into the 64-bit result.  Faults (#GP) on a bad MSR —
 * use native_read_msr_safe() when that can happen.
 */
static inline __attribute__((always_inline)) unsigned long long native_read_msr(unsigned int msr)
{
unsigned long long val;
asm volatile("rdmsr" : "=A" (val) : "c" (msr));
return (val);
}
/*
 * native_read_msr_safe - RDMSR with #GP recovery.
 *
 * On success *@err is zeroed (xor) and the 64-bit value is returned.
 * If the RDMSR faults, the __ex_table entry diverts to the .fixup code,
 * which stores -5 (presumably -EIO — confirm against errno-base.h) in
 * *@err and resumes; val is then unspecified.
 */
static inline __attribute__((always_inline)) unsigned long long native_read_msr_safe(unsigned int msr,
int *err)
{
unsigned long long val;
asm volatile("2: rdmsr ; xor %[err],%[err]\n"
"1:\n\t"
".section .fixup,\"ax\"\n\t"
"3: mov %[fault],%[err] ; jmp 1b\n\t"
".previous\n\t"
" .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "3b" "\n" " .previous\n"
: [err] "=r" (*err), "=A" (val)
: "c" (msr), [fault] "i" (-5));
return (val);
}
/*
 * native_write_msr - WRMSR of EDX:EAX = @high:@low into @msr.
 * Faults on a bad MSR; see native_write_msr_safe() below.
 */
static inline __attribute__((always_inline)) void native_write_msr(unsigned int msr,
unsigned low, unsigned high)
{
asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}
/*
 * native_write_msr_safe - WRMSR with #GP recovery.
 *
 * Returns 0 on success; on a faulting WRMSR the .fixup path returns -5
 * (presumably -EIO — confirm against errno-base.h).  The error code is
 * kept in %eax ("=a"/"0" tie it to the @low input register).
 */
__attribute__((no_instrument_function)) static inline __attribute__((always_inline)) int native_write_msr_safe(unsigned int msr,
unsigned low, unsigned high)
{
int err;
asm volatile("2: wrmsr ; xor %[err],%[err]\n"
"1:\n\t"
".section .fixup,\"ax\"\n\t"
"3: mov %[fault],%[err] ; jmp 1b\n\t"
".previous\n\t"
" .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "3b" "\n" " .previous\n"
: [err] "=a" (err)
: "c" (msr), "0" (low), "d" (high),
[fault] "i" (-5)
: "memory");
return err;
}
3219 | extern unsigned long long native_read_tsc(void); | |
3220 | extern int native_rdmsr_safe_regs(u32 regs[8]); | |
3221 | extern int native_wrmsr_safe_regs(u32 regs[8]); | |
/* __native_read_tsc - raw RDTSC; EDX:EAX gathered into a 64-bit value.
 * No serialization — pair with rdtsc_barrier() when ordering matters. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long long __native_read_tsc(void)
{
unsigned long long val;
asm volatile("rdtsc" : "=A" (val));
return (val);
}
/* native_read_pmc - RDPMC: read performance-monitoring counter @counter
 * (selected via ECX) as a 64-bit value. */
static inline __attribute__((always_inline)) unsigned long long native_read_pmc(int counter)
{
unsigned long long val;
asm volatile("rdpmc" : "=A" (val) : "c" (counter));
return (val);
}
3234 | struct msr *msrs_alloc(void); | |
3235 | void msrs_free(struct msr *msrs); | |
3236 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | |
3237 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | |
3238 | void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); | |
3239 | void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); | |
3240 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | |
3241 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | |
3242 | int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); | |
3243 | int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); | |
3244 | struct exec_domain; | |
3245 | struct pt_regs; | |
3246 | extern int register_exec_domain(struct exec_domain *); | |
3247 | extern int unregister_exec_domain(struct exec_domain *); | |
3248 | extern int __set_personality(unsigned int); | |
/*
 * Personality flag bits (upper bits of the personality word) that tweak
 * process semantics independently of the base personality below.
 */
enum {
UNAME26 = 0x0020000,
ADDR_NO_RANDOMIZE = 0x0040000, /* disable address-space randomization */
FDPIC_FUNCPTRS = 0x0080000,
MMAP_PAGE_ZERO = 0x0100000, /* map page 0 as read-only (SVr4 ABI) */
ADDR_COMPAT_LAYOUT = 0x0200000,
READ_IMPLIES_EXEC = 0x0400000,
ADDR_LIMIT_32BIT = 0x0800000,
SHORT_INODE = 0x1000000,
WHOLE_SECONDS = 0x2000000,
STICKY_TIMEOUTS = 0x4000000,
ADDR_LIMIT_3GB = 0x8000000,
};
/*
 * Base personalities: a low-byte identifier (masked by PER_MASK) OR-ed
 * with the compatibility flag bits each emulated ABI requires.
 */
enum {
PER_LINUX = 0x0000,
PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
WHOLE_SECONDS | SHORT_INODE,
PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
PER_BSD = 0x0006,
PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
PER_LINUX32 = 0x0008,
PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,
PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,
PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,
PER_RISCOS = 0x000c,
PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
PER_OSF4 = 0x000f,
PER_HPUX = 0x0010,
PER_MASK = 0x00ff, /* extracts the base personality id */
};
/* Signal-dispatch callback type used by execution domains. */
typedef void (*handler_t)(int, struct pt_regs *);
/*
 * struct exec_domain - describes an emulated ABI ("personality"):
 * how signals, error codes and socket constants are translated for
 * processes running under that personality.  Registered via
 * register_exec_domain() and kept on a singly linked list.
 */
struct exec_domain {
const char *name; /* human-readable domain name */
handler_t handler; /* signal dispatcher for this domain */
unsigned char pers_low; /* lowest personality id handled */
unsigned char pers_high; /* highest personality id handled */
unsigned long *signal_map; /* translate signals domain -> Linux */
unsigned long *signal_invmap; /* translate signals Linux -> domain */
struct map_segment *err_map; /* errno translation */
struct map_segment *socktype_map;
struct map_segment *sockopt_map;
struct map_segment *af_map; /* address-family translation */
struct module *module; /* owning module, for refcounting */
struct exec_domain *next; /* next registered domain */
};
3303 | extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder); | |
3304 | extern u64 div64_u64(u64 dividend, u64 divisor); | |
3305 | extern s64 div64_s64(s64 dividend, s64 divisor); | |
3306 | static inline __attribute__((always_inline)) u64 div_u64(u64 dividend, u32 divisor) | |
3307 | { | |
3308 | u32 remainder; | |
3309 | return div_u64_rem(dividend, divisor, &remainder); | |
3310 | } | |
3311 | static inline __attribute__((always_inline)) s64 div_s64(s64 dividend, s32 divisor) | |
3312 | { | |
3313 | s32 remainder; | |
3314 | return div_s64_rem(dividend, divisor, &remainder); | |
3315 | } | |
3316 | u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); | |
3317 | static inline __attribute__((always_inline)) __attribute__((always_inline)) u32 | |
3318 | __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) | |
3319 | { | |
3320 | u32 ret = 0; | |
3321 | while (dividend >= divisor) { | |
3322 | asm("" : "+rm"(dividend)); | |
3323 | dividend -= divisor; | |
3324 | ret++; | |
3325 | } | |
3326 | *remainder = dividend; | |
3327 | return ret; | |
3328 | } | |
/*
 * ERR_PTR - encode a negative errno value in a pointer.
 * The error code is simply reinterpreted as an address; IS_ERR()/PTR_ERR()
 * recover it on the other side.
 */
static inline __attribute__((always_inline)) void * __attribute__((warn_unused_result)) ERR_PTR(long error)
{
	void *encoded = (void *) error;

	return encoded;
}
/*
 * PTR_ERR - recover the errno value packed into @ptr by ERR_PTR().
 * Only meaningful when IS_ERR(ptr) is true.
 */
static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) PTR_ERR(const void *ptr)
{
	long code = (long) ptr;

	return code;
}
/*
 * IS_ERR - true when @ptr is an ERR_PTR()-encoded error.
 *
 * Error pointers live in the top 4095 addresses ((unsigned long)-4095 and
 * above, i.e. errno codes -1..-4095).  The wrapping is the expanded
 * unlikely()/ftrace branch-profiling macro; the underlying test is just
 * (unsigned long)ptr >= (unsigned long)-4095.
 */
static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) IS_ERR(const void *ptr)
{
return (__builtin_constant_p(((unsigned long)ptr) >= (unsigned long)-4095) ? !!(((unsigned long)ptr) >= (unsigned long)-4095) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/err.h", .line = 34, }; ______r = __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
}
/*
 * IS_ERR_OR_NULL - true when @ptr is NULL or an ERR_PTR()-encoded error.
 * Same expanded branch-profiling form as IS_ERR() with an extra !ptr test.
 */
static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) IS_ERR_OR_NULL(const void *ptr)
{
return !ptr || (__builtin_constant_p(((unsigned long)ptr) >= (unsigned long)-4095) ? !!(((unsigned long)ptr) >= (unsigned long)-4095) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/err.h", .line = 39, }; ______r = __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
}
/*
 * ERR_CAST - re-type an error pointer without changing its value,
 * e.g. to pass a `struct foo *` error up as a `struct bar *`.
 */
static inline __attribute__((always_inline)) void * __attribute__((warn_unused_result)) ERR_CAST(const void *ptr)
{
	void *same_value = (void *) ptr;

	return same_value;
}
/*
 * PTR_RET - collapse a maybe-error pointer to an int return code:
 * the errno value when @ptr is an error pointer, 0 otherwise.
 * The if() condition is the expanded ftrace branch-profiling macro
 * around a plain IS_ERR(ptr) test.
 */
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) PTR_RET(const void *ptr)
{
if (__builtin_constant_p(((IS_ERR(ptr)))) ? !!((IS_ERR(ptr))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/err.h", .line = 57, }; ______r = !!((IS_ERR(ptr))); ______f.miss_hit[______r]++; ______r; }))
return PTR_ERR(ptr);
else
return 0;
}
/*
 * current_text_addr - return the address of the instruction following the
 * asm, obtained by loading the local label 1's address into a register.
 */
static inline __attribute__((always_inline)) void *current_text_addr(void)
{
void *pc;
asm volatile("mov $1f, %0; 1:":"=r" (pc));
return pc;
}
/*
 * struct cpuinfo_x86 - per-CPU identification and capability data filled
 * in by cpu_detect()/identify_*_cpu() below.  Cache-line aligned because
 * there is one instance per CPU (see the cpu_info percpu variable).
 */
struct cpuinfo_x86 {
__u8 x86; /* CPU family */
__u8 x86_vendor;
__u8 x86_model;
__u8 x86_mask; /* stepping */
char wp_works_ok; /* 32-bit quirk flags follow */
char hlt_works_ok;
char hard_math;
char rfu;
char fdiv_bug;
char f00f_bug;
char coma_bug;
char pad0;
__u8 x86_virt_bits;
__u8 x86_phys_bits;
__u8 x86_coreid_bits;
__u32 extended_cpuid_level; /* max extended CPUID leaf */
int cpuid_level; /* max basic CPUID leaf; -1 if no CPUID */
__u32 x86_capability[10]; /* feature bitmap words */
char x86_vendor_id[16];
char x86_model_id[64];
int x86_cache_size;
int x86_cache_alignment;
int x86_power;
unsigned long loops_per_jiffy;
u16 x86_max_cores; /* topology data follows */
u16 apicid;
u16 initial_apicid;
u16 x86_clflush_size;
u16 booted_cores;
u16 phys_proc_id;
u16 cpu_core_id;
u8 compute_unit_id;
u16 cpu_index; /* logical index of this CPU */
} __attribute__((__aligned__((1 << (6)))));
3397 | extern struct cpuinfo_x86 boot_cpu_data; | |
3398 | extern struct cpuinfo_x86 new_cpu_data; | |
3399 | extern struct tss_struct doublefault_tss; | |
3400 | extern __u32 cpu_caps_cleared[10]; | |
3401 | extern __u32 cpu_caps_set[10]; | |
3402 | extern __attribute__((section(".data..percpu" ""))) __typeof__(struct cpuinfo_x86) cpu_info __attribute__((__aligned__((1 << (6))))); | |
3403 | extern const struct seq_operations cpuinfo_op; | |
/*
 * hlt_works - whether the HLT instruction is usable on @cpu.
 * The expression is the expanded per_cpu() accessor: it offsets the
 * cpu_info percpu variable by __per_cpu_offset[cpu] and reads its
 * hlt_works_ok quirk flag.
 */
static inline __attribute__((always_inline)) int hlt_works(int cpu)
{
return (*({ do { const void *__vpp_verify = (typeof((&(cpu_info))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_info))) *)(&(cpu_info)))); (typeof((typeof(*(&(cpu_info))) *)(&(cpu_info)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).hlt_works_ok;
}
3408 | extern void cpu_detect(struct cpuinfo_x86 *c); | |
3409 | extern struct pt_regs *idle_regs(struct pt_regs *); | |
3410 | extern void early_cpu_init(void); | |
3411 | extern void identify_boot_cpu(void); | |
3412 | extern void identify_secondary_cpu(struct cpuinfo_x86 *); | |
3413 | extern void print_cpu_info(struct cpuinfo_x86 *); | |
3414 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | |
3415 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | |
3416 | extern unsigned short num_cache_leaves; | |
3417 | extern void detect_extended_topology(struct cpuinfo_x86 *c); | |
3418 | extern void detect_ht(struct cpuinfo_x86 *c); | |
/*
 * native_cpuid - execute CPUID with the leaf taken from *@eax and the
 * subleaf from *@ecx (the "0"/"2" input constraints), storing all four
 * result registers back through the pointers.
 */
static inline __attribute__((always_inline)) void native_cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
asm volatile("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "0" (*eax), "2" (*ecx));
}
/*
 * load_cr3 - point CR3 at the page directory @pgdir.
 * Converts the kernel virtual address to a physical one by subtracting
 * PAGE_OFFSET (0xC0000000 on this 32-bit configuration) before writing.
 */
static inline __attribute__((always_inline)) void load_cr3(pgd_t *pgdir)
{
write_cr3((((unsigned long)(pgdir)) - ((unsigned long)(0xC0000000UL))));
}
/*
 * struct x86_hw_tss - the hardware-defined 32-bit Task State Segment
 * layout.  Packed because the CPU dictates every field's exact offset;
 * the __*h members are the architectural padding/high halves of the
 * 16-bit fields.
 */
struct x86_hw_tss {
unsigned short back_link, __blh;
unsigned long sp0; /* ring-0 stack pointer used on kernel entry */
unsigned short ss0, __ss0h;
unsigned long sp1;
unsigned short ss1, __ss1h; /* ss1 caches the SYSENTER CS, see native_load_sp0() */
unsigned long sp2;
unsigned short ss2, __ss2h;
unsigned long __cr3;
unsigned long ip;
unsigned long flags;
unsigned long ax;
unsigned long cx;
unsigned long dx;
unsigned long bx;
unsigned long sp;
unsigned long bp;
unsigned long si;
unsigned long di;
unsigned short es, __esh;
unsigned short cs, __csh;
unsigned short ss, __ssh;
unsigned short ds, __dsh;
unsigned short fs, __fsh;
unsigned short gs, __gsh;
unsigned short ldt, __ldth;
unsigned short trace;
unsigned short io_bitmap_base; /* offset of the I/O permission bitmap */
} __attribute__((packed));
/*
 * struct tss_struct - the kernel's per-CPU TSS: the hardware part plus
 * the full 65536-bit I/O permission bitmap (with one extra terminator
 * word) and a small scratch stack.  Cache-line aligned, one per CPU
 * (see init_tss below).
 */
struct tss_struct {
struct x86_hw_tss x86_tss;
unsigned long io_bitmap[((65536/8)/sizeof(long)) + 1];
unsigned long stack[64];
} __attribute__((__aligned__((1 << (6)))));
3467 | extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tss_struct) init_tss __attribute__((__aligned__((1 << (6))))); | |
/* struct orig_ist - original Interrupt Stack Table pointers (7 entries). */
struct orig_ist {
unsigned long ist[7];
};
/*
 * struct i387_fsave_struct - FPU state as saved by the legacy FSAVE
 * instruction (presumably its exact memory image — confirm against the
 * Intel SDM before relying on offsets).
 */
struct i387_fsave_struct {
u32 cwd; /* control word */
u32 swd; /* status word */
u32 twd; /* tag word */
u32 fip; /* instruction pointer of last fault */
u32 fcs;
u32 foo; /* operand offset */
u32 fos;
u32 st_space[20]; /* 8 x 80-bit ST registers */
u32 status; /* software status, after an FP exception */
};
/*
 * struct i387_fxsave_struct - FPU/SSE state in FXSAVE format, 16-byte
 * aligned as the FXSAVE instruction requires.  The anonymous union holds
 * either the 64-bit rip/rdp form or the 32-bit fip/fcs/foo/fos form of
 * the fault pointers.
 */
struct i387_fxsave_struct {
u16 cwd;
u16 swd;
u16 twd;
u16 fop; /* last opcode */
union {
struct {
u64 rip; /* 64-bit instruction/operand pointers */
u64 rdp;
};
struct {
u32 fip; /* 32-bit selector:offset form */
u32 fcs;
u32 foo;
u32 fos;
};
};
u32 mxcsr;
u32 mxcsr_mask;
u32 st_space[32]; /* 8 ST regs, 16 bytes each */
u32 xmm_space[64]; /* 16 XMM regs, 16 bytes each */
u32 padding[12];
union {
u32 padding1[12];
u32 sw_reserved[12]; /* software-available tail of the 512-byte area */
};
} __attribute__((aligned(16)));
/*
 * struct i387_soft_struct - state for the math-emulation (soft FPU)
 * code: the FSAVE-like register image plus emulator bookkeeping.
 */
struct i387_soft_struct {
u32 cwd;
u32 swd;
u32 twd;
u32 fip;
u32 fcs;
u32 foo;
u32 fos;
u32 st_space[20];
u8 ftop; /* emulator-internal stack-top tracking */
u8 changed;
u8 lookahead;
u8 no_update;
u8 rm;
u8 alimit;
struct math_emu_info *info;
u32 entry_eip;
};
/* struct ymmh_struct - high 128 bits of the 16 YMM registers (AVX),
 * the part not already covered by xmm_space in the FXSAVE image. */
struct ymmh_struct {
u32 ymmh_space[64];
};
/* struct xsave_hdr_struct - the XSAVE header: xstate_bv says which state
 * components the area holds; the rest is architecturally reserved. */
struct xsave_hdr_struct {
u64 xstate_bv;
u64 reserved1[2];
u64 reserved2[5];
} __attribute__((packed));
/* struct xsave_struct - full XSAVE area: legacy FXSAVE image, the XSAVE
 * header, then the AVX high halves.  64-byte aligned per the ISA. */
struct xsave_struct {
struct i387_fxsave_struct i387;
struct xsave_hdr_struct xsave_hdr;
struct ymmh_struct ymmh;
} __attribute__ ((packed, aligned (64)));
/* union thread_xstate - a task's FPU state in whichever save format the
 * CPU (or the emulator) uses: FSAVE, FXSAVE, soft-FPU, or XSAVE. */
union thread_xstate {
struct i387_fsave_struct fsave;
struct i387_fxsave_struct fxsave;
struct i387_soft_struct soft;
struct xsave_struct xsave;
};
/* struct fpu - per-task handle to lazily allocated FPU state
 * (see free_thread_xstate()/task_xstate_cachep below). */
struct fpu {
union thread_xstate *state;
};
/* struct stack_canary - per-CPU stack-protector canary; the pad places
 * the canary at the offset the compiler's stack-protector code expects. */
struct stack_canary {
char __pad[20];
unsigned long canary;
};
3553 | extern __attribute__((section(".data..percpu" ""))) __typeof__(struct stack_canary) stack_canary __attribute__((__aligned__((1 << (6))))); | |
3554 | extern unsigned int xstate_size; | |
3555 | extern void free_thread_xstate(struct task_struct *); | |
3556 | extern struct kmem_cache *task_xstate_cachep; | |
3557 | struct perf_event; | |
/*
 * struct thread_struct - architecture-specific per-task CPU state kept
 * across context switches.
 */
struct thread_struct {
struct desc_struct tls_array[3]; /* thread-local-storage GDT entries */
unsigned long sp0; /* top of the kernel stack (loaded into TSS) */
unsigned long sp;
unsigned long sysenter_cs; /* cached SYSENTER CS, mirrored into the TSS */
unsigned long ip;
unsigned long gs;
struct perf_event *ptrace_bps[4]; /* hardware breakpoints set via ptrace */
unsigned long debugreg6; /* saved DR6 */
unsigned long ptrace_dr7; /* DR7 value requested by the tracer */
unsigned long cr2; /* fault details from the last trap */
unsigned long trap_no;
unsigned long error_code;
struct fpu fpu;
struct vm86_struct *vm86_info; /* vm86 mode state follows */
unsigned long screen_bitmap;
unsigned long v86flags;
unsigned long v86mask;
unsigned long saved_sp0;
unsigned int saved_fs;
unsigned int saved_gs;
unsigned long *io_bitmap_ptr; /* I/O permission bitmap, if ioperm() used */
unsigned long iopl;
unsigned io_bitmap_max; /* highest byte of io_bitmap in use */
};
/*
 * native_get_debugreg - read hardware debug register @regno (0-3, 6, 7;
 * DR4/DR5 are architecturally reserved and not handled).  Any other
 * value hits the expanded BUG() in the default case, which emits a ud2
 * plus a __bug_table record and never returns.
 */
static inline __attribute__((always_inline)) unsigned long native_get_debugreg(int regno)
{
unsigned long val = 0;
switch (regno) {
case 0:
asm("mov %%db0, %0" :"=r" (val));
break;
case 1:
asm("mov %%db1, %0" :"=r" (val));
break;
case 2:
asm("mov %%db2, %0" :"=r" (val));
break;
case 3:
asm("mov %%db3, %0" :"=r" (val));
break;
case 6:
asm("mov %%db6, %0" :"=r" (val));
break;
case 7:
asm("mov %%db7, %0" :"=r" (val));
break;
default:
do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h"), "i" (499), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0);
}
return val;
}
/*
 * native_set_debugreg - write @value into hardware debug register
 * @regno (0-3, 6, 7).  Any other register number is a kernel bug:
 * the default case is the expanded BUG() macro (ud2 + __bug_table entry).
 */
static inline __attribute__((always_inline)) void native_set_debugreg(int regno, unsigned long value)
{
switch (regno) {
case 0:
asm("mov %0, %%db0" ::"r" (value));
break;
case 1:
asm("mov %0, %%db1" ::"r" (value));
break;
case 2:
asm("mov %0, %%db2" ::"r" (value));
break;
case 3:
asm("mov %0, %%db3" ::"r" (value));
break;
case 6:
asm("mov %0, %%db6" ::"r" (value));
break;
case 7:
asm("mov %0, %%db7" ::"r" (value));
break;
default:
do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h"), "i" (526), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0);
}
}
/*
 * native_set_iopl_mask - set the IOPL field of EFLAGS.
 * Pops EFLAGS into a scratch register, clears the two IOPL bits
 * (mask 0x3000), ORs in @mask, and pushes the result back into EFLAGS.
 */
static inline __attribute__((always_inline)) void native_set_iopl_mask(unsigned mask)
{
unsigned int reg;
asm volatile ("pushfl;"
"popl %0;"
"andl %1, %0;"
"orl %2, %0;"
"pushl %0;"
"popfl"
: "=&r" (reg)
: "i" (~0x00003000), "r" (mask));
}
/*
 * native_load_sp0 - install @thread's kernel stack pointer into the
 * per-CPU TSS for the next kernel entry.
 *
 * The if() is the expanded unlikely()/ftrace branch-profiling wrapper
 * around a plain (tss->x86_tss.ss1 != thread->sysenter_cs) test: when
 * the cached SYSENTER CS differs, it is updated and written to MSR
 * 0x174 (IA32_SYSENTER_CS) via the paravirt write_msr wrapper.
 */
static inline __attribute__((always_inline)) void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
tss->x86_tss.sp0 = thread->sp0;
if (__builtin_constant_p((((__builtin_constant_p(tss->x86_tss.ss1 != thread->sysenter_cs) ? !!(tss->x86_tss.ss1 != thread->sysenter_cs) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 555, }; ______r = __builtin_expect(!!(tss->x86_tss.ss1 != thread->sysenter_cs), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(tss->x86_tss.ss1 != thread->sysenter_cs) ? !!(tss->x86_tss.ss1 != thread->sysenter_cs) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 555, }; ______r = __builtin_expect(!!(tss->x86_tss.ss1 != thread->sysenter_cs), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 555, }; ______r = !!(((__builtin_constant_p(tss->x86_tss.ss1 != thread->sysenter_cs) ? !!(tss->x86_tss.ss1 != thread->sysenter_cs) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 555, }; ______r = __builtin_expect(!!(tss->x86_tss.ss1 != thread->sysenter_cs), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
tss->x86_tss.ss1 = thread->sysenter_cs;
do { paravirt_write_msr(0x00000174, thread->sysenter_cs, 0); } while (0);
}
}
/* native_swapgs - no-op on this 32-bit build (SWAPGS is a 64-bit-only
 * instruction); kept so common code can call it unconditionally. */
static inline __attribute__((always_inline)) void native_swapgs(void)
{
}
3659 | extern unsigned long mmu_cr4_features; | |
/*
 * set_in_cr4 - set @mask bits in CR4 and record them in
 * mmu_cr4_features so later CPUs / resume paths apply the same setting.
 */
static inline __attribute__((always_inline)) void set_in_cr4(unsigned long mask)
{
unsigned long cr4;
mmu_cr4_features |= mask; /* remembered before touching the register */
cr4 = read_cr4();
cr4 |= mask;
write_cr4(cr4);
}
/*
 * clear_in_cr4 - clear @mask bits in CR4 and drop them from the
 * remembered mmu_cr4_features template.
 */
static inline __attribute__((always_inline)) void clear_in_cr4(unsigned long mask)
{
unsigned long cr4;
mmu_cr4_features &= ~mask;
cr4 = read_cr4();
cr4 &= ~mask;
write_cr4(cr4);
}
/* mm_segment_t - address-limit token used by set_fs()/get_fs() style
 * user/kernel segment switching; wraps a single limit value. */
typedef struct {
unsigned long seg;
} mm_segment_t;
3679 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | |
3680 | extern void release_thread(struct task_struct *); | |
3681 | extern void prepare_to_copy(struct task_struct *tsk); | |
3682 | unsigned long get_wchan(struct task_struct *p); | |
/*
 * Execute CPUID for leaf @op with ECX pre-cleared; all four output
 * registers are returned through the pointer arguments.
 */
static inline __attribute__((always_inline)) void cpuid(unsigned int op,
		unsigned int *eax, unsigned int *ebx,
		unsigned int *ecx, unsigned int *edx)
{
	*ecx = 0;
	*eax = op;
	__cpuid(eax, ebx, ecx, edx);
}
/*
 * Execute CPUID for leaf @op and sub-leaf @count (placed in ECX);
 * all four output registers are returned through the pointer arguments.
 */
static inline __attribute__((always_inline)) void cpuid_count(unsigned int op, int count,
		unsigned int *eax, unsigned int *ebx,
		unsigned int *ecx, unsigned int *edx)
{
	*ecx = count;
	*eax = op;
	__cpuid(eax, ebx, ecx, edx);
}
/* CPUID leaf @op, returning only EAX. */
static inline __attribute__((always_inline)) unsigned int cpuid_eax(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);
	return regs[0];
}
/* CPUID leaf @op, returning only EBX. */
static inline __attribute__((always_inline)) unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);
	return regs[1];
}
/* CPUID leaf @op, returning only ECX. */
static inline __attribute__((always_inline)) unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);
	return regs[2];
}
/* CPUID leaf @op, returning only EDX. */
static inline __attribute__((always_inline)) unsigned int cpuid_edx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);
	return regs[3];
}
3723 | static inline __attribute__((always_inline)) void rep_nop(void) | |
3724 | { | |
3725 | asm volatile("rep; nop" ::: "memory"); | |
3726 | } | |
/* Spin-wait relax hook: simply forwards to rep_nop(). */
static inline __attribute__((always_inline)) void cpu_relax(void)
{
	rep_nop();
}
3731 | static inline __attribute__((always_inline)) void sync_core(void) | |
3732 | { | |
3733 | int tmp; | |
3734 | asm volatile("cpuid" : "=a" (tmp) : "0" (1) | |
3735 | : "ebx", "ecx", "edx", "memory"); | |
3736 | } | |
3737 | static inline __attribute__((always_inline)) void __monitor(const void *eax, unsigned long ecx, | |
3738 | unsigned long edx) | |
3739 | { | |
3740 | asm volatile(".byte 0x0f, 0x01, 0xc8;" | |
3741 | :: "a" (eax), "c" (ecx), "d"(edx)); | |
3742 | } | |
3743 | static inline __attribute__((always_inline)) void __mwait(unsigned long eax, unsigned long ecx) | |
3744 | { | |
3745 | asm volatile(".byte 0x0f, 0x01, 0xc9;" | |
3746 | :: "a" (eax), "c" (ecx)); | |
3747 | } | |
3748 | static inline __attribute__((always_inline)) void __sti_mwait(unsigned long eax, unsigned long ecx) | |
3749 | { | |
3750 | trace_hardirqs_on(); | |
3751 | asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" | |
3752 | :: "a" (eax), "c" (ecx)); | |
3753 | } | |
3754 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | |
3755 | extern void select_idle_routine(const struct cpuinfo_x86 *c); | |
3756 | extern void init_amd_e400_c1e_mask(void); | |
3757 | extern unsigned long boot_option_idle_override; | |
3758 | extern bool amd_e400_c1e_detected; | |
/* Idle-routine override modes (presumably stored in boot_option_idle_override above — confirm at callers). */
enum idle_boot_override { IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT,
			  IDLE_POLL, IDLE_FORCE_MWAIT };
3761 | extern void enable_sep_cpu(void); | |
3762 | extern int sysenter_setup(void); | |
3763 | extern void early_trap_init(void); | |
3764 | extern struct desc_ptr early_gdt_descr; | |
3765 | extern void cpu_set_gdt(int); | |
3766 | extern void switch_to_new_gdt(int); | |
3767 | extern void load_percpu_segment(int); | |
3768 | extern void cpu_init(void); | |
/* Read MSR 0x1d9 (DEBUGCTL) through the paravirt hook; the error code is discarded. */
static inline __attribute__((always_inline)) unsigned long get_debugctlmsr(void)
{
	unsigned long val = 0;

	do { int _err; val = paravirt_read_msr(0x000001d9, &_err); } while (0);
	return val;
}
3775 | static inline __attribute__((always_inline)) void update_debugctlmsr(unsigned long debugctlmsr) | |
3776 | { | |
3777 | do { paravirt_write_msr(0x000001d9, (u32)((u64)(debugctlmsr)), ((u64)(debugctlmsr))>>32); } while (0); | |
3778 | } | |
3779 | extern unsigned int machine_id; | |
3780 | extern unsigned int machine_submodel_id; | |
3781 | extern unsigned int BIOS_revision; | |
3782 | extern int bootloader_type; | |
3783 | extern int bootloader_version; | |
3784 | extern char ignore_fpu_irq; | |
/*
 * Read-prefetch hint, patched at boot via the .altinstructions mechanism:
 * a 4-byte no-op (bytes 0x8d,0x74,0x26,0x00) is replaced with
 * "prefetchnta (%1)" when the CPU has feature bit (0*32+25).
 */
3785 | static inline __attribute__((always_inline)) void prefetch(const void *x) | 
3786 | { | 
3787 | asm volatile ("661:\n\t" ".byte " "0x8d,0x74,0x26,0x00" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(0*32+25)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "prefetchnta (%1)" "\n664:\n" ".previous" : : "i" (0), "r" (x)) | 
3788 | ; | 
3789 | } | 
/*
 * Write-prefetch hint, patched at boot via the .altinstructions mechanism:
 * a 4-byte no-op (bytes 0x8d,0x74,0x26,0x00) is replaced with
 * "prefetchw (%1)" when the CPU has feature bit (1*32+31).
 */
3790 | static inline __attribute__((always_inline)) void prefetchw(const void *x) | 
3791 | { | 
3792 | asm volatile ("661:\n\t" ".byte " "0x8d,0x74,0x26,0x00" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(1*32+31)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "prefetchw (%1)" "\n664:\n" ".previous" : : "i" (0), "r" (x)) | 
3793 | ; | 
3794 | } | 
/* Write-prefetch the given address (thin wrapper around prefetchw()). */
static inline __attribute__((always_inline)) void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
3799 | extern unsigned long thread_saved_pc(struct task_struct *tsk); | |
3800 | extern void start_thread(struct pt_regs *regs, unsigned long new_ip, | |
3801 | unsigned long new_sp); | |
3802 | extern int get_tsc_mode(unsigned long adr); | |
3803 | extern int set_tsc_mode(unsigned int val); | |
3804 | extern int amd_get_nb_id(int cpu); | |
3805 | struct aperfmperf { | |
3806 | u64 aperf, mperf; | |
3807 | }; | |
/*
 * Snapshot the APERF (MSR 0xe8) and MPERF (MSR 0xe7) MSRs into @am.
 * The first statement is the preprocessed expansion of a WARN_ON_ONCE()
 * on the CPU lacking feature bit (3*32+28), including ftrace
 * branch-profiling bookkeeping; it is left byte-identical.
 */
3808 | static inline __attribute__((always_inline)) void get_aperfmperf(struct aperfmperf *am) | 
3809 | { | 
3810 | ({ static bool __warned; int __ret_warn_once = !!(!(__builtin_constant_p((3*32+28)) && ( ((((3*32+28))>>5)==0 && (1UL<<(((3*32+28))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((3*32+28))>>5)==1 && (1UL<<(((3*32+28))&31) & (0|0))) || ((((3*32+28))>>5)==2 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==3 && (1UL<<(((3*32+28))&31) & (0))) || ((((3*32+28))>>5)==4 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==5 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==6 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==7 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==8 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==9 && (1UL<<(((3*32+28))&31) & 0)) ) ? 1 : (__builtin_constant_p(((3*32+28))) ? constant_test_bit(((3*32+28)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((3*32+28)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_once) ? !!(__ret_warn_once) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_once), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_once) ? 
!!(__ret_warn_once) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_once), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!(((__builtin_constant_p(__ret_warn_once) ? !!(__ret_warn_once) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_once), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) if (__builtin_constant_p(((({ int __ret_warn_on = !!(!__warned); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", 981); (__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); })))) ? !!((({ int __ret_warn_on = !!(!__warned); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", 981); (__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!((({ int __ret_warn_on = !!(!__warned); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", 981); (__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); }))); ______f.miss_hit[______r]++; ______r; })) __warned = true; (__builtin_constant_p(__ret_warn_once) ? !!(__ret_warn_once) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_once), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); }); | 
/* Read the two MSRs through the paravirt hook; the read-error codes are discarded. */
3811 | do { int _err; am->aperf = paravirt_read_msr(0x000000e8, &_err); } while (0); | 
3812 | do { int _err; am->mperf = paravirt_read_msr(0x000000e7, &_err); } while (0); | 
3813 | } | 
/*
 * Ratio of APERF to MPERF deltas between two snapshots.
 * mperf is scaled down by 2^10 before the 64-bit division; if the
 * scaled mperf is zero, the raw aperf delta is returned unchanged.
 * (The expanded if() is ftrace branch-profiling instrumentation.)
 */
3814 | static inline __attribute__((always_inline)) | 
3815 | unsigned long calc_aperfmperf_ratio(struct aperfmperf *old, | 
3816 | struct aperfmperf *new) | 
3817 | { | 
3818 | u64 aperf = new->aperf - old->aperf; | 
3819 | u64 mperf = new->mperf - old->mperf; | 
3820 | unsigned long ratio = aperf; | 
3821 | mperf >>= 10; | 
3822 | if (__builtin_constant_p(((mperf))) ? !!((mperf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 998, }; ______r = !!((mperf)); ______f.miss_hit[______r]++; ______r; })) | 
3823 | ratio = div64_u64(aperf, mperf); | 
3824 | return ratio; | 
3825 | } | 
3826 | extern const int amd_erratum_383[]; | |
3827 | extern const int amd_erratum_400[]; | |
3828 | extern bool cpu_has_amd_erratum(const int *); | |
3829 | extern void mcount(void); | |
/* No call-site adjustment is needed here: the address is returned unchanged. */
static inline __attribute__((always_inline)) unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}
/* Per-architecture dynamic-ftrace record state: intentionally empty. */
struct dyn_arch_ftrace {
};
3836 | static inline __attribute__((always_inline)) int atomic_read(const atomic_t *v) | |
3837 | { | |
3838 | return (*(volatile int *)&(v)->counter); | |
3839 | } | |
3840 | static inline __attribute__((always_inline)) void atomic_set(atomic_t *v, int i) | |
3841 | { | |
3842 | v->counter = i; | |
3843 | } | |
3844 | static inline __attribute__((always_inline)) void atomic_add(int i, atomic_t *v) | |
3845 | { | |
3846 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "addl %1,%0" | |
3847 | : "+m" (v->counter) | |
3848 | : "ir" (i)); | |
3849 | } | |
3850 | static inline __attribute__((always_inline)) void atomic_sub(int i, atomic_t *v) | |
3851 | { | |
3852 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "subl %1,%0" | |
3853 | : "+m" (v->counter) | |
3854 | : "ir" (i)); | |
3855 | } | |
3856 | static inline __attribute__((always_inline)) int atomic_sub_and_test(int i, atomic_t *v) | |
3857 | { | |
3858 | unsigned char c; | |
3859 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "subl %2,%0; sete %1" | |
3860 | : "+m" (v->counter), "=qm" (c) | |
3861 | : "ir" (i) : "memory"); | |
3862 | return c; | |
3863 | } | |
3864 | static inline __attribute__((always_inline)) void atomic_inc(atomic_t *v) | |
3865 | { | |
3866 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "incl %0" | |
3867 | : "+m" (v->counter)); | |
3868 | } | |
3869 | static inline __attribute__((always_inline)) void atomic_dec(atomic_t *v) | |
3870 | { | |
3871 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "decl %0" | |
3872 | : "+m" (v->counter)); | |
3873 | } | |
3874 | static inline __attribute__((always_inline)) int atomic_dec_and_test(atomic_t *v) | |
3875 | { | |
3876 | unsigned char c; | |
3877 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "decl %0; sete %1" | |
3878 | : "+m" (v->counter), "=qm" (c) | |
3879 | : : "memory"); | |
3880 | return c != 0; | |
3881 | } | |
3882 | static inline __attribute__((always_inline)) int atomic_inc_and_test(atomic_t *v) | |
3883 | { | |
3884 | unsigned char c; | |
3885 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "incl %0; sete %1" | |
3886 | : "+m" (v->counter), "=qm" (c) | |
3887 | : : "memory"); | |
3888 | return c != 0; | |
3889 | } | |
3890 | static inline __attribute__((always_inline)) int atomic_add_negative(int i, atomic_t *v) | |
3891 | { | |
3892 | unsigned char c; | |
3893 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "addl %2,%0; sets %1" | |
3894 | : "+m" (v->counter), "=qm" (c) | |
3895 | : "ir" (i) : "memory"); | |
3896 | return c; | |
3897 | } | |
3898 | static inline __attribute__((always_inline)) int atomic_add_return(int i, atomic_t *v) | |
3899 | { | |
3900 | int __i; | |
3901 | __i = i; | |
3902 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "xaddl %0, %1" | |
3903 | : "+r" (i), "+m" (v->counter) | |
3904 | : : "memory"); | |
3905 | return i + __i; | |
3906 | } | |
3907 | static inline __attribute__((always_inline)) int atomic_sub_return(int i, atomic_t *v) | |
3908 | { | |
3909 | return atomic_add_return(-i, v); | |
3910 | } | |
/*
 * Atomic compare-and-swap on v->counter: if it equals @old, store @new.
 * Returns the value observed before the operation (== @old on success).
 * Body is the preprocessed cmpxchg() expansion: LOCK cmpxchg{b,w,l}
 * selected by operand size, with the site recorded in .smp_locks.
 */
3911 | static inline __attribute__((always_inline)) int atomic_cmpxchg(atomic_t *v, int old, int new) | 
3912 | { | 
3913 | return ({ __typeof__(*(((&v->counter)))) __ret; __typeof__(*(((&v->counter)))) __old = (((old))); __typeof__(*(((&v->counter)))) __new = (((new))); switch ((sizeof(*&v->counter))) { case 1: { volatile u8 *__ptr = (volatile u8 *)(((&v->counter))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)(((&v->counter))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)(((&v->counter))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; }); | 
3914 | } | 
/*
 * Atomically exchange v->counter with @new, returning the old value.
 * Body is the preprocessed xchg() expansion: XCHG selected by operand
 * size (XCHG with a memory operand is implicitly locked).
 */
3915 | static inline __attribute__((always_inline)) int atomic_xchg(atomic_t *v, int new) | 
3916 | { | 
3917 | return ({ __typeof(*((&v->counter))) __x = ((new)); switch (sizeof(*&v->counter)) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&v->counter)); asm volatile("xchgb %0,%1" : "=q" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&v->counter)); asm volatile("xchgw %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&v->counter)); asm volatile("xchgl %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } default: __xchg_wrong_size(); } __x; }); | 
3918 | } | 
/*
 * Atomically add @a to *v unless *v equals @u, using a cmpxchg retry loop.
 * Returns nonzero iff the add was performed. The expanded if() conditions
 * are ftrace unlikely()/likely() branch-profiling instrumentation around
 * "c == u" and "old == c" respectively.
 */
3919 | static inline __attribute__((always_inline)) int atomic_add_unless(atomic_t *v, int a, int u) | 
3920 | { | 
3921 | int c, old; | 
3922 | c = atomic_read(v); | 
3923 | for (;;) { | 
3924 | if (__builtin_constant_p((((__builtin_constant_p(c == (u)) ? !!(c == (u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 237, }; ______r = __builtin_expect(!!(c == (u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(c == (u)) ? !!(c == (u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 237, }; ______r = __builtin_expect(!!(c == (u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 237, }; ______r = !!(((__builtin_constant_p(c == (u)) ? !!(c == (u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 237, }; ______r = __builtin_expect(!!(c == (u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | 
3925 | break; | 
3926 | old = atomic_cmpxchg((v), c, c + (a)); | 
3927 | if (__builtin_constant_p((((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 240, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 240, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 240, }; ______r = !!(((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 240, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | 
3928 | break; | 
3929 | c = old; | 
3930 | } | 
3931 | return c != (u); | 
3932 | } | 
/*
 * Atomically decrement *v only if the result would not go negative,
 * using a cmpxchg retry loop. Returns the decremented value, which is
 * negative when no decrement happened. The expanded if() conditions are
 * ftrace unlikely()/likely() instrumentation around "dec < 0" and
 * "old == c" respectively.
 */
3933 | static inline __attribute__((always_inline)) int atomic_dec_if_positive(atomic_t *v) | 
3934 | { | 
3935 | int c, old, dec; | 
3936 | c = atomic_read(v); | 
3937 | for (;;) { | 
3938 | dec = c - 1; | 
3939 | if (__builtin_constant_p((((__builtin_constant_p(dec < 0) ? !!(dec < 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 262, }; ______r = __builtin_expect(!!(dec < 0), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(dec < 0) ? !!(dec < 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 262, }; ______r = __builtin_expect(!!(dec < 0), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 262, }; ______r = !!(((__builtin_constant_p(dec < 0) ? !!(dec < 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 262, }; ______r = __builtin_expect(!!(dec < 0), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | 
3940 | break; | 
3941 | old = atomic_cmpxchg((v), c, dec); | 
3942 | if (__builtin_constant_p((((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 265, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 265, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 265, }; ______r = !!(((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 265, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | 
3943 | break; | 
3944 | c = old; | 
3945 | } | 
3946 | return dec; | 
3947 | } | 
3948 | static inline __attribute__((always_inline)) short int atomic_inc_short(short int *v) | |
3949 | { | |
3950 | asm(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "addw $1, %0" : "+m" (*v)); | |
3951 | return *v; | |
3952 | } | |
3953 | typedef struct { | |
3954 | u64 __attribute__((aligned(8))) counter; | |
3955 | } atomic64_t; | |
3956 | static inline __attribute__((always_inline)) long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) | |
3957 | { | |
3958 | return ((__typeof__(*(&v->counter)))__cmpxchg64((&v->counter), (unsigned long long)(o), (unsigned long long)(n))); | |
3959 | } | |
3960 | static inline __attribute__((always_inline)) long long atomic64_xchg(atomic64_t *v, long long n) | |
3961 | { | |
3962 | long long o; | |
3963 | unsigned high = (unsigned)(n >> 32); | |
3964 | unsigned low = (unsigned)n; | |
3965 | asm volatile("call atomic64_" "xchg" "_cx8" | |
3966 | : "=A" (o), "+b" (low), "+c" (high) | |
3967 | : "S" (v) | |
3968 | : "memory" | |
3969 | ); | |
3970 | return o; | |
3971 | } | |
3972 | static inline __attribute__((always_inline)) void atomic64_set(atomic64_t *v, long long i) | |
3973 | { | |
3974 | unsigned high = (unsigned)(i >> 32); | |
3975 | unsigned low = (unsigned)i; | |
3976 | asm volatile("call atomic64_" "set" "_cx8" | |
3977 | : "+b" (low), "+c" (high) | |
3978 | : "S" (v) | |
3979 | : "eax", "edx", "memory" | |
3980 | ); | |
3981 | } | |
3982 | static inline __attribute__((always_inline)) long long atomic64_read(atomic64_t *v) | |
3983 | { | |
3984 | long long r; | |
3985 | asm volatile("call atomic64_" "read" "_cx8" | |
3986 | : "=A" (r), "+c" (v) | |
3987 | : : "memory" | |
3988 | ); | |
3989 | return r; | |
3990 | } | |
3991 | static inline __attribute__((always_inline)) long long atomic64_add_return(long long i, atomic64_t *v) | |
3992 | { | |
3993 | asm volatile("call atomic64_" "add_return" "_cx8" | |
3994 | : "+A" (i), "+c" (v) | |
3995 | : : "memory" | |
3996 | ); | |
3997 | return i; | |
3998 | } | |
/*
 * Atomically subtract i from v->counter and return the new value.
 * Same register contract as atomic64_add_return(): operand/result in
 * edx:eax ("+A"), pointer in ecx.
 */
static inline __attribute__((always_inline)) long long atomic64_sub_return(long long i, atomic64_t *v)
{
	asm volatile("call atomic64_" "sub_return" "_cx8"
		     : "+A" (i), "+c" (v)
		     : : "memory"
		     );
	return i;
}
/*
 * Atomically increment v->counter and return the new value
 * (delivered in edx:eax).  Pointer in esi; the helper clobbers ecx.
 */
static inline __attribute__((always_inline)) long long atomic64_inc_return(atomic64_t *v)
{
	long long a;
	asm volatile("call atomic64_" "inc_return" "_cx8"
		     : "=A" (a)
		     : "S" (v)
		     : "memory", "ecx"
		     );
	return a;
}
/*
 * Atomically decrement v->counter and return the new value
 * (delivered in edx:eax).  Pointer in esi; the helper clobbers ecx.
 */
static inline __attribute__((always_inline)) long long atomic64_dec_return(atomic64_t *v)
{
	long long a;
	asm volatile("call atomic64_" "dec_return" "_cx8"
		     : "=A" (a)
		     : "S" (v)
		     : "memory", "ecx"
		     );
	return a;
}
/*
 * Atomically add i to v->counter.  Reuses the add_return helper, so
 * the new value is returned as well (callers are free to ignore it).
 */
static inline __attribute__((always_inline)) long long atomic64_add(long long i, atomic64_t *v)
{
	asm volatile("call atomic64_" "add_return" "_cx8"
		     : "+A" (i), "+c" (v)
		     : : "memory"
		     );
	return i;
}
/*
 * Atomically subtract i from v->counter.  Reuses the sub_return
 * helper, so the new value is returned as well.
 */
static inline __attribute__((always_inline)) long long atomic64_sub(long long i, atomic64_t *v)
{
	asm volatile("call atomic64_" "sub_return" "_cx8"
		     : "+A" (i), "+c" (v)
		     : : "memory"
		     );
	return i;
}
4043 | static inline __attribute__((always_inline)) int atomic64_sub_and_test(long long i, atomic64_t *v) | |
4044 | { | |
4045 | return atomic64_sub_return(i, v) == 0; | |
4046 | } | |
/*
 * Atomically increment v->counter, discarding the result.  With no
 * output operands, eax/ecx/edx must be declared as clobbers since the
 * helper uses them.
 */
static inline __attribute__((always_inline)) void atomic64_inc(atomic64_t *v)
{
	asm volatile("call atomic64_" "inc_return" "_cx8"
		     : : "S" (v)
		     : "memory", "eax", "ecx", "edx"
		     );
}
/*
 * Atomically decrement v->counter, discarding the result.  Mirror of
 * atomic64_inc(): no outputs, so eax/ecx/edx are clobbers.
 */
static inline __attribute__((always_inline)) void atomic64_dec(atomic64_t *v)
{
	asm volatile("call atomic64_" "dec_return" "_cx8"
		     : : "S" (v)
		     : "memory", "eax", "ecx", "edx"
		     );
}
4061 | static inline __attribute__((always_inline)) int atomic64_dec_and_test(atomic64_t *v) | |
4062 | { | |
4063 | return atomic64_dec_return(v) == 0; | |
4064 | } | |
4065 | static inline __attribute__((always_inline)) int atomic64_inc_and_test(atomic64_t *v) | |
4066 | { | |
4067 | return atomic64_inc_return(v) == 0; | |
4068 | } | |
4069 | static inline __attribute__((always_inline)) int atomic64_add_negative(long long i, atomic64_t *v) | |
4070 | { | |
4071 | return atomic64_add_return(i, v) < 0; | |
4072 | } | |
/*
 * Add a to v->counter unless it currently equals u.
 * Register contract of atomic64_add_unless_cx8: a in edx:eax ("+A"),
 * pointer in ecx, u split into esi (low) / edi (high).  The helper
 * writes its boolean result back through the "a" operand; nonzero
 * means the add was performed (NOTE(review): the exact result
 * convention lives in the out-of-line helper — confirm there).
 */
static inline __attribute__((always_inline)) int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned low = (unsigned)u;
	unsigned high = (unsigned)(u >> 32);
	asm volatile("call atomic64_" "add_unless" "_cx8" "\n\t"
		     : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
		     : : "memory");
	return (int)a;
}
/*
 * Increment v->counter unless it is zero; the helper reports its
 * boolean result in eax and clobbers ecx/edx.  (The conditional
 * semantics are implemented out of line in atomic64_inc_not_zero_cx8.)
 */
static inline __attribute__((always_inline)) int atomic64_inc_not_zero(atomic64_t *v)
{
	int r;
	asm volatile("call atomic64_" "inc_not_zero" "_cx8"
		     : "=a" (r)
		     : "S" (v)
		     : "ecx", "edx", "memory"
		     );
	return r;
}
/*
 * Conditionally decrement v->counter and return the resulting 64-bit
 * value in edx:eax; the decrement is skipped when it would make the
 * counter negative (per the out-of-line helper's contract —
 * NOTE(review): confirm in atomic64_dec_if_positive_cx8).  Clobbers ecx.
 */
static inline __attribute__((always_inline)) long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long r;
	asm volatile("call atomic64_" "dec_if_positive" "_cx8"
		     : "=A" (r)
		     : "S" (v)
		     : "ecx", "memory"
		     );
	return r;
}
4102 | typedef atomic_t atomic_long_t; | |
4103 | static inline __attribute__((always_inline)) long atomic_long_read(atomic_long_t *l) | |
4104 | { | |
4105 | atomic_t *v = (atomic_t *)l; | |
4106 | return (long)atomic_read(v); | |
4107 | } | |
4108 | static inline __attribute__((always_inline)) void atomic_long_set(atomic_long_t *l, long i) | |
4109 | { | |
4110 | atomic_t *v = (atomic_t *)l; | |
4111 | atomic_set(v, i); | |
4112 | } | |
4113 | static inline __attribute__((always_inline)) void atomic_long_inc(atomic_long_t *l) | |
4114 | { | |
4115 | atomic_t *v = (atomic_t *)l; | |
4116 | atomic_inc(v); | |
4117 | } | |
4118 | static inline __attribute__((always_inline)) void atomic_long_dec(atomic_long_t *l) | |
4119 | { | |
4120 | atomic_t *v = (atomic_t *)l; | |
4121 | atomic_dec(v); | |
4122 | } | |
4123 | static inline __attribute__((always_inline)) void atomic_long_add(long i, atomic_long_t *l) | |
4124 | { | |
4125 | atomic_t *v = (atomic_t *)l; | |
4126 | atomic_add(i, v); | |
4127 | } | |
4128 | static inline __attribute__((always_inline)) void atomic_long_sub(long i, atomic_long_t *l) | |
4129 | { | |
4130 | atomic_t *v = (atomic_t *)l; | |
4131 | atomic_sub(i, v); | |
4132 | } | |
4133 | static inline __attribute__((always_inline)) int atomic_long_sub_and_test(long i, atomic_long_t *l) | |
4134 | { | |
4135 | atomic_t *v = (atomic_t *)l; | |
4136 | return atomic_sub_and_test(i, v); | |
4137 | } | |
4138 | static inline __attribute__((always_inline)) int atomic_long_dec_and_test(atomic_long_t *l) | |
4139 | { | |
4140 | atomic_t *v = (atomic_t *)l; | |
4141 | return atomic_dec_and_test(v); | |
4142 | } | |
4143 | static inline __attribute__((always_inline)) int atomic_long_inc_and_test(atomic_long_t *l) | |
4144 | { | |
4145 | atomic_t *v = (atomic_t *)l; | |
4146 | return atomic_inc_and_test(v); | |
4147 | } | |
4148 | static inline __attribute__((always_inline)) int atomic_long_add_negative(long i, atomic_long_t *l) | |
4149 | { | |
4150 | atomic_t *v = (atomic_t *)l; | |
4151 | return atomic_add_negative(i, v); | |
4152 | } | |
4153 | static inline __attribute__((always_inline)) long atomic_long_add_return(long i, atomic_long_t *l) | |
4154 | { | |
4155 | atomic_t *v = (atomic_t *)l; | |
4156 | return (long)atomic_add_return(i, v); | |
4157 | } | |
4158 | static inline __attribute__((always_inline)) long atomic_long_sub_return(long i, atomic_long_t *l) | |
4159 | { | |
4160 | atomic_t *v = (atomic_t *)l; | |
4161 | return (long)atomic_sub_return(i, v); | |
4162 | } | |
4163 | static inline __attribute__((always_inline)) long atomic_long_inc_return(atomic_long_t *l) | |
4164 | { | |
4165 | atomic_t *v = (atomic_t *)l; | |
4166 | return (long)(atomic_add_return(1, v)); | |
4167 | } | |
4168 | static inline __attribute__((always_inline)) long atomic_long_dec_return(atomic_long_t *l) | |
4169 | { | |
4170 | atomic_t *v = (atomic_t *)l; | |
4171 | return (long)(atomic_sub_return(1, v)); | |
4172 | } | |
4173 | static inline __attribute__((always_inline)) long atomic_long_add_unless(atomic_long_t *l, long a, long u) | |
4174 | { | |
4175 | atomic_t *v = (atomic_t *)l; | |
4176 | return (long)atomic_add_unless(v, a, u); | |
4177 | } | |
/*
 * Low-level per-thread bookkeeping, located at the bottom of the
 * kernel stack (see current_thread_info()).
 */
struct thread_info {
	struct task_struct *task;		/* owning task */
	struct exec_domain *exec_domain;
	__u32 flags;				/* TIF_* bits, driven by the *_ti_thread_flag helpers */
	__u32 status;				/* TS_* state bits (see set_restore_sigmask) */
	__u32 cpu;				/* presumably the CPU the thread runs on — confirm with scheduler code */
	int preempt_count;
	mm_segment_t addr_limit;		/* address-space limit used by uaccess checks */
	struct restart_block restart_block;
	void *sysenter_return;
	unsigned long previous_esp;
	__u8 supervisor_stack[0];		/* zero-length marker: stack storage follows this struct */
	int uaccess_err;
};
/* The current kernel stack pointer, read straight from %esp. */
register unsigned long current_stack_pointer asm("esp") __attribute__((__used__));
/*
 * thread_info lives at the bottom of the kernel stack: mask the low
 * bits of %esp with the stack size, ((1UL << 12) << 1) = two pages.
 */
static inline __attribute__((always_inline)) struct thread_info *current_thread_info(void)
{
	return (struct thread_info *)
		(current_stack_pointer & ~((((1UL) << 12) << 1) - 1));
}
/*
 * Flag that the saved signal mask must be restored on return from
 * signal handling: sets status bit 0x0008 (presumably
 * TS_RESTORE_SIGMASK — confirm against asm/thread_info.h) and raises
 * thread-flag bit 2 so the signal-delivery path notices.
 */
static inline __attribute__((always_inline)) void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	ti->status |= 0x0008;
	set_bit(2, (unsigned long *)&ti->flags);
}
4204 | extern void arch_task_cache_init(void); | |
4205 | extern void free_thread_info(struct thread_info *ti); | |
4206 | extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); | |
4207 | static inline __attribute__((always_inline)) void set_ti_thread_flag(struct thread_info *ti, int flag) | |
4208 | { | |
4209 | set_bit(flag, (unsigned long *)&ti->flags); | |
4210 | } | |
4211 | static inline __attribute__((always_inline)) void clear_ti_thread_flag(struct thread_info *ti, int flag) | |
4212 | { | |
4213 | clear_bit(flag, (unsigned long *)&ti->flags); | |
4214 | } | |
4215 | static inline __attribute__((always_inline)) int test_and_set_ti_thread_flag(struct thread_info *ti, int flag) | |
4216 | { | |
4217 | return test_and_set_bit(flag, (unsigned long *)&ti->flags); | |
4218 | } | |
4219 | static inline __attribute__((always_inline)) int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag) | |
4220 | { | |
4221 | return test_and_clear_bit(flag, (unsigned long *)&ti->flags); | |
4222 | } | |
/*
 * Test flag bit @flag in @ti->flags.  This is the expanded test_bit()
 * macro: it selects the constant-bit or variable-bit implementation
 * depending on whether @flag is a compile-time constant.
 */
static inline __attribute__((always_inline)) int test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return (__builtin_constant_p((flag)) ? constant_test_bit((flag), ((unsigned long *)&ti->flags)) : variable_test_bit((flag), ((unsigned long *)&ti->flags)));
}
4227 | static inline __attribute__((always_inline)) void INIT_LIST_HEAD(struct list_head *list) | |
4228 | { | |
4229 | list->next = list; | |
4230 | list->prev = list; | |
4231 | } | |
/*
 * Link @new between two known-adjacent nodes @prev and @next.
 * All four pointer updates are required; keep the order as written.
 */
static inline __attribute__((always_inline)) void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
4241 | static inline __attribute__((always_inline)) void list_add(struct list_head *new, struct list_head *head) | |
4242 | { | |
4243 | __list_add(new, head, head->next); | |
4244 | } | |
4245 | static inline __attribute__((always_inline)) void list_add_tail(struct list_head *new, struct list_head *head) | |
4246 | { | |
4247 | __list_add(new, head->prev, head); | |
4248 | } | |
/*
 * Bridge @prev directly to @next, detaching whatever lies between
 * them (the detached entries' own pointers are left untouched).
 */
static inline __attribute__((always_inline)) void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}
/* Unlink @entry from its list without modifying @entry itself. */
static inline __attribute__((always_inline)) void __list_del_entry(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
}
/*
 * Unlink @entry and poison its pointers with the list-poison values
 * (0x00100100 / 0x00200200) so a later use-after-delete faults
 * recognisably instead of silently corrupting a live list.
 */
static inline __attribute__((always_inline)) void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = ((void *) 0x00100100 + (0x0UL));
	entry->prev = ((void *) 0x00200200 + (0x0UL));
}
/*
 * Substitute @new for @old in @old's list.  @old is left with stale
 * pointers; use list_replace_init() if it will be reused.
 */
static inline __attribute__((always_inline)) void list_replace(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}
/* As list_replace(), then reinitialise @old as an empty head. */
static inline __attribute__((always_inline)) void list_replace_init(struct list_head *old,
					struct list_head *new)
{
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}
/* Unlink @entry and reinitialise it as an empty, self-linked head. */
static inline __attribute__((always_inline)) void list_del_init(struct list_head *entry)
{
	__list_del_entry(entry);
	INIT_LIST_HEAD(entry);
}
/* Unlink @list from its current list and insert it after @head. */
static inline __attribute__((always_inline)) void list_move(struct list_head *list, struct list_head *head)
{
	__list_del_entry(list);
	list_add(list, head);
}
/* Unlink @list from its current list and insert it before @head. */
static inline __attribute__((always_inline)) void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del_entry(list);
	list_add_tail(list, head);
}
/* True when @list is the final entry before wrapping back to @head. */
static inline __attribute__((always_inline)) int list_is_last(const struct list_head *list,
				const struct list_head *head)
{
	return list->next == head;
}
4299 | static inline __attribute__((always_inline)) int list_empty(const struct list_head *head) | |
4300 | { | |
4301 | return head->next == head; | |
4302 | } | |
4303 | static inline __attribute__((always_inline)) int list_empty_careful(const struct list_head *head) | |
4304 | { | |
4305 | struct list_head *next = head->next; | |
4306 | return (next == head) && (next == head->prev); | |
4307 | } | |
/*
 * Rotate the list one step left: the first entry becomes the last.
 * (The unwieldy conditional is the expanded ftrace branch-profiling
 * form of "if (!list_empty(head))".)
 */
static inline __attribute__((always_inline)) void list_rotate_left(struct list_head *head)
{
	struct list_head *first;
	if (__builtin_constant_p(((!list_empty(head)))) ? !!((!list_empty(head))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 218, }; ______r = !!((!list_empty(head))); ______f.miss_hit[______r]++; ______r; })) {
		first = head->next;
		list_move_tail(first, head);
	}
}
/* True when @head has exactly one entry. */
static inline __attribute__((always_inline)) int list_is_singular(const struct list_head *head)
{
	return !list_empty(head) && (head->next == head->prev);
}
/*
 * Move the initial run [head->next .. entry] onto @list (whose old
 * contents are overwritten wholesale), leaving @head starting at
 * entry->next.  Caller guarantees @entry is actually on @head's list.
 */
static inline __attribute__((always_inline)) void __list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	struct list_head *new_first = entry->next;
	list->next = head->next;
	list->next->prev = list;
	list->prev = entry;
	entry->next = list;
	head->next = new_first;
	new_first->prev = head;
}
/*
 * Checked front-end for __list_cut_position(): no-ops on an empty
 * @head or on a singular list whose single entry is not the cut
 * point; when @entry == @head there is nothing to move, so @list is
 * just reinitialised empty.  (Conditionals appear in expanded ftrace
 * branch-profiling form.)
 */
static inline __attribute__((always_inline)) void list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	if (__builtin_constant_p(((list_empty(head)))) ? !!((list_empty(head))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 262, }; ______r = !!((list_empty(head))); ______f.miss_hit[______r]++; ______r; }))
		return;
	if (__builtin_constant_p(((list_is_singular(head) && (head->next != entry && head != entry)))) ? !!((list_is_singular(head) && (head->next != entry && head != entry))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
	"include/linux/list.h"
	, .line =
	265
	, }; ______r = !!((list_is_singular(head) && (head->next != entry && head != entry))); ______f.miss_hit[______r]++; ______r; }))
		return;
	if (__builtin_constant_p(((entry == head))) ? !!((entry == head)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 267, }; ______r = !!((entry == head)); ______f.miss_hit[______r]++; ______r; }))
		INIT_LIST_HEAD(list);
	else
		__list_cut_position(list, head, entry);
}
/*
 * Splice the entries of (non-empty) @list between the adjacent nodes
 * @prev and @next.
 */
static inline __attribute__((always_inline)) void __list_splice(const struct list_head *list,
				 struct list_head *prev,
				 struct list_head *next)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	first->prev = prev;
	prev->next = first;
	last->next = next;
	next->prev = last;
}
/*
 * Join @list at the front of @head.  @list's own head is left stale;
 * use list_splice_init() if it will be reused.  (Conditionals are in
 * expanded ftrace branch-profiling form of list_empty() checks.)
 */
static inline __attribute__((always_inline)) void list_splice(const struct list_head *list,
				struct list_head *head)
{
	if (__builtin_constant_p(((!list_empty(list)))) ? !!((!list_empty(list))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 295, }; ______r = !!((!list_empty(list))); ______f.miss_hit[______r]++; ______r; }))
		__list_splice(list, head, head->next);
}
/* Join @list at the tail of @head; @list's own head is left stale. */
static inline __attribute__((always_inline)) void list_splice_tail(struct list_head *list,
				struct list_head *head)
{
	if (__builtin_constant_p(((!list_empty(list)))) ? !!((!list_empty(list))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 307, }; ______r = !!((!list_empty(list))); ______f.miss_hit[______r]++; ______r; }))
		__list_splice(list, head->prev, head);
}
/* As list_splice(), then reinitialise @list so it can be reused. */
static inline __attribute__((always_inline)) void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (__builtin_constant_p(((!list_empty(list)))) ? !!((!list_empty(list))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 321, }; ______r = !!((!list_empty(list))); ______f.miss_hit[______r]++; ______r; })) {
		__list_splice(list, head, head->next);
		INIT_LIST_HEAD(list);
	}
}
/* As list_splice_tail(), then reinitialise @list for reuse. */
static inline __attribute__((always_inline)) void list_splice_tail_init(struct list_head *list,
					 struct list_head *head)
{
	if (__builtin_constant_p(((!list_empty(list)))) ? !!((!list_empty(list))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 338, }; ______r = !!((!list_empty(list))); ______f.miss_hit[______r]++; ______r; })) {
		__list_splice(list, head->prev, head);
		INIT_LIST_HEAD(list);
	}
}
4386 | static inline __attribute__((always_inline)) void INIT_HLIST_NODE(struct hlist_node *h) | |
4387 | { | |
4388 | h->next = ((void *)0); | |
4389 | h->pprev = ((void *)0); | |
4390 | } | |
4391 | static inline __attribute__((always_inline)) int hlist_unhashed(const struct hlist_node *h) | |
4392 | { | |
4393 | return !h->pprev; | |
4394 | } | |
4395 | static inline __attribute__((always_inline)) int hlist_empty(const struct hlist_head *h) | |
4396 | { | |
4397 | return !h->first; | |
4398 | } | |
/*
 * Unlink @n from its chain: redirect the predecessor's link (via the
 * pprev back-pointer) to n->next, and fix the successor's back-link
 * if there is one.  (Conditional is the expanded ftrace
 * branch-profiling form of "if (next)".)
 */
static inline __attribute__((always_inline)) void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (__builtin_constant_p(((next))) ? !!((next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 591, }; ______r = !!((next)); ______f.miss_hit[______r]++; ______r; }))
		next->pprev = pprev;
}
/* Unlink @n and poison its pointers with the list-poison values. */
static inline __attribute__((always_inline)) void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = ((void *) 0x00100100 + (0x0UL));
	n->pprev = ((void *) 0x00200200 + (0x0UL));
}
/* Unlink @n only if it is actually hashed, then reinitialise it. */
static inline __attribute__((always_inline)) void hlist_del_init(struct hlist_node *n)
{
	if (__builtin_constant_p(((!hlist_unhashed(n)))) ? !!((!hlist_unhashed(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 604, }; ______r = !!((!hlist_unhashed(n))); ______f.miss_hit[______r]++; ______r; })) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}
/* Push @n at the head of the chain rooted at @h. */
static inline __attribute__((always_inline)) void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	if (__builtin_constant_p(((first))) ? !!((first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 614, }; ______r = !!((first)); ______f.miss_hit[______r]++; ______r; }))
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}
/* Insert @n immediately before @next, which must already be on a chain. */
static inline __attribute__((always_inline)) void hlist_add_before(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}
/*
 * Insert @next immediately after @n, which must already be on a
 * chain.  (Conditional is the expanded ftrace branch-profiling form
 * of "if (next->next)".)
 */
static inline __attribute__((always_inline)) void hlist_add_after(struct hlist_node *n,
					struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;
	if (__builtin_constant_p(((next->next))) ? !!((next->next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 637, }; ______r = !!((next->next)); ______f.miss_hit[______r]++; ______r; }))
		next->next->pprev = &next->next;
}
/*
 * Make @n look hashed (pprev points at its own next field) without
 * placing it on any chain.
 */
static inline __attribute__((always_inline)) void hlist_add_fake(struct hlist_node *n)
{
	n->pprev = &n->next;
}
/* Transfer the entire chain from @old to @new; @old becomes empty. */
static inline __attribute__((always_inline)) void hlist_move_list(struct hlist_head *old,
				   struct hlist_head *new)
{
	new->first = old->first;
	if (__builtin_constant_p(((new->first))) ? !!((new->first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 655, }; ______r = !!((new->first)); ______f.miss_hit[______r]++; ______r; }))
		new->first->pprev = &new->first;
	old->first = ((void *)0);
}
4458 | extern void add_preempt_count(int val); | |
4459 | extern void sub_preempt_count(int val); | |
4460 | __attribute__((regparm(0))) void preempt_schedule(void); | |
4461 | struct preempt_notifier; | |
/* Callbacks fired when a preempt-notified task is scheduled in or out. */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);	/* task about to run on @cpu */
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);			/* task descheduled in favour of @next */
};
/* Registration record linking a task to its preempt_ops callbacks. */
struct preempt_notifier {
	struct hlist_node link;		/* chain of notifiers for one task */
	struct preempt_ops *ops;
};
4471 | void preempt_notifier_register(struct preempt_notifier *notifier); | |
4472 | void preempt_notifier_unregister(struct preempt_notifier *notifier); | |
4473 | static inline __attribute__((always_inline)) void preempt_notifier_init(struct preempt_notifier *notifier, | |
4474 | struct preempt_ops *ops) | |
4475 | { | |
4476 | INIT_HLIST_NODE(¬ifier->link); | |
4477 | notifier->ops = ops; | |
4478 | } | |
4479 | struct task_struct; | |
4480 | struct lockdep_map; | |
4481 | extern int prove_locking; | |
4482 | extern int lock_stat; | |
4483 | struct task_struct; | |
4484 | extern int debug_locks; | |
4485 | extern int debug_locks_silent; | |
/*
 * Atomically exchange debug_locks with 0 and return its previous
 * value (this is the expanded xchg() statement-expression, dispatched
 * on sizeof(debug_locks)).  A nonzero return means this caller is the
 * one that actually turned lock debugging off.
 */
static inline __attribute__((always_inline)) int __debug_locks_off(void)
{
	return ({ __typeof(*((&debug_locks))) __x = ((0)); switch (sizeof(*&debug_locks)) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&debug_locks)); asm volatile("xchgb %0,%1" : "=q" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&debug_locks)); asm volatile("xchgw %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&debug_locks)); asm volatile("xchgl %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } default: __xchg_wrong_size(); } __x; });
}
4490 | extern int debug_locks_off(void); | |
4491 | struct task_struct; | |
4492 | extern void debug_show_all_locks(void); | |
4493 | extern void debug_show_held_locks(struct task_struct *task); | |
4494 | extern void debug_check_no_locks_freed(const void *from, unsigned long len); | |
4495 | extern void debug_check_no_locks_held(struct task_struct *task); | |
4496 | struct task_struct; | |
4497 | struct pt_regs; | |
4498 | struct task_struct; | |
/*
 * A captured stack trace: entries[0 .. nr_entries) out of at most
 * max_entries slots of caller-provided storage.
 */
struct stack_trace {
	unsigned int nr_entries, max_entries;
	unsigned long *entries;		/* caller-provided array of return addresses */
	int skip;			/* innermost frames to drop when capturing */
};
4504 | extern void save_stack_trace(struct stack_trace *trace); | |
4505 | extern void save_stack_trace_regs(struct stack_trace *trace, | |
4506 | struct pt_regs *regs); | |
4507 | extern void save_stack_trace_tsk(struct task_struct *tsk, | |
4508 | struct stack_trace *trace); | |
4509 | extern void print_stack_trace(struct stack_trace *trace, int spaces); | |
4510 | extern void save_stack_trace_user(struct stack_trace *trace); | |
/*
 * One byte per subclass; the *address* of each byte serves as the
 * unique lockdep class key, hence packed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));
/* Static key object embedded alongside each lock; 8 subclasses max. */
struct lock_class_key {
	struct lockdep_subclass_key subkeys[8UL];
};
4517 | extern struct lock_class_key __lockdep_no_validate__; | |
/* One lockdep class: all lock instances sharing a key/subclass map here. */
struct lock_class {
	struct list_head hash_entry;		/* chain in the class hash table */
	struct list_head lock_entry;		/* chain of all registered classes */
	struct lockdep_subclass_key *key;
	unsigned int subclass;
	unsigned int dep_gen_id;
	unsigned long usage_mask;		/* usage-state bits */
	struct stack_trace usage_traces[(1+3*4)];	/* one captured trace per usage state */
	struct list_head locks_after, locks_before;	/* dependency edges in both directions */
	unsigned int version;
	unsigned long ops;			/* operation counter — presumably statistics; confirm in lockdep.c */
	const char *name;
	int name_version;			/* disambiguates classes with duplicate names */
};
/* Per-lock-instance map connecting a lock object to its class. */
struct lockdep_map {
	struct lock_class_key *key;
	struct lock_class *class_cache[2];	/* cached class lookups for the fast path */
	const char *name;
};
/* One node when walking/recording a dependency chain. */
struct lock_list {
	struct list_head entry;
	struct lock_class *class;
	struct stack_trace trace;		/* where this dependency was established */
	int distance;
	struct lock_list *parent;		/* back-pointer used when reconstructing a path */
};
/* A validated sequence of held locks, identified by chain_key. */
struct lock_chain {
	u8 irq_context;
	u8 depth;
	u16 base;
	struct list_head entry;
	u64 chain_key;
};
/* One currently-held lock in a task's lock stack. */
struct held_lock {
	u64 prev_chain_key;			/* chain key in effect before this acquire */
	unsigned long acquire_ip;		/* call site of the acquire */
	struct lockdep_map *instance;
	struct lockdep_map *nest_lock;
	unsigned int class_idx:13;
	unsigned int irq_context:2;
	unsigned int trylock:1;
	unsigned int read:2;
	unsigned int check:2;
	unsigned int hardirqs_off:1;
	unsigned int references:11;
};
4564 | extern void lockdep_init(void); | |
4565 | extern void lockdep_info(void); | |
4566 | extern void lockdep_reset(void); | |
4567 | extern void lockdep_reset_lock(struct lockdep_map *lock); | |
4568 | extern void lockdep_free_key_range(void *start, unsigned long size); | |
4569 | extern void lockdep_sys_exit(void); | |
4570 | extern void lockdep_off(void); | |
4571 | extern void lockdep_on(void); | |
4572 | extern void lockdep_init_map(struct lockdep_map *lock, const char *name, | |
4573 | struct lock_class_key *key, int subclass); | |
4574 | static inline __attribute__((always_inline)) int lockdep_match_key(struct lockdep_map *lock, | |
4575 | struct lock_class_key *key) | |
4576 | { | |
4577 | return lock->key == key; | |
4578 | } | |
4579 | extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |
4580 | int trylock, int read, int check, | |
4581 | struct lockdep_map *nest_lock, unsigned long ip); | |
4582 | extern void lock_release(struct lockdep_map *lock, int nested, | |
4583 | unsigned long ip); | |
4584 | extern int lock_is_held(struct lockdep_map *lock); | |
4585 | extern void lock_set_class(struct lockdep_map *lock, const char *name, | |
4586 | struct lock_class_key *key, unsigned int subclass, | |
4587 | unsigned long ip); | |
4588 | static inline __attribute__((always_inline)) void lock_set_subclass(struct lockdep_map *lock, | |
4589 | unsigned int subclass, unsigned long ip) | |
4590 | { | |
4591 | lock_set_class(lock, lock->name, lock->key, subclass, ip); | |
4592 | } | |
4593 | extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask); | |
4594 | extern void lockdep_clear_current_reclaim_state(void); | |
4595 | extern void lockdep_trace_alloc(gfp_t mask); | |
4596 | extern void print_irqtrace_events(struct task_struct *curr); | |
4597 | extern void ftrace_nmi_enter(void); | |
4598 | extern void ftrace_nmi_exit(void); | |
4599 | extern void cpu_idle(void); | |
/* Signature for functions run on a remote CPU via smp_call_function_*. */
typedef void (*smp_call_func_t)(void *info);
/* One queued cross-CPU function-call request. */
struct call_single_data {
	struct list_head list;		/* queueing link */
	smp_call_func_t func;		/* function to execute on the target CPU */
	void *info;			/* opaque argument handed to func */
	u16 flags;
	u16 priv;
};
4608 | extern unsigned int total_cpus; | |
4609 | int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, | |
4610 | int wait); | |
/* MP floating pointer structure: locates the MP configuration table. */
struct mpf_intel {
	char signature[4];
	unsigned int physptr;		/* physical address of the MP config table */
	unsigned char length;
	unsigned char specification;	/* MP specification revision */
	unsigned char checksum;
	unsigned char feature1;
	unsigned char feature2;
	unsigned char feature3;
	unsigned char feature4;
	unsigned char feature5;
};
/* MP configuration table header; entry records follow it in memory. */
struct mpc_table {
	char signature[4];
	unsigned short length;		/* total table length in bytes */
	char spec;
	char checksum;
	char oem[8];
	char productid[12];
	unsigned int oemptr;		/* optional OEM table location */
	unsigned short oemsize;
	unsigned short oemcount;
	unsigned int lapic;		/* local APIC base address */
	unsigned int reserved;
};
/* MP table entry: one processor. */
struct mpc_cpu {
	unsigned char type;
	unsigned char apicid;
	unsigned char apicver;
	unsigned char cpuflag;
	unsigned int cpufeature;
	unsigned int featureflag;
	unsigned int reserved[2];
};
/* MP table entry: one bus. */
struct mpc_bus {
	unsigned char type;
	unsigned char busid;
	unsigned char bustype[6];	/* bus-type string, e.g. ISA/PCI -- confirm against MP spec */
};
/* MP table entry: one I/O APIC. */
struct mpc_ioapic {
	unsigned char type;
	unsigned char apicid;
	unsigned char apicver;
	unsigned char flags;
	unsigned int apicaddr;		/* I/O APIC base address */
};
/* MP table entry: one I/O interrupt routing record. */
struct mpc_intsrc {
	unsigned char type;
	unsigned char irqtype;		/* one of enum mp_irq_source_types */
	unsigned short irqflag;
	unsigned char srcbus;
	unsigned char srcbusirq;
	unsigned char dstapic;
	unsigned char dstirq;
};
/* Interrupt delivery types used by mpc_intsrc/mpc_lintsrc records. */
enum mp_irq_source_types {
	mp_INT = 0,
	mp_NMI = 1,
	mp_SMI = 2,
	mp_ExtINT = 3
};
/* MP table entry: one local-interrupt assignment record. */
struct mpc_lintsrc {
	unsigned char type;
	unsigned char irqtype;
	unsigned short irqflag;
	unsigned char srcbusid;
	unsigned char srcbusirq;
	unsigned char destapic;
	unsigned char destapiclint;
};
/* Optional OEM table header referenced from mpc_table.oemptr. */
struct mpc_oemtable {
	char signature[4];
	unsigned short length;
	char rev;
	char checksum;
	char mpc[8];
};
/* Bus types recognised when parsing MP bus entries. */
enum mp_bustype {
	MP_BUS_ISA = 1,
	MP_BUS_EISA,
	MP_BUS_PCI,
	MP_BUS_MCA,
};
/*
 * Boot-time video/screen state handed over by the boot loader.
 * Packed: the layout is a boot-protocol ABI -- do not reorder or resize fields.
 */
struct screen_info {
    __u8 orig_x;            /* cursor column at boot */
    __u8 orig_y;            /* cursor row at boot */
    __u16 ext_mem_k;        /* extended memory in KB -- presumably from BIOS; confirm */
    __u16 orig_video_page;
    __u8 orig_video_mode;
    __u8 orig_video_cols;
    __u8 flags;
    __u8 unused2;
    __u16 orig_video_ega_bx;
    __u16 unused3;
    __u8 orig_video_lines;
    __u8 orig_video_isVGA;  /* video adapter type indicator */
    __u16 orig_video_points;
    /* Linear framebuffer description (lfb_*): */
    __u16 lfb_width;
    __u16 lfb_height;
    __u16 lfb_depth;        /* bits per pixel */
    __u32 lfb_base;         /* physical base address */
    __u32 lfb_size;
    __u16 cl_magic, cl_offset;  /* legacy command-line magic/offset */
    __u16 lfb_linelength;   /* bytes per scanline */
    /* Pixel channel layout: size and bit position of each component. */
    __u8 red_size;
    __u8 red_pos;
    __u8 green_size;
    __u8 green_pos;
    __u8 blue_size;
    __u8 blue_pos;
    __u8 rsvd_size;
    __u8 rsvd_pos;
    __u16 vesapm_seg;       /* VESA protected-mode interface segment */
    __u16 vesapm_off;
    __u16 pages;
    __u16 vesa_attributes;
    __u32 capabilities;
    __u8 _reserved[6];
} __attribute__((packed));
extern struct screen_info screen_info;  /* single kernel-wide instance */
/* APM events and event info are 16-bit codes. */
typedef unsigned short apm_event_t;
typedef unsigned short apm_eventinfo_t;
/* APM BIOS connection parameters passed in by the boot loader. */
struct apm_bios_info {
    __u16 version;      /* APM version supported by the BIOS */
    __u16 cseg;         /* 32-bit code segment */
    __u32 offset;       /* entry point offset */
    __u16 cseg_16;      /* 16-bit code segment */
    __u16 dseg;         /* data segment */
    __u16 flags;
    __u16 cseg_len;
    __u16 cseg_16_len;
    __u16 dseg_len;
};
/* Runtime APM driver state plus per-BIOS quirk flags. */
struct apm_info {
    struct apm_bios_info bios;
    unsigned short connection_version;
    int get_power_status_broken;        /* quirk: power-status call unusable -- per field name */
    int get_power_status_swabinminutes; /* quirk: status time byte-swapped, in minutes */
    int allow_ints;                     /* allow interrupts during BIOS calls */
    int forbid_idle;
    int realmode_power_off;             /* switch to real mode before powering off */
    int disabled;
};
extern struct apm_info apm_info;    /* single kernel-wide instance */
/*
 * BIOS Enhanced Disk Drive (EDD) "get device parameters" result buffer.
 * Packed firmware ABI: every field size/order mirrors the EDD spec layout.
 */
struct edd_device_params {
    __u16 length;               /* size of this structure as filled by the BIOS */
    __u16 info_flags;
    __u32 num_default_cylinders;    /* legacy CHS geometry */
    __u32 num_default_heads;
    __u32 sectors_per_track;
    __u64 number_of_sectors;    /* total LBA sector count */
    __u16 bytes_per_sector;
    __u32 dpte_ptr;             /* pointer to the Device Parameter Table Extension */
    __u16 key;                  /* magic key -- presumably 0xBEDD per EDD spec; confirm */
    __u8 device_path_info_length;
    __u8 reserved2;
    __u16 reserved3;
    __u8 host_bus_type[4];      /* host bus name string, e.g. "PCI " -- confirm encoding */
    __u8 interface_type[8];     /* interface name string, e.g. "ATA     " */
    /* Bus-specific address of the controller; variant selected by host_bus_type. */
    union {
        struct {
            __u16 base_address;
            __u16 reserved1;
            __u32 reserved2;
        } __attribute__ ((packed)) isa;
        struct {
            __u8 bus;
            __u8 slot;
            __u8 function;
            __u8 channel;
            __u32 reserved;
        } __attribute__ ((packed)) pci;
        struct {
            __u64 reserved;
        } __attribute__ ((packed)) ibnd;
        struct {
            __u64 reserved;
        } __attribute__ ((packed)) xprs;
        struct {
            __u64 reserved;
        } __attribute__ ((packed)) htpt;
        struct {
            __u64 reserved;
        } __attribute__ ((packed)) unknown;
    } interface_path;
    /* Interface-specific device address; variant selected by interface_type. */
    union {
        struct {
            __u8 device;
            __u8 reserved1;
            __u16 reserved2;
            __u32 reserved3;
            __u64 reserved4;
        } __attribute__ ((packed)) ata;
        struct {
            __u8 device;
            __u8 lun;
            __u8 reserved1;
            __u8 reserved2;
            __u32 reserved3;
            __u64 reserved4;
        } __attribute__ ((packed)) atapi;
        struct {
            __u16 id;
            __u64 lun;
            __u16 reserved1;
            __u32 reserved2;
        } __attribute__ ((packed)) scsi;
        struct {
            __u64 serial_number;
            __u64 reserved;
        } __attribute__ ((packed)) usb;
        struct {
            __u64 eui;          /* IEEE 1394 EUI-64 */
            __u64 reserved;
        } __attribute__ ((packed)) i1394;
        struct {
            __u64 wwid;         /* Fibre Channel world-wide id */
            __u64 lun;
        } __attribute__ ((packed)) fibre;
        struct {
            __u64 identity_tag;
            __u64 reserved;
        } __attribute__ ((packed)) i2o;
        struct {
            __u32 array_number;
            __u32 reserved1;
            __u64 reserved2;
        } __attribute__ ((packed)) raid;
        struct {
            __u8 device;
            __u8 reserved1;
            __u16 reserved2;
            __u32 reserved3;
            __u64 reserved4;
        } __attribute__ ((packed)) sata;
        struct {
            __u64 reserved1;
            __u64 reserved2;
        } __attribute__ ((packed)) unknown;
    } device_path;
    __u8 reserved4;
    __u8 checksum;
} __attribute__ ((packed));
/* Per-drive EDD information collected at boot (packed firmware ABI). */
struct edd_info {
    __u8 device;                /* BIOS drive number (e.g. 0x80) -- presumably; confirm */
    __u8 version;               /* EDD version supported by the BIOS */
    __u16 interface_support;
    __u16 legacy_max_cylinder;  /* legacy CHS limits */
    __u8 legacy_max_head;
    __u8 legacy_sectors_per_track;
    struct edd_device_params params;    /* full EDD parameter buffer */
} __attribute__ ((packed));
/* Aggregate of all EDD data gathered from the BIOS for up to 6 drives. */
struct edd {
    unsigned int mbr_signature[16];     /* MBR disk signatures */
    struct edd_info edd_info[6];
    unsigned char mbr_signature_nr;     /* number of valid entries in mbr_signature */
    unsigned char edd_info_nr;          /* number of valid entries in edd_info */
};
extern struct edd edd;  /* single kernel-wide instance */
/* One BIOS e820 memory-map entry: a physical range and its type (packed ABI). */
struct e820entry {
    __u64 addr;     /* start of the physical range */
    __u64 size;     /* length in bytes */
    __u32 type;     /* range type code (usable RAM, reserved, ...) */
} __attribute__((packed));
/* Full e820 memory map, bounded at 128 entries. */
struct e820map {
    __u32 nr_map;   /* number of valid entries in map[] */
    struct e820entry map[128];
};
extern struct e820map e820;         /* current (possibly modified) map */
extern struct e820map e820_saved;   /* pristine copy of the firmware map */
extern unsigned long pci_mem_start;
/* Query helpers: does any / does all of [start, end) carry the given type? */
extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
/* Add a new range to the map. */
extern void e820_add_region(u64 start, u64 size, int type);
extern void e820_print_map(char *who);
/* Sort/merge overlapping BIOS entries; updates *pnr_map in place. */
extern int
sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map);
/* Retype or remove (parts of) existing ranges. */
extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
       unsigned new_type);
extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
        int checktype);
extern void update_e820(void);
/* Find and reserve a gap in the map for PCI MMIO. */
extern void e820_setup_gap(void);
extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
   unsigned long start_addr, unsigned long long end_addr);
struct setup_data;
extern void parse_e820_ext(struct setup_data *data);
extern void e820_mark_nosave_regions(unsigned long limit_pfn);
/* No-op stub: this configuration provides no early memory test. */
static inline __attribute__((always_inline)) void early_memtest(unsigned long start, unsigned long end)
{
}
/* Highest usable-RAM page frame numbers derived from the e820 map. */
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
/* Early boot-time reservation carved out of e820 before memblock is up. */
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
/* Populate memblock from the e820 map. */
void memblock_x86_fill(void);
void memblock_find_dma_reserve(void);
extern void finish_e820_parsing(void);
/* Register e820 ranges as kernel resources (late pass for leftovers). */
extern void e820_reserve_resources(void);
extern void e820_reserve_resources_late(void);
extern void setup_memory_map(void);
extern char *default_machine_specific_memory_setup(void);
4912 | static inline __attribute__((always_inline)) bool is_ISA_range(u64 s, u64 e) | |
4913 | { | |
4914 | return s >= 0xa0000 && e <= 0x100000; | |
4915 | } | |
/*
 * A node in the kernel's hierarchical I/O-port / iomem resource tree.
 * parent/sibling/child links form the tree; [start, end] is inclusive.
 */
struct resource {
    resource_size_t start;
    resource_size_t end;            /* inclusive end of the range */
    const char *name;
    unsigned long flags;            /* type and attribute bits */
    struct resource *parent, *sibling, *child;
};
/* Singly-linked list pairing a resource with the PCI device that owns it. */
struct resource_list {
    struct resource_list *next;
    struct resource *res;
    struct pci_dev *dev;
};
/* Roots of the two global resource trees. */
extern struct resource ioport_resource;
extern struct resource iomem_resource;
/* Insert under root; the *_conflict variant returns the clashing resource. */
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
void release_child_resources(struct resource *new);
/* Reserve [start, end], splitting around already-claimed children. */
extern void reserve_region_with_split(struct resource *root,
         resource_size_t start, resource_size_t end,
         const char *name);
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern void arch_remove_reservations(struct resource *avail);
/* Find and claim a free range under root obeying size/min/max/alignment;
 * alignf may adjust candidate placements. */
extern int allocate_resource(struct resource *root, struct resource *new,
        resource_size_t size, resource_size_t min,
        resource_size_t max, resource_size_t align,
        resource_size_t (*alignf)(void *,
      const struct resource *,
      resource_size_t,
      resource_size_t),
        void *alignf_data);
int adjust_resource(struct resource *res, resource_size_t start,
      resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
4952 | static inline __attribute__((always_inline)) resource_size_t resource_size(const struct resource *res) | |
4953 | { | |
4954 | return res->end - res->start + 1; | |
4955 | } | |
4956 | static inline __attribute__((always_inline)) unsigned long resource_type(const struct resource *res) | |
4957 | { | |
4958 | return res->flags & 0x00001f00; | |
4959 | } | |
/* Low-level claim/test/release of an [start, start+n) range under a root. */
extern struct resource * __request_region(struct resource *,
     resource_size_t start,
     resource_size_t n,
     const char *name, int flags);
extern int __check_region(struct resource *, resource_size_t, resource_size_t);
extern void __release_region(struct resource *, resource_size_t,
    resource_size_t);
/*
 * Deprecated: test-then-request is racy; callers should request_region()
 * directly and check its result instead.
 */
static inline __attribute__((always_inline)) int __attribute__((deprecated)) check_region(resource_size_t s,
      resource_size_t n)
{
    return __check_region(&ioport_resource, s, n);
}
struct device;
/* Device-managed region claims: released automatically on driver detach. */
extern struct resource * __devm_request_region(struct device *dev,
    struct resource *parent, resource_size_t start,
    resource_size_t n, const char *name);
extern void __devm_release_region(struct device *dev, struct resource *parent,
      resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
extern int iomem_is_exclusive(u64 addr);
/* Invoke func on each System-RAM chunk overlapping the pfn range. */
extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
  void *arg, int (*func)(unsigned long, unsigned long, void *));
/* Intel SpeedStep (IST) BIOS interface data from the boot loader. */
struct ist_info {
    __u32 signature;
    __u32 command;
    __u32 event;
    __u32 perf_level;
};
extern struct ist_info ist_info;
/* Raw 128-byte EDID block captured from the display at boot. */
struct edid_info {
    unsigned char dummy[128];
};
extern struct edid_info edid_info;
/* Generic boot-protocol extension record: a linked list of typed blobs. */
struct setup_data {
    __u64 next;     /* physical address of the next setup_data, 0 = end */
    __u32 type;
    __u32 len;      /* length of data[] */
    __u8 data[0];   /* payload follows the header */
};
/*
 * The x86 real-mode kernel "setup header" exchanged with the boot loader.
 * Packed boot-protocol ABI -- field order and sizes are fixed.
 */
struct setup_header {
    __u8 setup_sects;       /* size of the setup code in 512-byte sectors */
    __u16 root_flags;
    __u32 syssize;          /* size of the protected-mode kernel */
    __u16 ram_size;
    __u16 vid_mode;
    __u16 root_dev;         /* default root device number */
    __u16 boot_flag;        /* boot sector magic -- presumably 0xAA55; confirm */
    __u16 jump;
    __u32 header;           /* header magic */
    __u16 version;          /* boot protocol version */
    __u32 realmode_swtch;
    __u16 start_sys;
    __u16 kernel_version;   /* offset of the kernel version string */
    __u8 type_of_loader;    /* boot loader identifier */
    __u8 loadflags;
    __u16 setup_move_size;
    __u32 code32_start;     /* 32-bit entry point */
    __u32 ramdisk_image;    /* initrd load address */
    __u32 ramdisk_size;
    __u32 bootsect_kludge;
    __u16 heap_end_ptr;
    __u8 ext_loader_ver;    /* extended loader version/type fields */
    __u8 ext_loader_type;
    __u32 cmd_line_ptr;     /* physical pointer to the kernel command line */
    __u32 initrd_addr_max;  /* highest address usable for the initrd */
    __u32 kernel_alignment;
    __u8 relocatable_kernel;
    __u8 _pad2[3];
    __u32 cmdline_size;     /* maximum command-line length */
    __u32 hardware_subarch; /* see the X86_SUBARCH_* enum */
    __u64 hardware_subarch_data;
    __u32 payload_offset;
    __u32 payload_length;
    __u64 setup_data;       /* physical pointer to the first struct setup_data */
} __attribute__((packed));
/* MCA-era system description table passed by the BIOS. */
struct sys_desc_table {
    __u16 length;
    __u8 table[14];
};
/* OLPC Open Firmware client-interface handoff block (packed ABI). */
struct olpc_ofw_header {
    __u32 ofw_magic;        /* magic identifying a valid OFW handoff */
    __u32 ofw_version;
    __u32 cif_handler;      /* client-interface entry point */
    __u32 irq_desc_table;
} __attribute__((packed));
/* EFI firmware handoff: system table, memory map and their sizes.
 * *_hi fields carry the upper 32 bits of 64-bit pointers. */
struct efi_info {
    __u32 efi_loader_signature;
    __u32 efi_systab;
    __u32 efi_memdesc_size;
    __u32 efi_memdesc_version;
    __u32 efi_memmap;
    __u32 efi_memmap_size;
    __u32 efi_systab_hi;
    __u32 efi_memmap_hi;
};
/*
 * The x86 "zero page": all boot-time data the loader hands to the kernel.
 * Packed; the _padN arrays pin every member at its protocol-mandated offset
 * (e.g. _pad7 places hdr at 0x1f1 and the following data at 0x290).
 */
struct boot_params {
    struct screen_info screen_info;
    struct apm_bios_info apm_bios_info;
    __u8 _pad2[4];
    __u64 tboot_addr;               /* trusted-boot shared page address */
    struct ist_info ist_info;
    __u8 _pad3[16];
    __u8 hd0_info[16];              /* legacy BIOS hard-disk parameter blocks */
    __u8 hd1_info[16];
    struct sys_desc_table sys_desc_table;
    struct olpc_ofw_header olpc_ofw_header;
    __u8 _pad4[128];
    struct edid_info edid_info;
    struct efi_info efi_info;
    __u32 alt_mem_k;                /* alternative memory-size report in KB */
    __u32 scratch;
    __u8 e820_entries;              /* number of valid e820_map[] entries */
    __u8 eddbuf_entries;
    __u8 edd_mbr_sig_buf_entries;
    __u8 _pad6[6];
    struct setup_header hdr;        /* the setup header, at offset 0x1f1 */
    __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
    __u32 edd_mbr_sig_buffer[16];   /* MBR signatures collected by EDD probing */
    struct e820entry e820_map[128]; /* BIOS e820 memory map */
    __u8 _pad8[48];
    struct edd_info eddbuf[6];
    __u8 _pad9[276];
} __attribute__((packed));
/* Values for setup_header.hardware_subarch: which platform flavor booted us. */
enum {
    X86_SUBARCH_PC = 0,     /* standard PC */
    X86_SUBARCH_LGUEST,
    X86_SUBARCH_XEN,
    X86_SUBARCH_MRST,       /* Intel Moorestown */
    X86_SUBARCH_CE4100,
    X86_NR_SUBARCHS,
};
/* Forward declarations for the x86_init op tables below. */
struct mpc_bus;
struct mpc_cpu;
struct mpc_table;
/* Platform hooks for MP-table discovery and parsing. */
struct x86_init_mpparse {
    void (*mpc_record)(unsigned int mode);
    void (*setup_ioapic_ids)(void);
    int (*mpc_apic_id)(struct mpc_cpu *m);
    void (*smp_read_mpc_oem)(struct mpc_table *mpc);
    void (*mpc_oem_pci_bus)(struct mpc_bus *m);
    void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
    void (*find_smp_config)(void);
    void (*get_smp_config)(unsigned int early);
};
/* Platform hooks for early resource/memory-map setup. */
struct x86_init_resources {
    void (*probe_roms)(void);
    void (*reserve_resources)(void);
    char *(*memory_setup)(void);    /* returns the memory-map source name */
};
/* Platform hooks for interrupt-controller initialization. */
struct x86_init_irqs {
    void (*pre_vector_init)(void);
    void (*intr_init)(void);
    void (*trap_init)(void);
};
/* OEM-specific setup and banner printing. */
struct x86_init_oem {
    void (*arch_setup)(void);
    void (*banner)(void);
};
/* Hook to reserve the range used for early page tables. */
struct x86_init_mapping {
    void (*pagetable_reserve)(u64 start, u64 end);
};
/* Hooks bracketing kernel page-table construction. */
struct x86_init_paging {
    void (*pagetable_setup_start)(pgd_t *base);
    void (*pagetable_setup_done)(pgd_t *base);
};
/* Timer/clock initialization hooks. */
struct x86_init_timers {
    void (*setup_percpu_clockev)(void); /* boot CPU clockevent device */
    void (*tsc_pre_init)(void);
    void (*timer_init)(void);
    void (*wallclock_init)(void);
};
/* IOMMU initialization hook. */
struct x86_init_iommu {
    int (*iommu_init)(void);
};
/* PCI subsystem initialization hooks. */
struct x86_init_pci {
    int (*arch_init)(void);
    int (*init)(void);
    void (*init_irq)(void);
    void (*fixup_irqs)(void);
};
/* Master table of boot-time platform hooks; one instance per kernel. */
struct x86_init_ops {
    struct x86_init_resources resources;
    struct x86_init_mpparse mpparse;
    struct x86_init_irqs irqs;
    struct x86_init_oem oem;
    struct x86_init_mapping mapping;
    struct x86_init_paging paging;
    struct x86_init_timers timers;
    struct x86_init_iommu iommu;
    struct x86_init_pci pci;
};
/* Hooks run when a secondary CPU comes up. */
struct x86_cpuinit_ops {
    void (*setup_percpu_clockev)(void);
};
/* Runtime platform hooks (as opposed to the boot-time x86_init ones). */
struct x86_platform_ops {
    unsigned long (*calibrate_tsc)(void);   /* returns TSC frequency */
    unsigned long (*get_wallclock)(void);
    int (*set_wallclock)(unsigned long nowtime);
    void (*iommu_shutdown)(void);
    bool (*is_untracked_pat_range)(u64 start, u64 end);
    void (*nmi_init)(void);
    int (*i8042_detect)(void);
};
struct pci_dev;
/* Overridable MSI setup/teardown operations. */
struct x86_msi_ops {
    int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
    void (*teardown_msi_irq)(unsigned int irq);
    void (*teardown_msi_irqs)(struct pci_dev *dev);
};
/* The global hook tables. */
extern struct x86_init_ops x86_init;
extern struct x86_cpuinit_ops x86_cpuinit;
extern struct x86_platform_ops x86_platform;
extern struct x86_msi_ops x86_msi;
/* Do-nothing defaults for unused hook slots. */
extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
/*
 * Memory-mapped register layout of the local APIC.
 * Each register occupies one 16-byte slot: a 32-bit value followed by
 * three reserved words; the whole struct is packed, so field order is ABI.
 */
struct local_apic {
    struct { unsigned int __reserved[4]; } __reserved_01;
    struct { unsigned int __reserved[4]; } __reserved_02;
    /* APIC ID register */
    struct {
        unsigned int __reserved_1 : 24,
            phys_apic_id : 4,
            __reserved_2 : 4;
        unsigned int __reserved[3];
    } id;
    /* Version register (read-only) */
    const
    struct {
        unsigned int version : 8,
            __reserved_1 : 8,
            max_lvt : 8,        /* highest LVT entry index */
            __reserved_2 : 8;
        unsigned int __reserved[3];
    } version;
    struct { unsigned int __reserved[4]; } __reserved_03;
    struct { unsigned int __reserved[4]; } __reserved_04;
    struct { unsigned int __reserved[4]; } __reserved_05;
    struct { unsigned int __reserved[4]; } __reserved_06;
    /* Task priority register */
    struct {
        unsigned int priority : 8,
            __reserved_1 : 24;
        unsigned int __reserved_2[3];
    } tpr;
    /* Arbitration priority register (read-only) */
    const
    struct {
        unsigned int priority : 8,
            __reserved_1 : 24;
        unsigned int __reserved_2[3];
    } apr;
    /* Processor priority register (read-only) */
    const
    struct {
        unsigned int priority : 8,
            __reserved_1 : 24;
        unsigned int __reserved_2[3];
    } ppr;
    /* End-of-interrupt register (write-only in practice) */
    struct {
        unsigned int eoi;
        unsigned int __reserved[3];
    } eoi;
    struct { unsigned int __reserved[4]; } __reserved_07;
    /* Logical destination register */
    struct {
        unsigned int __reserved_1 : 24,
            logical_dest : 8;
        unsigned int __reserved_2[3];
    } ldr;
    /* Destination format register */
    struct {
        unsigned int __reserved_1 : 28,
            model : 4;
        unsigned int __reserved_2[3];
    } dfr;
    /* Spurious-interrupt vector register; apic_enabled is the sw-enable bit */
    struct {
        unsigned int spurious_vector : 8,
            apic_enabled : 1,
            focus_cpu : 1,
            __reserved_2 : 22;
        unsigned int __reserved_3[3];
    } svr;
    /* In-service register: 256 bits across 8 slots */
    struct {
        unsigned int bitfield;
        unsigned int __reserved[3];
    } isr [8];
    /* Trigger-mode register: 256 bits across 8 slots */
    struct {
        unsigned int bitfield;
        unsigned int __reserved[3];
    } tmr [8];
    /* Interrupt-request register: 256 bits across 8 slots */
    struct {
        unsigned int bitfield;
        unsigned int __reserved[3];
    } irr [8];
    /* Error-status register: per-error bits or the whole word */
    union {
        struct {
            unsigned int send_cs_error : 1,
                receive_cs_error : 1,
                send_accept_error : 1,
                receive_accept_error : 1,
                __reserved_1 : 1,
                send_illegal_vector : 1,
                receive_illegal_vector : 1,
                illegal_register_address : 1,
                __reserved_2 : 24;
            unsigned int __reserved_3[3];
        } error_bits;
        struct {
            unsigned int errors;
            unsigned int __reserved_3[3];
        } all_errors;
    } esr;
    struct { unsigned int __reserved[4]; } __reserved_08;
    struct { unsigned int __reserved[4]; } __reserved_09;
    struct { unsigned int __reserved[4]; } __reserved_10;
    struct { unsigned int __reserved[4]; } __reserved_11;
    struct { unsigned int __reserved[4]; } __reserved_12;
    struct { unsigned int __reserved[4]; } __reserved_13;
    struct { unsigned int __reserved[4]; } __reserved_14;
    /* Interrupt command register, low word: vector + delivery control */
    struct {
        unsigned int vector : 8,
            delivery_mode : 3,
            destination_mode : 1,
            delivery_status : 1,
            __reserved_1 : 1,
            level : 1,
            trigger : 1,
            __reserved_2 : 2,
            shorthand : 2,
            __reserved_3 : 12;
        unsigned int __reserved_4[3];
    } icr1;
    /* Interrupt command register, high word: destination (phys or logical) */
    struct {
        union {
            unsigned int __reserved_1 : 24,
                phys_dest : 4,
                __reserved_2 : 4;
            unsigned int __reserved_3 : 24,
                logical_dest : 8;
        } dest;
        unsigned int __reserved_4[3];
    } icr2;
    /* Local vector table: timer entry */
    struct {
        unsigned int vector : 8,
            __reserved_1 : 4,
            delivery_status : 1,
            __reserved_2 : 3,
            mask : 1,
            timer_mode : 1,     /* one-shot vs periodic */
            __reserved_3 : 14;
        unsigned int __reserved_4[3];
    } lvt_timer;
    /* LVT: thermal sensor entry */
    struct {
        unsigned int vector : 8,
            delivery_mode : 3,
            __reserved_1 : 1,
            delivery_status : 1,
            __reserved_2 : 3,
            mask : 1,
            __reserved_3 : 15;
        unsigned int __reserved_4[3];
    } lvt_thermal;
    /* LVT: performance counter entry */
    struct {
        unsigned int vector : 8,
            delivery_mode : 3,
            __reserved_1 : 1,
            delivery_status : 1,
            __reserved_2 : 3,
            mask : 1,
            __reserved_3 : 15;
        unsigned int __reserved_4[3];
    } lvt_pc;
    /* LVT: LINT0 pin entry */
    struct {
        unsigned int vector : 8,
            delivery_mode : 3,
            __reserved_1 : 1,
            delivery_status : 1,
            polarity : 1,
            remote_irr : 1,
            trigger : 1,
            mask : 1,
            __reserved_2 : 15;
        unsigned int __reserved_3[3];
    } lvt_lint0;
    /* LVT: LINT1 pin entry */
    struct {
        unsigned int vector : 8,
            delivery_mode : 3,
            __reserved_1 : 1,
            delivery_status : 1,
            polarity : 1,
            remote_irr : 1,
            trigger : 1,
            mask : 1,
            __reserved_2 : 15;
        unsigned int __reserved_3[3];
    } lvt_lint1;
    /* LVT: error entry */
    struct {
        unsigned int vector : 8,
            __reserved_1 : 4,
            delivery_status : 1,
            __reserved_2 : 3,
            mask : 1,
            __reserved_3 : 15;
        unsigned int __reserved_4[3];
    } lvt_error;
    /* Timer initial-count register */
    struct {
        unsigned int initial_count;
        unsigned int __reserved_2[3];
    } timer_icr;
    /* Timer current-count register (read-only) */
    const
    struct {
        unsigned int curr_count;
        unsigned int __reserved_2[3];
    } timer_ccr;
    struct { unsigned int __reserved[4]; } __reserved_16;
    struct { unsigned int __reserved[4]; } __reserved_17;
    struct { unsigned int __reserved[4]; } __reserved_18;
    struct { unsigned int __reserved[4]; } __reserved_19;
    /* Timer divide-configuration register */
    struct {
        unsigned int divisor : 4,
            __reserved_1 : 28;
        unsigned int __reserved_2[3];
    } timer_dcr;
    struct { unsigned int __reserved[4]; } __reserved_20;
} __attribute__ ((packed));
/* I/O APIC redirection-entry delivery modes. */
enum ioapic_irq_destination_types {
    dest_Fixed = 0,
    dest_LowestPrio = 1,
    dest_SMI = 2,
    dest__reserved_1 = 3,
    dest_NMI = 4,
    dest_INIT = 5,
    dest__reserved_2 = 6,
    dest_ExtINT = 7
};
/* Globals describing the discovered APIC/MP configuration. */
extern int apic_version[];          /* per-CPU local APIC versions */
extern int pic_mode;                /* legacy PIC vs virtual-wire mode */
extern unsigned int def_to_bigsmp;
extern unsigned long mp_bus_not_pci[(((260) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];   /* bitmap over 260 bus ids */
extern unsigned int boot_cpu_physical_apicid;
extern unsigned int max_physical_apicid;
extern int mpc_default_type;
extern unsigned long mp_lapic_addr;     /* physical local-APIC base */
extern int smp_found_config;            /* nonzero once an MP/ACPI config was found */
/* Parse the SMP configuration via the platform hook (full pass). */
static inline __attribute__((always_inline)) void get_smp_config(void)
{
    x86_init.mpparse.get_smp_config(0);
}
/* Early, partial SMP-configuration pass (early=1). */
static inline __attribute__((always_inline)) void early_get_smp_config(void)
{
    x86_init.mpparse.get_smp_config(1);
}
/* Locate the MP configuration tables via the platform hook. */
static inline __attribute__((always_inline)) void find_smp_config(void)
{
    x86_init.mpparse.find_smp_config();
}
extern void early_reserve_e820_mpc_new(void);
extern int enable_update_mptable;
/* Default implementations for the x86_init.mpparse hooks. */
extern int default_mpc_apic_id(struct mpc_cpu *m);
extern void default_smp_read_mpc_oem(struct mpc_table *mpc);
extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str);
extern void default_find_smp_config(void);
extern void default_get_smp_config(unsigned int early);
/* Register one discovered CPU (placed in .cpuinit.text). */
void __attribute__ ((__section__(".cpuinit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) generic_processor_info(int apicid, int version);
extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
       u32 gsi);
extern void mp_config_acpi_legacy_irqs(void);
struct device;
extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
       int active_high_low);
/* Bitmap of physical APIC ids, sized for 256 ids. */
struct physid_mask {
    unsigned long mask[(((256) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
};
typedef struct physid_mask physid_mask_t;
/* Collapse a physid mask to its first word (ids 0..BITS_PER_LONG-1). */
static inline __attribute__((always_inline)) unsigned long physids_coerce(physid_mask_t *map)
{
    return map->mask[0];
}
/* Inverse of physids_coerce: clear the map, then set its first word. */
static inline __attribute__((always_inline)) void physids_promote(unsigned long physids, physid_mask_t *map)
{
    bitmap_zero((*map).mask, 256);
    map->mask[0] = physids;
}
/* Produce a mask containing exactly one physical APIC id. */
static inline __attribute__((always_inline)) void physid_set_mask_of_physid(int physid, physid_mask_t *map)
{
    bitmap_zero((*map).mask, 256);
    set_bit(physid, (*map).mask);
}
extern physid_mask_t phys_cpu_present_map;  /* APIC ids of present CPUs */
extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
extern int default_acpi_madt_oem_check(char *, char *);
/* Softirq/bottom-half enable/disable primitives. */
extern void local_bh_disable(void);
extern void _local_bh_enable(void);
extern void local_bh_enable(void);
extern void local_bh_enable_ip(unsigned long ip);
/* Raw ticket spinlock: low byte = owner ticket, high byte = next ticket. */
typedef struct arch_spinlock {
    unsigned int slock;
} arch_spinlock_t;
/* Raw reader/writer lock word. */
typedef struct {
    unsigned int lock;
} arch_rwlock_t;
/* Spinlock with debug (magic/owner tracking) and lockdep state. */
typedef struct raw_spinlock {
    arch_spinlock_t raw_lock;
    unsigned int magic, owner_cpu;
    void *owner;
    struct lockdep_map dep_map;
} raw_spinlock_t;
/*
 * spinlock_t wraps raw_spinlock; the anonymous struct overlays dep_map at
 * the same offset as rlock.dep_map so lock->dep_map works either way.
 */
typedef struct spinlock {
    union {
        struct raw_spinlock rlock;
        struct {
            u8 __padding[(__builtin_offsetof(struct raw_spinlock,dep_map))];
            struct lockdep_map dep_map;
        };
    };
} spinlock_t;
/* rwlock with the same debug/lockdep companions. */
typedef struct {
    arch_rwlock_t raw_lock;
    unsigned int magic, owner_cpu;
    void *owner;
    struct lockdep_map dep_map;
} rwlock_t;
/*
 * Ticket-lock acquire: atomically take the next ticket with xadd, then
 * spin (rep;nop = pause) until the owner byte matches our ticket.
 * The .smp_locks section entry lets the kernel patch out the lock prefix
 * on uniprocessor systems.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void __ticket_spin_lock(arch_spinlock_t *lock)
{
    short inc = 0x0100;     /* adds 1 to the next-ticket (high) byte */
    asm volatile (
        ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "xaddw %w0, %1\n"
        "1:\t"
        "cmpb %h0, %b0\n\t"
        "je 2f\n\t"
        "rep ; nop\n\t"
        "movb %1, %b0\n\t"
        "jmp 1b\n"
        "2:"
        : "+Q" (inc), "+m" (lock->slock)
        :
        : "memory", "cc");
}
/*
 * Ticket-lock trylock: succeed only if owner == next ticket, claiming the
 * next ticket with cmpxchg; returns nonzero on success.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) int __ticket_spin_trylock(arch_spinlock_t *lock)
{
    int tmp, new;
    asm volatile("movzwl %2, %0\n\t"
        "cmpb %h0,%b0\n\t"
        "leal 0x100(%" "k" "0), %1\n\t"
        "jne 1f\n\t"
        ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgw %w1,%2\n\t"
        "1:"
        "sete %b1\n\t"
        "movzbl %b1,%0\n\t"
        : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
        :
        : "memory", "cc");
    return tmp;
}
/* Ticket-lock release: bump the owner byte to admit the next waiter. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void __ticket_spin_unlock(arch_spinlock_t *lock)
{
    asm volatile( "incb %0"
        : "+m" (lock->slock)
        :
        : "memory", "cc");
}
/* Locked when owner and next-ticket bytes differ (volatile read of slock). */
static inline __attribute__((always_inline)) int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
    int tmp = (*(volatile typeof(lock->slock) *)&(lock->slock));
    return !!(((tmp >> 8) ^ tmp) & ((1 << 8) - 1));
}
/* Contended when more than one ticket separates next from owner. */
static inline __attribute__((always_inline)) int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
    int tmp = (*(volatile typeof(lock->slock) *)&(lock->slock));
    return (((tmp >> 8) - tmp) & ((1 << 8) - 1)) > 1;
}
/* arch_spin_* wrappers: forward to the ticket-lock implementation above. */
static inline __attribute__((always_inline)) int arch_spin_is_locked(arch_spinlock_t *lock)
{
    return __ticket_spin_is_locked(lock);
}
static inline __attribute__((always_inline)) int arch_spin_is_contended(arch_spinlock_t *lock)
{
    return __ticket_spin_is_contended(lock);
}
static inline __attribute__((always_inline)) __attribute__((always_inline)) void arch_spin_lock(arch_spinlock_t *lock)
{
    __ticket_spin_lock(lock);
}
static inline __attribute__((always_inline)) __attribute__((always_inline)) int arch_spin_trylock(arch_spinlock_t *lock)
{
    return __ticket_spin_trylock(lock);
}
static inline __attribute__((always_inline)) __attribute__((always_inline)) void arch_spin_unlock(arch_spinlock_t *lock)
{
    __ticket_spin_unlock(lock);
}
/* flags variant ignores flags on x86: interrupts need not be re-enabled. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void arch_spin_lock_flags(arch_spinlock_t *lock,
        unsigned long flags)
{
    arch_spin_lock(lock);
}
/* Busy-wait (without acquiring) until the lock is seen unlocked. */
static inline __attribute__((always_inline)) void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
    while (arch_spin_is_locked(lock))
        cpu_relax();
}
/* rwlock availability tests: readers possible while count > 0; the write
 * bias value 0x01000000 means completely unlocked. */
static inline __attribute__((always_inline)) int arch_read_can_lock(arch_rwlock_t *lock)
{
    return (int)(lock)->lock > 0;
}
static inline __attribute__((always_inline)) int arch_write_can_lock(arch_rwlock_t *lock)
{
    return (lock)->lock == 0x01000000;
}
/*
 * Acquire @rw for reading: atomically decrement the lock word; if the
 * result is non-negative (jns) we got the lock, otherwise a writer is
 * involved and we take the out-of-line slow path __read_lock_failed.
 * The .smp_locks section entry records the lock prefix so it can be
 * patched out on uniprocessor kernels.
 */
static inline __attribute__((always_inline)) void arch_read_lock(arch_rwlock_t *rw)
{
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " subl $1,(%0)\n\t"
       "jns 1f\n"
       "call __read_lock_failed\n\t"
       "1:\n"
       ::"a" (rw) : "memory");
}
/*
 * Acquire @rw for writing: atomically subtract the write bias
 * (0x01000000).  A zero result (jz) means no readers or other writer
 * were present and the lock is ours; otherwise fall into the
 * out-of-line slow path __write_lock_failed.
 * The .smp_locks entry allows UP kernels to patch out the lock prefix.
 */
static inline __attribute__((always_inline)) void arch_write_lock(arch_rwlock_t *rw)
{
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " subl %1,(%0)\n\t"
       "jz 1f\n"
       "call __write_lock_failed\n\t"
       "1:\n"
       ::"a" (rw), "i" (0x01000000) : "memory");
}
/*
 * Try to take @lock for reading without blocking.
 * Returns 1 on success.  If the decrement drives the count negative a
 * writer is active, so the decrement is undone with atomic_inc() and
 * 0 is returned.
 * The __builtin_constant_p / ftrace_branch_data wrapping below is
 * expanded branch-profiling instrumentation around the plain condition
 * "atomic_sub_return(1, count) >= 0".
 */
static inline __attribute__((always_inline)) int arch_read_trylock(arch_rwlock_t *lock)
{
 atomic_t *count = (atomic_t *)lock;
 if (__builtin_constant_p((((atomic_sub_return(1, count)) >= 0))) ? !!(((atomic_sub_return(1, count)) >= 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/spinlock.h", .line = 271, }; ______r = !!(((atomic_sub_return(1, count)) >= 0)); ______f.miss_hit[______r]++; ______r; }))
  return 1;
 atomic_inc(count);
 return 0;
}
/*
 * Try to take @lock for writing without blocking.
 * Subtracting the full bias (0x01000000) and landing exactly on zero
 * means no readers or writer were present: return 1.  Otherwise undo
 * the subtraction and return 0.
 * The __builtin_constant_p / ftrace_branch_data wrapping is expanded
 * branch-profiling instrumentation around
 * "atomic_sub_and_test(0x01000000, count)".
 */
static inline __attribute__((always_inline)) int arch_write_trylock(arch_rwlock_t *lock)
{
 atomic_t *count = (atomic_t *)lock;
 if (__builtin_constant_p(((atomic_sub_and_test(0x01000000, count)))) ? !!((atomic_sub_and_test(0x01000000, count))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/spinlock.h", .line = 281, }; ______r = !!((atomic_sub_and_test(0x01000000, count))); ______f.miss_hit[______r]++; ______r; }))
  return 1;
 atomic_add(0x01000000, count);
 return 0;
}
/* Drop a read hold on @rw: atomically increment the lock word back. */
static inline __attribute__((always_inline)) void arch_read_unlock(arch_rwlock_t *rw)
{
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "incl %0" :"+m" (rw->lock) : : "memory");
}
/* Release the write hold on @rw: atomically add the write bias
 * (0x01000000) back to the lock word. */
static inline __attribute__((always_inline)) void arch_write_unlock(arch_rwlock_t *rw)
{
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "addl %1, %0"
       : "+m" (rw->lock) : "i" (0x01000000) : "memory");
}
/* No-op here: the locked instruction used to take the lock already acts
 * as a full memory barrier on this architecture (x86, per the asm above). */
static inline __attribute__((always_inline)) void smp_mb__after_lock(void) { }
/*
 * Out-of-line implementations of the raw spinlock/rwlock operations
 * (with lockdep bookkeeping via struct lock_class_key), defined
 * elsewhere in the kernel.
 */
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
    struct lock_class_key *key);
extern void do_raw_spin_lock(raw_spinlock_t *lock) ;
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) ;
extern void __rwlock_init(rwlock_t *lock, const char *name,
    struct lock_class_key *key);
extern void do_raw_read_lock(rwlock_t *lock) ;
extern int do_raw_read_trylock(rwlock_t *lock);
extern void do_raw_read_unlock(rwlock_t *lock) ;
extern void do_raw_write_lock(rwlock_t *lock) ;
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) ;
/* Nonzero if @addr lies within the lock-handling text sections. */
int in_lock_functions(unsigned long addr);
/*
 * Prototypes for the out-of-line _raw_spin_* entry points.  They are
 * placed in the .spinlock.text section so in_lock_functions() can
 * recognize their addresses.
 */
void __attribute__((section(".spinlock.text"))) _raw_spin_lock(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
 ;
void __attribute__((section(".spinlock.text")))
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
 ;
void __attribute__((section(".spinlock.text"))) _raw_spin_lock_bh(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_lock_irq(raw_spinlock_t *lock)
 ;
/* _irqsave variants return the saved interrupt flags for the caller. */
unsigned long __attribute__((section(".spinlock.text"))) _raw_spin_lock_irqsave(raw_spinlock_t *lock)
 ;
unsigned long __attribute__((section(".spinlock.text")))
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
 ;
int __attribute__((section(".spinlock.text"))) _raw_spin_trylock(raw_spinlock_t *lock);
int __attribute__((section(".spinlock.text"))) _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __attribute__((section(".spinlock.text"))) _raw_spin_unlock(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_unlock_bh(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_unlock_irq(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text")))
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 ;
/*
 * Try once to take @lock.  Preemption is disabled first; on success
 * lockdep is notified (trylock=1) and 1 is returned with preemption
 * still disabled.  On failure preemption is re-enabled and 0 returned.
 * The huge expressions are expanded branch-profiling instrumentation
 * (ftrace_branch_data) around the plain conditions.
 */
static inline __attribute__((always_inline)) int __raw_spin_trylock(raw_spinlock_t *lock)
{
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 if (__builtin_constant_p(((do_raw_spin_trylock(lock)))) ? !!((do_raw_spin_trylock(lock))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 89, }; ______r = !!((do_raw_spin_trylock(lock))); ______f.miss_hit[______r]++; ______r; })) {
  lock_acquire(&lock->dep_map, 0, 1, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
  return 1;
 }
 /* preempt_enable(): may call preempt_schedule() when thread flag 3 is
  * set (presumably need_resched — TODO confirm against thread_info flags). */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 93, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 93, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 93, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 93, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
 return 0;
}
/*
 * Take @lock with local interrupts disabled; returns the previous IRQ
 * flags for a later __raw_spin_unlock_irqrestore().
 * Order: save+disable IRQs, disable preemption, notify lockdep, acquire.
 */
static inline __attribute__((always_inline)) unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
 unsigned long flags;
 /* local_irq_save(flags), expanded; the ({ ...; 1; }) is a compile-time
  * type check that flags is an unsigned long. */
 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_spin_lock(lock);
 return flags;
}
/*
 * Take @lock with local interrupts disabled (previous IRQ state is NOT
 * saved — caller must know it is safe to enable IRQs on unlock).
 */
static inline __attribute__((always_inline)) void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
 /* local_irq_disable(), expanded */
 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_spin_lock(lock);
}
/* Take @lock with softirqs (bottom halves) disabled via local_bh_disable(). */
static inline __attribute__((always_inline)) void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
 local_bh_disable();
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_spin_lock(lock);
}
/* Plain spin-lock acquire: disable preemption, notify lockdep, take the lock. */
static inline __attribute__((always_inline)) void __raw_spin_lock(raw_spinlock_t *lock)
{
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_spin_lock(lock);
}
/*
 * Release @lock: tell lockdep, drop the lock, then re-enable preemption
 * (the expanded preempt_enable() below may call preempt_schedule() if
 * thread flag 3 is set — presumably need_resched; TODO confirm).
 */
static inline __attribute__((always_inline)) void __raw_spin_unlock(raw_spinlock_t *lock)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_spin_unlock(lock);
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 153, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 153, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 153, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 153, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * Release @lock and restore the IRQ state saved by the matching
 * __raw_spin_lock_irqsave().  The first expanded block is
 * local_irq_restore(flags) with hardirq tracing ordered around the
 * restore depending on whether @flags has IRQs disabled; the second is
 * preempt_enable().
 */
static inline __attribute__((always_inline)) void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
        unsigned long flags)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_spin_unlock(lock);
 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 161, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 162, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 162, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 162, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 162, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * Release @lock and unconditionally re-enable local interrupts
 * (counterpart of __raw_spin_lock_irq), then re-enable preemption.
 */
static inline __attribute__((always_inline)) void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_spin_unlock(lock);
 /* local_irq_enable(), expanded */
 do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);
 /* preempt_enable(), expanded with branch-profiling instrumentation */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 170, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 170, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 170, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 170, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * Release @lock taken with __raw_spin_lock_bh: drop the preempt count
 * without scheduling, then re-enable softirqs via local_bh_enable_ip()
 * (which handles any pending bottom halves and rescheduling).
 */
static inline __attribute__((always_inline)) void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_spin_unlock(lock);
 /* preempt_enable_no_resched(), expanded */
 do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0);
 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
/*
 * Try once to take @lock with softirqs disabled.
 * On success returns 1 with BHs and preemption left disabled; on
 * failure both are re-enabled and 0 is returned.
 */
static inline __attribute__((always_inline)) int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
 local_bh_disable();
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 if (__builtin_constant_p(((do_raw_spin_trylock(lock)))) ? !!((do_raw_spin_trylock(lock))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 185, }; ______r = !!((do_raw_spin_trylock(lock))); ______f.miss_hit[______r]++; ______r; })) {
  lock_acquire(&lock->dep_map, 0, 1, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
  return 1;
 }
 /* failure: undo preempt_disable and local_bh_disable */
 do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0);
 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 return 0;
}
/*
 * Prototypes for the out-of-line _raw_read_*/_raw_write_* rwlock entry
 * points, also placed in .spinlock.text for in_lock_functions().
 */
void __attribute__((section(".spinlock.text"))) _raw_read_lock(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_lock(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_read_lock_bh(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_lock_bh(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_read_lock_irq(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_lock_irq(rwlock_t *lock) ;
/* _irqsave variants return the saved interrupt flags. */
unsigned long __attribute__((section(".spinlock.text"))) _raw_read_lock_irqsave(rwlock_t *lock)
 ;
unsigned long __attribute__((section(".spinlock.text"))) _raw_write_lock_irqsave(rwlock_t *lock)
 ;
int __attribute__((section(".spinlock.text"))) _raw_read_trylock(rwlock_t *lock);
int __attribute__((section(".spinlock.text"))) _raw_write_trylock(rwlock_t *lock);
void __attribute__((section(".spinlock.text"))) _raw_read_unlock(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_unlock(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_read_unlock_bh(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_unlock_bh(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_read_unlock_irq(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_unlock_irq(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text")))
_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 ;
void __attribute__((section(".spinlock.text")))
_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 ;
/*
 * Try once to take @lock for reading.  On success lockdep is notified
 * (read=2, trylock=1) and 1 is returned with preemption disabled; on
 * failure preemption is re-enabled and 0 is returned.
 */
static inline __attribute__((always_inline)) int __raw_read_trylock(rwlock_t *lock)
{
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 if (__builtin_constant_p(((do_raw_read_trylock(lock)))) ? !!((do_raw_read_trylock(lock))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 120, }; ______r = !!((do_raw_read_trylock(lock))); ______f.miss_hit[______r]++; ______r; })) {
  lock_acquire(&lock->dep_map, 0, 1, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
  return 1;
 }
 /* preempt_enable(), expanded with branch-profiling instrumentation */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 124, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 124, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 124, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 124, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
 return 0;
}
/*
 * Try once to take @lock for writing.  On success lockdep is notified
 * (read=0, trylock=1) and 1 is returned with preemption disabled; on
 * failure preemption is re-enabled and 0 is returned.
 */
static inline __attribute__((always_inline)) int __raw_write_trylock(rwlock_t *lock)
{
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 if (__builtin_constant_p(((do_raw_write_trylock(lock)))) ? !!((do_raw_write_trylock(lock))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 131, }; ______r = !!((do_raw_write_trylock(lock))); ______f.miss_hit[______r]++; ______r; })) {
  lock_acquire(&lock->dep_map, 0, 1, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
  return 1;
 }
 /* preempt_enable(), expanded with branch-profiling instrumentation */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 135, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 135, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 135, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 135, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
 return 0;
}
/* Plain read-lock acquire: disable preemption, notify lockdep (read=2),
 * then take the lock. */
static inline __attribute__((always_inline)) void __raw_read_lock(rwlock_t *lock)
{
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_read_lock(lock);
}
/*
 * Take @lock for reading with interrupts disabled; returns the saved
 * IRQ flags for a later _raw_read_unlock_irqrestore().
 */
static inline __attribute__((always_inline)) unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
{
 unsigned long flags;
 /* local_irq_save(flags), expanded (the ({ ... 1; }) is a type check) */
 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 /* LOCK_CONTENDED() collapsed to a direct call in this configuration */
 (do_raw_read_lock)((lock))
      ;
 return flags;
}
/* Take @lock for reading with interrupts disabled (state not saved). */
static inline __attribute__((always_inline)) void __raw_read_lock_irq(rwlock_t *lock)
{
 /* local_irq_disable(), expanded */
 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_read_lock(lock);
}
/* Take @lock for reading with softirqs disabled. */
static inline __attribute__((always_inline)) void __raw_read_lock_bh(rwlock_t *lock)
{
 local_bh_disable();
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_read_lock(lock);
}
/*
 * Take @lock for writing with interrupts disabled; returns the saved
 * IRQ flags for a later _raw_write_unlock_irqrestore().
 */
static inline __attribute__((always_inline)) unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
{
 unsigned long flags;
 /* local_irq_save(flags), expanded (the ({ ... 1; }) is a type check) */
 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 /* LOCK_CONTENDED() collapsed to a direct call in this configuration */
 (do_raw_write_lock)((lock))
      ;
 return flags;
}
/* Take @lock for writing with interrupts disabled (state not saved). */
static inline __attribute__((always_inline)) void __raw_write_lock_irq(rwlock_t *lock)
{
 /* local_irq_disable(), expanded */
 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_write_lock(lock);
}
/* Take @lock for writing with softirqs disabled. */
static inline __attribute__((always_inline)) void __raw_write_lock_bh(rwlock_t *lock)
{
 local_bh_disable();
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_write_lock(lock);
}
/* Plain write-lock acquire: disable preemption, notify lockdep, take the lock. */
static inline __attribute__((always_inline)) void __raw_write_lock(rwlock_t *lock)
{
 /* preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_write_lock(lock);
}
/*
 * Release a write hold on @lock: tell lockdep, drop the lock, then
 * re-enable preemption (the expanded preempt_enable() may call
 * preempt_schedule() if thread flag 3 is set — presumably
 * need_resched; TODO confirm).
 */
static inline __attribute__((always_inline)) void __raw_write_unlock(rwlock_t *lock)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_write_unlock(lock);
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 222, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 222, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 222, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 222, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
5831 | static inline __attribute__((always_inline)) void __raw_read_unlock(rwlock_t *lock) | |
5832 | { | |
5833 | lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0)); | |
5834 | do_raw_read_unlock(lock); | |
5835 | do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 229, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 229, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 229, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 229, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); | |
5836 | } | |
5837 | static inline __attribute__((always_inline)) void | |
5838 | __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | |
5839 | { | |
5840 | lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0)); | |
5841 | do_raw_read_unlock(lock); | |
5842 | do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 237, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); | |
5843 | do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 238, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 238, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 238, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 238, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); | |
5844 | } | |
5845 | static inline __attribute__((always_inline)) void __raw_read_unlock_irq(rwlock_t *lock) | |
5846 | { | |
5847 | lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0)); | |
5848 | do_raw_read_unlock(lock); | |
5849 | do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0); | |
5850 | do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 246, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 246, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 246, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 246, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); | |
5851 | } | |
5852 | static inline __attribute__((always_inline)) void __raw_read_unlock_bh(rwlock_t *lock) | |
5853 | { | |
5854 | lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0)); | |
5855 | do_raw_read_unlock(lock); | |
5856 | do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); | |
5857 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | |
5858 | } | |
5859 | static inline __attribute__((always_inline)) void __raw_write_unlock_irqrestore(rwlock_t *lock, | |
5860 | unsigned long flags) | |
5861 | { | |
5862 | lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0)); | |
5863 | do_raw_write_unlock(lock); | |
5864 | do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 262, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); | |
5865 | do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 263, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 263, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 263, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 263, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); | |
5866 | } | |
5867 | static inline __attribute__((always_inline)) void __raw_write_unlock_irq(rwlock_t *lock) | |
5868 | { | |
5869 | lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0)); | |
5870 | do_raw_write_unlock(lock); | |
5871 | do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0); | |
5872 | do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 271, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 271, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 271, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 271, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); | |
5873 | } | |
5874 | static inline __attribute__((always_inline)) void __raw_write_unlock_bh(rwlock_t *lock) | |
5875 | { | |
5876 | lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0)); | |
5877 | do_raw_write_unlock(lock); | |
5878 | do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); | |
5879 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | |
5880 | } | |
5881 | static inline __attribute__((always_inline)) raw_spinlock_t *spinlock_check(spinlock_t *lock) | |
5882 | { | |
5883 | return &lock->rlock; | |
5884 | } | |
5885 | static inline __attribute__((always_inline)) void spin_lock(spinlock_t *lock) | |
5886 | { | |
5887 | _raw_spin_lock(&lock->rlock); | |
5888 | } | |
5889 | static inline __attribute__((always_inline)) void spin_lock_bh(spinlock_t *lock) | |
5890 | { | |
5891 | _raw_spin_lock_bh(&lock->rlock); | |
5892 | } | |
5893 | static inline __attribute__((always_inline)) int spin_trylock(spinlock_t *lock) | |
5894 | { | |
5895 | return (_raw_spin_trylock(&lock->rlock)); | |
5896 | } | |
5897 | static inline __attribute__((always_inline)) void spin_lock_irq(spinlock_t *lock) | |
5898 | { | |
5899 | _raw_spin_lock_irq(&lock->rlock); | |
5900 | } | |
5901 | static inline __attribute__((always_inline)) void spin_unlock(spinlock_t *lock) | |
5902 | { | |
5903 | _raw_spin_unlock(&lock->rlock); | |
5904 | } | |
5905 | static inline __attribute__((always_inline)) void spin_unlock_bh(spinlock_t *lock) | |
5906 | { | |
5907 | _raw_spin_unlock_bh(&lock->rlock); | |
5908 | } | |
5909 | static inline __attribute__((always_inline)) void spin_unlock_irq(spinlock_t *lock) | |
5910 | { | |
5911 | _raw_spin_unlock_irq(&lock->rlock); | |
5912 | } | |
5913 | static inline __attribute__((always_inline)) void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | |
5914 | { | |
5915 | do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _raw_spin_unlock_irqrestore(&lock->rlock, flags); } while (0); | |
5916 | } | |
5917 | static inline __attribute__((always_inline)) int spin_trylock_bh(spinlock_t *lock) | |
5918 | { | |
5919 | return (_raw_spin_trylock_bh(&lock->rlock)); | |
5920 | } | |
5921 | static inline __attribute__((always_inline)) int spin_trylock_irq(spinlock_t *lock) | |
5922 | { | |
5923 | return ({ do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0); (_raw_spin_trylock(&lock->rlock)) ? 1 : ({ do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0); 0; }); }); | |
5924 | } | |
5925 | static inline __attribute__((always_inline)) void spin_unlock_wait(spinlock_t *lock) | |
5926 | { | |
5927 | arch_spin_unlock_wait(&(&lock->rlock)->raw_lock); | |
5928 | } | |
5929 | static inline __attribute__((always_inline)) int spin_is_locked(spinlock_t *lock) | |
5930 | { | |
5931 | return arch_spin_is_locked(&(&lock->rlock)->raw_lock); | |
5932 | } | |
5933 | static inline __attribute__((always_inline)) int spin_is_contended(spinlock_t *lock) | |
5934 | { | |
5935 | return arch_spin_is_contended(&(&lock->rlock)->raw_lock); | |
5936 | } | |
5937 | static inline __attribute__((always_inline)) int spin_can_lock(spinlock_t *lock) | |
5938 | { | |
5939 | return (!arch_spin_is_locked(&(&lock->rlock)->raw_lock)); | |
5940 | } | |
5941 | static inline __attribute__((always_inline)) void assert_spin_locked(spinlock_t *lock) | |
5942 | { | |
5943 | do { if (__builtin_constant_p((((__builtin_constant_p(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) ? !!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock.h", .line = 380, }; ______r = __builtin_expect(!!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) ? !!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock.h", .line = 380, }; ______r = __builtin_expect(!!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock.h", .line = 380, }; ______r = !!(((__builtin_constant_p(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) ? 
!!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock.h", .line = 380, }; ______r = __builtin_expect(!!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/spinlock.h"), "i" (380), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0); | |
5944 | } | |
5945 | extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); | |
5946 | typedef struct { | |
5947 | unsigned sequence; | |
5948 | spinlock_t lock; | |
5949 | } seqlock_t; | |
5950 | static inline __attribute__((always_inline)) void write_seqlock(seqlock_t *sl) | |
5951 | { | |
5952 | spin_lock(&sl->lock); | |
5953 | ++sl->sequence; | |
5954 | __asm__ __volatile__("": : :"memory"); | |
5955 | } | |
5956 | static inline __attribute__((always_inline)) void write_sequnlock(seqlock_t *sl) | |
5957 | { | |
5958 | __asm__ __volatile__("": : :"memory"); | |
5959 | sl->sequence++; | |
5960 | spin_unlock(&sl->lock); | |
5961 | } | |
5962 | static inline __attribute__((always_inline)) int write_tryseqlock(seqlock_t *sl) | |
5963 | { | |
5964 | int ret = spin_trylock(&sl->lock); | |
5965 | if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 76, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; })) { | |
5966 | ++sl->sequence; | |
5967 | __asm__ __volatile__("": : :"memory"); | |
5968 | } | |
5969 | return ret; | |
5970 | } | |
5971 | static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned read_seqbegin(const seqlock_t *sl) | |
5972 | { | |
5973 | unsigned ret; | |
5974 | repeat: | |
5975 | ret = (*(volatile typeof(sl->sequence) *)&(sl->sequence)); | |
5976 | if (__builtin_constant_p((((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 90, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 90, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 90, }; ______r = !!(((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 90, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) { | |
5977 | cpu_relax(); | |
5978 | goto repeat; | |
5979 | } | |
5980 | __asm__ __volatile__("": : :"memory"); | |
5981 | return ret; | |
5982 | } | |
5983 | static inline __attribute__((always_inline)) __attribute__((always_inline)) int read_seqretry(const seqlock_t *sl, unsigned start) | |
5984 | { | |
5985 | __asm__ __volatile__("": : :"memory"); | |
5986 | return (__builtin_constant_p(sl->sequence != start) ? !!(sl->sequence != start) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 108, }; ______r = __builtin_expect(!!(sl->sequence != start), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); | |
5987 | } | |
5988 | typedef struct seqcount { | |
5989 | unsigned sequence; | |
5990 | } seqcount_t; | |
5991 | static inline __attribute__((always_inline)) unsigned __read_seqcount_begin(const seqcount_t *s) | |
5992 | { | |
5993 | unsigned ret; | |
5994 | repeat: | |
5995 | ret = s->sequence; | |
5996 | if (__builtin_constant_p((((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 145, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 145, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 145, }; ______r = !!(((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 145, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) { | |
5997 | cpu_relax(); | |
5998 | goto repeat; | |
5999 | } | |
6000 | return ret; | |
6001 | } | |
6002 | static inline __attribute__((always_inline)) unsigned read_seqcount_begin(const seqcount_t *s) | |
6003 | { | |
6004 | unsigned ret = __read_seqcount_begin(s); | |
6005 | __asm__ __volatile__("": : :"memory"); | |
6006 | return ret; | |
6007 | } | |
6008 | static inline __attribute__((always_inline)) int __read_seqcount_retry(const seqcount_t *s, unsigned start) | |
6009 | { | |
6010 | return (__builtin_constant_p(s->sequence != start) ? !!(s->sequence != start) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 184, }; ______r = __builtin_expect(!!(s->sequence != start), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); | |
6011 | } | |
6012 | static inline __attribute__((always_inline)) int read_seqcount_retry(const seqcount_t *s, unsigned start) | |
6013 | { | |
6014 | __asm__ __volatile__("": : :"memory"); | |
6015 | return __read_seqcount_retry(s, start); | |
6016 | } | |
6017 | static inline __attribute__((always_inline)) void write_seqcount_begin(seqcount_t *s) | |
6018 | { | |
6019 | s->sequence++; | |
6020 | __asm__ __volatile__("": : :"memory"); | |
6021 | } | |
6022 | static inline __attribute__((always_inline)) void write_seqcount_end(seqcount_t *s) | |
6023 | { | |
6024 | __asm__ __volatile__("": : :"memory"); | |
6025 | s->sequence++; | |
6026 | } | |
6027 | static inline __attribute__((always_inline)) void write_seqcount_barrier(seqcount_t *s) | |
6028 | { | |
6029 | __asm__ __volatile__("": : :"memory"); | |
6030 | s->sequence+=2; | |
6031 | } | |
6032 | struct timespec { | |
6033 | __kernel_time_t tv_sec; | |
6034 | long tv_nsec; | |
6035 | }; | |
6036 | struct timeval { | |
6037 | __kernel_time_t tv_sec; | |
6038 | __kernel_suseconds_t tv_usec; | |
6039 | }; | |
6040 | struct timezone { | |
6041 | int tz_minuteswest; | |
6042 | int tz_dsttime; | |
6043 | }; | |
6044 | extern struct timezone sys_tz; | |
6045 | static inline __attribute__((always_inline)) int timespec_equal(const struct timespec *a, | |
6046 | const struct timespec *b) | |
6047 | { | |
6048 | return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); | |
6049 | } | |
6050 | static inline __attribute__((always_inline)) int timespec_compare(const struct timespec *lhs, const struct timespec *rhs) | |
6051 | { | |
6052 | if (__builtin_constant_p(((lhs->tv_sec < rhs->tv_sec))) ? !!((lhs->tv_sec < rhs->tv_sec)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/time.h", .line = 58, }; ______r = !!((lhs->tv_sec < rhs->tv_sec)); ______f.miss_hit[______r]++; ______r; })) | |
6053 | return -1; | |
6054 | if (__builtin_constant_p(((lhs->tv_sec > rhs->tv_sec))) ? !!((lhs->tv_sec > rhs->tv_sec)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/time.h", .line = 60, }; ______r = !!((lhs->tv_sec > rhs->tv_sec)); ______f.miss_hit[______r]++; ______r; })) | |
6055 | return 1; | |
6056 | return lhs->tv_nsec - rhs->tv_nsec; | |
6057 | } | |
6058 | static inline __attribute__((always_inline)) int timeval_compare(const struct timeval *lhs, const struct timeval *rhs) | |
6059 | { | |
6060 | if (__builtin_constant_p(((lhs->tv_sec < rhs->tv_sec))) ? !!((lhs->tv_sec < rhs->tv_sec)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/time.h", .line = 67, }; ______r = !!((lhs->tv_sec < rhs->tv_sec)); ______f.miss_hit[______r]++; ______r; })) | |
6061 | return -1; | |
6062 | if (__builtin_constant_p(((lhs->tv_sec > rhs->tv_sec))) ? !!((lhs->tv_sec > rhs->tv_sec)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/time.h", .line = 69, }; ______r = !!((lhs->tv_sec > rhs->tv_sec)); ______f.miss_hit[______r]++; ______r; })) | |
6063 | return 1; | |
6064 | return lhs->tv_usec - rhs->tv_usec; | |
6065 | } | |
6066 | extern unsigned long mktime(const unsigned int year, const unsigned int mon, | |
6067 | const unsigned int day, const unsigned int hour, | |
6068 | const unsigned int min, const unsigned int sec); | |
6069 | extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); | |
6070 | extern struct timespec timespec_add_safe(const struct timespec lhs, | |
6071 | const struct timespec rhs); | |
6072 | static inline __attribute__((always_inline)) struct timespec timespec_add(struct timespec lhs, | |
6073 | struct timespec rhs) | |
6074 | { | |
6075 | struct timespec ts_delta; | |
6076 | set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec, | |
6077 | lhs.tv_nsec + rhs.tv_nsec); | |
6078 | return ts_delta; | |
6079 | } | |
6080 | static inline __attribute__((always_inline)) struct timespec timespec_sub(struct timespec lhs, | |
6081 | struct timespec rhs) | |
6082 | { | |
6083 | struct timespec ts_delta; | |
6084 | set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec, | |
6085 | lhs.tv_nsec - rhs.tv_nsec); | |
6086 | return ts_delta; | |
6087 | } | |
/* Timekeeping core interface: persistent/boot clock hooks, xtime accessors,
 * and sleep-time/offset injection helpers. */
extern void read_persistent_clock(struct timespec *ts);
extern void read_boot_clock(struct timespec *ts);
extern int update_persistent_clock(struct timespec now);
/* Placed in .data..read_mostly: written rarely, read on hot paths. */
extern int no_sync_cmos_clock __attribute__((__section__(".data..read_mostly")));
void timekeeping_init(void);
extern int timekeeping_suspended;
unsigned long get_seconds(void);
struct timespec current_kernel_time(void);
struct timespec __current_kernel_time(void);
struct timespec get_monotonic_coarse(void);
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
				struct timespec *wtom, struct timespec *sleep);
void timekeeping_inject_sleeptime(struct timespec *delta);
/* No architecture-specific clock offset in this configuration. */
static inline __attribute__((always_inline)) u32 arch_gettimeoffset(void) { return 0; }
extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday(const struct timespec *tv);
extern int do_sys_settimeofday(const struct timespec *tv,
			       const struct timezone *tz);
extern long do_utimes(int dfd, const char *filename, struct timespec *times, int flags);
struct itimerval;
extern int do_setitimer(int which, struct itimerval *value,
			struct itimerval *ovalue);
extern unsigned int alarm_setitimer(unsigned int seconds);
extern int do_getitimer(int which, struct itimerval *value);
extern void getnstimeofday(struct timespec *tv);
extern void getrawmonotonic(struct timespec *ts);
extern void getnstime_raw_and_real(struct timespec *ts_raw,
				   struct timespec *ts_real);
extern void getboottime(struct timespec *ts);
extern void monotonic_to_bootbased(struct timespec *ts);
extern void get_monotonic_boottime(struct timespec *ts);
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_valid_for_hres(void);
extern u64 timekeeping_max_deferment(void);
extern void timekeeping_leap_insert(int leapsecond);
extern int timekeeping_inject_offset(struct timespec *ts);
struct tms;
extern void do_sys_times(struct tms *);
/* Broken-down calendar time (kernel-internal analogue of userspace struct tm). */
struct tm {
	int tm_sec;	/* seconds */
	int tm_min;	/* minutes */
	int tm_hour;	/* hours */
	int tm_mday;	/* day of month */
	int tm_mon;	/* month */
	long tm_year;	/* year — NOTE(review): base/epoch offset not visible here; confirm against time_to_tm() */
	int tm_wday;	/* day of week */
	int tm_yday;	/* day of year */
};
/* Convert a seconds count (plus a fixed offset) into broken-down time in *result. */
void time_to_tm(time_t totalsecs, int offset, struct tm *result);
6137 | static inline __attribute__((always_inline)) s64 timespec_to_ns(const struct timespec *ts) | |
6138 | { | |
6139 | return ((s64) ts->tv_sec * 1000000000L) + ts->tv_nsec; | |
6140 | } | |
6141 | static inline __attribute__((always_inline)) s64 timeval_to_ns(const struct timeval *tv) | |
6142 | { | |
6143 | return ((s64) tv->tv_sec * 1000000000L) + | |
6144 | tv->tv_usec * 1000L; | |
6145 | } | |
extern struct timespec ns_to_timespec(const s64 nsec);
extern struct timeval ns_to_timeval(const s64 nsec);
/* Add ns nanoseconds to *a, normalizing tv_nsec back into nanosecond range. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void timespec_add_ns(struct timespec *a, u64 ns)
{
	/* __iter_div_u64_rem presumably returns whole seconds and leaves the
	 * remainder in ns — confirm against its definition. */
	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, 1000000000L, &ns);
	a->tv_nsec = ns;
}
/* Interval timer specification in nanosecond resolution. */
struct itimerspec {
	struct timespec it_interval;	/* period for re-arming */
	struct timespec it_value;	/* time until next expiry */
};
/* Interval timer specification in microsecond resolution. */
struct itimerval {
	struct timeval it_interval;	/* period for re-arming */
	struct timeval it_value;	/* time until next expiry */
};
/* Kernel clock adjustment parameters — see adjtimex(2). */
struct timex {
	unsigned int modes;	/* mode selector: which fields to apply */
	long offset;		/* time offset */
	long freq;		/* frequency offset */
	long maxerror;		/* maximum error */
	long esterror;		/* estimated error */
	int status;		/* clock status bits */
	long constant;		/* PLL time constant */
	long precision;		/* clock precision */
	long tolerance;		/* clock frequency tolerance */
	struct timeval time;	/* current time */
	long tick;		/* microseconds between clock ticks */
	long ppsfreq;		/* PPS frequency */
	long jitter;		/* PPS jitter */
	int shift;		/* PPS interval duration */
	long stabil;		/* PPS stability */
	long jitcnt;		/* PPS count of jitter limit exceeded */
	long calcnt;		/* PPS count of calibration intervals */
	long errcnt;		/* PPS count of calibration errors */
	long stbcnt;		/* PPS count of stability limit exceeded */
	int tai;		/* TAI offset */
	/* Reserved padding for future expansion. */
	int :32; int :32; int :32; int :32;
	int :32; int :32; int :32; int :32;
	int :32; int :32; int :32;
};
/* TSC (cycle counter) interface. */
typedef unsigned long long cycles_t;
extern unsigned int cpu_khz;
extern unsigned int tsc_khz;
extern void disable_TSC(void);
/* Read the TSC through the paravirt indirection. */
static inline __attribute__((always_inline)) cycles_t get_cycles(void)
{
	unsigned long long ret = 0;
	(ret = paravirt_read_tsc());
	return ret;
}
/* Read the TSC natively, bypassing the paravirt hook. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) cycles_t vget_cycles(void)
{
	return (cycles_t)__native_read_tsc();
}
extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
extern unsigned long native_calibrate_tsc(void);
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
extern int notsc_setup(char *);
extern void save_sched_clock_state(void);
extern void restore_sched_clock_state(void);
/* NTP state interface. */
extern unsigned long tick_usec;
extern unsigned long tick_nsec;
extern int time_status;
extern void ntp_init(void);
extern void ntp_clear(void);
/* Nonzero when the clock is NTP-synchronized.
 * 0x0040 is presumably STA_UNSYNC (see adjtimex(2)) — confirm. */
static inline __attribute__((always_inline)) int ntp_synced(void)
{
	return !(time_status & 0x0040);
}
extern u64 tick_length;
extern void second_overflow(void);
extern void update_ntp_one_tick(void);
extern int do_adjtimex(struct timex *);
extern void hardpps(const struct timespec *, const struct timespec *);
/* Jiffies tick counters and unit conversions. Both symbols are forced into
 * .data — NOTE(review): jiffies presumably overlays the low word of
 * jiffies_64 via the linker script; not visible here. */
extern u64 __attribute__((section(".data"))) jiffies_64;
extern unsigned long volatile __attribute__((section(".data"))) jiffies;
u64 get_jiffies_64(void);
extern unsigned long preset_lpj;
extern unsigned int jiffies_to_msecs(const unsigned long j);
extern unsigned int jiffies_to_usecs(const unsigned long j);
extern unsigned long msecs_to_jiffies(const unsigned int m);
extern unsigned long usecs_to_jiffies(const unsigned int u);
extern unsigned long timespec_to_jiffies(const struct timespec *value);
extern void jiffies_to_timespec(const unsigned long jiffies,
				struct timespec *value);
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
			       struct timeval *value);
extern clock_t jiffies_to_clock_t(long x);
extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
extern u64 nsecs_to_jiffies64(u64 n);
extern unsigned long nsecs_to_jiffies(u64 n);
/* Scalar time value: 64-bit signed nanosecond count (64-bit-only ktime layout). */
union ktime {
	s64 tv64;	/* nanoseconds */
};
typedef union ktime ktime_t;
6249 | static inline __attribute__((always_inline)) ktime_t ktime_set(const long secs, const unsigned long nsecs) | |
6250 | { | |
6251 | return (ktime_t) { .tv64 = (s64)secs * 1000000000L + (s64)nsecs }; | |
6252 | } | |
/* timespec -> ktime_t. */
static inline __attribute__((always_inline)) ktime_t timespec_to_ktime(struct timespec ts)
{
	return ktime_set(ts.tv_sec, ts.tv_nsec);
}
/* timeval -> ktime_t (microseconds scaled to nanoseconds). */
static inline __attribute__((always_inline)) ktime_t timeval_to_ktime(struct timeval tv)
{
	return ktime_set(tv.tv_sec, tv.tv_usec * 1000L);
}
/* Exact equality of two ktime values. */
static inline __attribute__((always_inline)) int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
{
	return cmp1.tv64 == cmp2.tv64;
}
/* ktime_t -> whole microseconds (via a timeval round-trip). */
static inline __attribute__((always_inline)) s64 ktime_to_us(const ktime_t kt)
{
	struct timeval tv = ns_to_timeval((kt).tv64);
	return (s64) tv.tv_sec * 1000000L + tv.tv_usec;
}
/* ktime_t -> whole milliseconds. */
static inline __attribute__((always_inline)) s64 ktime_to_ms(const ktime_t kt)
{
	struct timeval tv = ns_to_timeval((kt).tv64);
	return (s64) tv.tv_sec * 1000L + tv.tv_usec / 1000L;
}
/* Microsecond difference (later - earlier); uses expanded ktime_sub(). */
static inline __attribute__((always_inline)) s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
{
	return ktime_to_us(({ (ktime_t){ .tv64 = (later).tv64 - (earlier).tv64 }; }));
}
/* Add usec microseconds to kt. */
static inline __attribute__((always_inline)) ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
	return ({ (ktime_t){ .tv64 = (kt).tv64 + (usec * 1000) }; });
}
/* Subtract usec microseconds from kt. */
static inline __attribute__((always_inline)) ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
{
	return ({ (ktime_t){ .tv64 = (kt).tv64 - (usec * 1000) }; });
}
/* Addition, implemented out of line — presumably overflow-guarded, per its name. */
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
extern void ktime_get_ts(struct timespec *ts);
/* Nanosecond count -> ktime_t (expanded ktime_add_ns() against zero). */
static inline __attribute__((always_inline)) ktime_t ns_to_ktime(u64 ns)
{
	static const ktime_t ktime_zero = { .tv64 = 0 };
	return ({ (ktime_t){ .tv64 = (ktime_zero).tv64 + (ns) }; });
}
/* debugobjects interface. The empty inline stubs below show the debug
 * infrastructure is compiled out in this configuration. */
enum debug_obj_state {
	ODEBUG_STATE_NONE,
	ODEBUG_STATE_INIT,
	ODEBUG_STATE_INACTIVE,
	ODEBUG_STATE_ACTIVE,
	ODEBUG_STATE_DESTROYED,
	ODEBUG_STATE_NOTAVAILABLE,
	ODEBUG_STATE_MAX,
};
struct debug_obj_descr;
/* Tracking record for one debugged object. */
struct debug_obj {
	struct hlist_node node;
	enum debug_obj_state state;
	unsigned int astate;
	void *object;			/* the object being tracked */
	struct debug_obj_descr *descr;	/* its type descriptor */
};
/* Per-type name and fixup callbacks for objects found in a bad state. */
struct debug_obj_descr {
	const char *name;
	void *(*debug_hint) (void *addr);
	int (*fixup_init) (void *addr, enum debug_obj_state state);
	int (*fixup_activate) (void *addr, enum debug_obj_state state);
	int (*fixup_destroy) (void *addr, enum debug_obj_state state);
	int (*fixup_free) (void *addr, enum debug_obj_state state);
};
/* No-op stubs (debugobjects disabled). */
static inline __attribute__((always_inline)) void
debug_object_init (void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((always_inline)) void
debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((always_inline)) void
debug_object_activate (void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((always_inline)) void
debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((always_inline)) void
debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((always_inline)) void
debug_object_free (void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((always_inline)) void debug_objects_early_init(void) { }
static inline __attribute__((always_inline)) void debug_objects_mem_init(void) { }
static inline __attribute__((always_inline)) void
debug_check_no_obj_freed(const void *address, unsigned long size) { }
struct tvec_base;
/* Kernel timer. The start_* and lockdep_map fields indicate timer stats and
 * lockdep are enabled in this build. */
struct timer_list {
	struct list_head entry;		/* list linkage; non-NULL next while queued */
	unsigned long expires;		/* expiry time, in jiffies */
	struct tvec_base *base;		/* per-CPU timer base owning this timer */
	void (*function)(unsigned long);/* callback run on expiry */
	unsigned long data;		/* argument passed to the callback */
	int slack;			/* allowed expiry slack (see set_timer_slack) */
	/* timer-stats bookkeeping: who started the timer and from where */
	int start_pid;
	void *start_site;
	char start_comm[16];
	struct lockdep_map lockdep_map;
};
extern struct tvec_base boot_tvec_bases;
/* The _key suffix variants carry a lockdep class name/key for the timer. */
void init_timer_key(struct timer_list *timer,
		    const char *name,
		    struct lock_class_key *key);
void init_timer_deferrable_key(struct timer_list *timer,
			       const char *name,
			       struct lock_class_key *key);
/* No-op here (debugobjects disabled). */
static inline __attribute__((always_inline)) void destroy_timer_on_stack(struct timer_list *timer) { }
/* On-stack variant; identical to init_timer_key in this configuration. */
static inline __attribute__((always_inline)) void init_timer_on_stack_key(struct timer_list *timer,
					  const char *name,
					  struct lock_class_key *key)
{
	init_timer_key(timer, name, key);
}
/* Initialize a timer and set its callback and argument in one call. */
static inline __attribute__((always_inline)) void setup_timer_key(struct timer_list * timer,
				const char *name,
				struct lock_class_key *key,
				void (*function)(unsigned long),
				unsigned long data)
{
	timer->function = function;
	timer->data = data;
	init_timer_key(timer, name, key);
}
/* As setup_timer_key, for timers living on the stack. */
static inline __attribute__((always_inline)) void setup_timer_on_stack_key(struct timer_list *timer,
					 const char *name,
					 struct lock_class_key *key,
					 void (*function)(unsigned long),
					 unsigned long data)
{
	timer->function = function;
	timer->data = data;
	init_timer_on_stack_key(timer, name, key);
}
extern void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
						const char *name,
						struct lock_class_key *key,
						void (*function)(unsigned long),
						unsigned long data);
/* Nonzero while the timer is queued: a queued timer has a non-NULL
 * entry.next link. */
static inline __attribute__((always_inline)) int timer_pending(const struct timer_list * timer)
{
	return timer->entry.next != ((void *)0);
}
extern void add_timer_on(struct timer_list *timer, int cpu);
extern int del_timer(struct timer_list * timer);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
extern void set_timer_slack(struct timer_list *time, int slack_hz);
extern unsigned long get_next_timer_interrupt(unsigned long now);
/* timer-stats collection interface */
extern int timer_stats_active;
extern void init_timer_stats(void);
extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
				     void *timerf, char *comm,
				     unsigned int timer_flag);
extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
					       void *addr);
/* Record the caller's address as the timer's start site for timer stats.
 * The condition is a preprocessor-expanded unlikely(!timer_stats_active)
 * with ftrace branch profiling wrapped around it. */
static inline __attribute__((always_inline)) void timer_stats_timer_set_start_info(struct timer_list *timer)
{
	if (__builtin_constant_p((((__builtin_constant_p(!timer_stats_active) ? !!(!timer_stats_active) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/timer.h", .line = 252, }; ______r = __builtin_expect(!!(!timer_stats_active), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(!timer_stats_active) ? !!(!timer_stats_active) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/timer.h", .line = 252, }; ______r = __builtin_expect(!!(!timer_stats_active), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/timer.h", .line = 252, }; ______r = !!(((__builtin_constant_p(!timer_stats_active) ? !!(!timer_stats_active) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/timer.h", .line = 252, }; ______r = __builtin_expect(!!(!timer_stats_active), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
		return;
	__timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
}
/* Forget the recorded start site. */
static inline __attribute__((always_inline)) void timer_stats_timer_clear_start_info(struct timer_list *timer)
{
	timer->start_site = ((void *)0);
}
extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
extern int del_timer_sync(struct timer_list *timer);
extern void init_timers(void);
extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
/* jiffies rounding helpers; see their out-of-line definitions for the exact
 * rounding semantics. */
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
unsigned long round_jiffies_relative(unsigned long j);
unsigned long __round_jiffies_up(unsigned long j, int cpu);
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
unsigned long round_jiffies_up(unsigned long j);
unsigned long round_jiffies_up_relative(unsigned long j);
/* Workqueue interface begins here. */
struct workqueue_struct;
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
/* Flag bits packed into work_struct->data, plus derived masks and limits. */
enum {
	WORK_STRUCT_PENDING_BIT = 0,
	WORK_STRUCT_DELAYED_BIT = 1,
	WORK_STRUCT_CWQ_BIT = 2,
	WORK_STRUCT_LINKED_BIT = 3,
	WORK_STRUCT_COLOR_SHIFT = 4,
	WORK_STRUCT_COLOR_BITS = 4,
	WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
	WORK_STRUCT_STATIC = 0,
	/* one color value is reserved for WORK_NO_COLOR */
	WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR = WORK_NR_COLORS,
	WORK_CPU_UNBOUND = 8,
	WORK_CPU_NONE = 8 + 1,
	WORK_CPU_LAST = WORK_CPU_NONE,
	/* low FLAG_BITS of data hold flags; the rest holds the cwq pointer */
	WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
				WORK_STRUCT_COLOR_BITS,
	WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
	/* bits returned by work_busy() */
	WORK_BUSY_PENDING = 1 << 0,
	WORK_BUSY_RUNNING = 1 << 1,
};
/* A deferred-execution work item. */
struct work_struct {
	atomic_long_t data;		/* flag bits plus owner data, per WORK_STRUCT_* */
	struct list_head entry;		/* queue linkage */
	work_func_t func;		/* function executed by the worker */
	struct lockdep_map lockdep_map;
};
/* Work item executed after a timer delay. */
struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};
/* Expanded container_of(work, struct delayed_work, work). */
static inline __attribute__((always_inline)) struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return ({ const typeof( ((struct delayed_work *)0)->work ) *__mptr = (work); (struct delayed_work *)( (char *)__mptr - __builtin_offsetof(struct delayed_work,work) );});
}
struct execute_work {
	struct work_struct work;
};
/* Debug stubs — no-ops in this configuration. */
static inline __attribute__((always_inline)) void __init_work(struct work_struct *work, int onstack) { }
static inline __attribute__((always_inline)) void destroy_work_on_stack(struct work_struct *work) { }
static inline __attribute__((always_inline)) unsigned int work_static(struct work_struct *work) { return 0; }
/* Workqueue creation flags and concurrency limits. */
enum {
	WQ_NON_REENTRANT = 1 << 0,
	WQ_UNBOUND = 1 << 1,		/* not bound to any specific CPU */
	WQ_FREEZABLE = 1 << 2,
	WQ_MEM_RECLAIM = 1 << 3,
	WQ_HIGHPRI = 1 << 4,
	WQ_CPU_INTENSIVE = 1 << 5,
	WQ_DYING = 1 << 6,
	WQ_RESCUER = 1 << 7,
	WQ_MAX_ACTIVE = 512,
	WQ_MAX_UNBOUND_PER_CPU = 4,
	WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
/* System-wide shared workqueues. */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
		      struct lock_class_key *key, const char *lock_name);
/* Create an unbound workqueue with max_active == 1 so queued work executes
 * strictly one item at a time. Body is the expanded alloc_workqueue()
 * macro: it picks a lockdep lock name (the string itself when `name` is a
 * constant) and calls __alloc_workqueue_key(). */
static inline __attribute__((always_inline)) struct workqueue_struct *
alloc_ordered_workqueue(const char *name, unsigned int flags)
{
	return ({ static struct lock_class_key __key; const char *__lock_name; if (__builtin_constant_p(((__builtin_constant_p(name)))) ? !!((__builtin_constant_p(name))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/workqueue.h", .line = 337, }; ______r = !!((__builtin_constant_p(name))); ______f.miss_hit[______r]++; ______r; })) __lock_name = (name); else __lock_name = "name"; __alloc_workqueue_key((name), (WQ_UNBOUND | flags), (1), &__key, __lock_name); });
}
extern void destroy_workqueue(struct workqueue_struct *wq);
/* Queue work on a specific workqueue (optionally on a specific CPU / after
 * a delay in jiffies). */
extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
/* schedule_* variants presumably target the default system workqueue. */
extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);
/* Synchronous flush / cancel helpers. */
extern bool flush_work(struct work_struct *work);
extern bool flush_work_sync(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool flush_delayed_work_sync(struct delayed_work *work);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);
/* Cancel a delayed work item's timer, waiting for a running timer handler
 * (del_timer_sync); if the timer was dequeued, clear the work's pending
 * bit. The condition is a plain if (ret) wrapped in ftrace branch profiling. */
static inline __attribute__((always_inline)) bool cancel_delayed_work(struct delayed_work *work)
{
	bool ret;
	ret = del_timer_sync(&work->timer);
	if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/workqueue.h", .line = 395, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; }))
		clear_bit(WORK_STRUCT_PENDING_BIT, ((unsigned long *)(&(&work->work)->data)));
	return ret;
}
/* As cancel_delayed_work() but uses del_timer(), so it does not wait for a
 * concurrently running timer handler. */
static inline __attribute__((always_inline)) bool __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;
	ret = del_timer(&work->timer);
	if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/workqueue.h", .line = 410, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; }))
		clear_bit(WORK_STRUCT_PENDING_BIT, ((unsigned long *)(&(&work->work)->data)));
	return ret;
}
/* Deprecated aliases for cancel_delayed_work_sync(). */
static inline __attribute__((always_inline)) __attribute__((deprecated))
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
					struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}
static inline __attribute__((always_inline)) __attribute__((deprecated))
void cancel_rearming_delayed_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
/* Wait queue types. */
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
/* One waiter on a wait queue. */
struct __wait_queue {
	unsigned int flags;		/* 0x01 marks an exclusive waiter */
	void *private;			/* typically the waiting task_struct */
	wait_queue_func_t func;		/* callback invoked to wake this entry */
	struct list_head task_list;
};
/* Identifies which bit of which word a wait-on-bit waiter is blocked on. */
struct wait_bit_key {
	void *flags;
	int bit_nr;
};
struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};
/* Wait queue head; lock protects task_list. */
struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
struct task_struct;
extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
6588 | static inline __attribute__((always_inline)) void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) | |
6589 | { | |
6590 | q->flags = 0; | |
6591 | q->private = p; | |
6592 | q->func = default_wake_function; | |
6593 | } | |
6594 | static inline __attribute__((always_inline)) void init_waitqueue_func_entry(wait_queue_t *q, | |
6595 | wait_queue_func_t func) | |
6596 | { | |
6597 | q->flags = 0; | |
6598 | q->private = ((void *)0); | |
6599 | q->func = func; | |
6600 | } | |
/* Nonzero when at least one waiter is queued. */
static inline __attribute__((always_inline)) int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
/* __-prefixed helpers below do no locking themselves — NOTE(review):
 * callers presumably hold q->lock; confirm against the locked wrappers. */
static inline __attribute__((always_inline)) void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}
/* Add an exclusive waiter (flags |= 0x01) at the head. */
static inline __attribute__((always_inline)) void __add_wait_queue_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= 0x01;
	__add_wait_queue(q, wait);
}
/* Add a waiter at the tail of the queue. */
static inline __attribute__((always_inline)) void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}
/* Add an exclusive waiter at the tail. */
static inline __attribute__((always_inline)) void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= 0x01;
	__add_wait_queue_tail(q, wait);
}
/* Unlink a waiter from its queue. */
static inline __attribute__((always_inline)) void __remove_wait_queue(wait_queue_head_t *head,
					wait_queue_t *old)
{
	list_del(&old->task_list);
}
/* Wakeup primitives. */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
/* Wait-on-bit machinery. */
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
/* Legacy sleep interfaces. */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);
/* prepare/finish wait protocol. */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/* Fast path: return 0 immediately if the bit is already clear, otherwise
 * wait out of line. The condition is an expanded test_bit() wrapped in
 * ftrace branch profiling. */
static inline __attribute__((always_inline)) int wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (__builtin_constant_p(((!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word)))))) ? !!((!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/wait.h", .line = 637, }; ______r = !!((!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word))))); ______f.miss_hit[______r]++; ______r; }))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
/* Try to take the bit lock with test_and_set_bit(); only wait out of line
 * if the bit was already set. */
static inline __attribute__((always_inline)) int wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (__builtin_constant_p(((!test_and_set_bit(bit, word)))) ? !!((!test_and_set_bit(bit, word))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/wait.h", .line = 661, }; ______r = !!((!test_and_set_bit(bit, word))); ______f.miss_hit[______r]++; ______r; }))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
/* Completion: a done counter plus a wait queue. */
struct completion {
	unsigned int done;
	wait_queue_head_t wait;
};
/* Reset the counter and (re)initialize the wait queue head; the do/while
 * wrapper is the expanded init_waitqueue_head() macro supplying a static
 * lockdep class key. */
static inline __attribute__((always_inline)) void init_completion(struct completion *x)
{
	x->done = 0;
	do { static struct lock_class_key __key; __init_waitqueue_head((&x->wait), &__key); } while (0);
}
extern void wait_for_completion(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
extern unsigned long wait_for_completion_timeout(struct completion *x,
						   unsigned long timeout);
extern long wait_for_completion_interruptible_timeout(
			struct completion *x, unsigned long timeout);
extern long wait_for_completion_killable_timeout(
			struct completion *x, unsigned long timeout);
extern bool try_wait_for_completion(struct completion *x);
extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
extern void complete_all(struct completion *);
/* Global power-management hooks. */
extern void (*pm_idle)(void);
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);
struct device;
extern const char power_group_name[];
/* Power-management transition event wrapper. */
typedef struct pm_message {
	int event;
} pm_message_t;
/* Device PM callbacks for system sleep transitions and runtime PM. */
struct dev_pm_ops {
	int (*prepare)(struct device *dev);
	void (*complete)(struct device *dev);
	int (*suspend)(struct device *dev);
	int (*resume)(struct device *dev);
	int (*freeze)(struct device *dev);
	int (*thaw)(struct device *dev);
	int (*poweroff)(struct device *dev);
	int (*restore)(struct device *dev);
	/* _noirq variants — NOTE(review): presumably run with device
	 * interrupts disabled; confirm against the PM core. */
	int (*suspend_noirq)(struct device *dev);
	int (*resume_noirq)(struct device *dev);
	int (*freeze_noirq)(struct device *dev);
	int (*thaw_noirq)(struct device *dev);
	int (*poweroff_noirq)(struct device *dev);
	int (*restore_noirq)(struct device *dev);
	/* runtime PM */
	int (*runtime_suspend)(struct device *dev);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_idle)(struct device *dev);
};
extern struct dev_pm_ops generic_subsys_pm_ops;
/* Runtime-PM device state. */
enum rpm_status {
	RPM_ACTIVE = 0,
	RPM_RESUMING,
	RPM_SUSPENDED,
	RPM_SUSPENDING,
};
/* Pending runtime-PM request kind. */
enum rpm_request {
	RPM_REQ_NONE = 0,
	RPM_REQ_IDLE,
	RPM_REQ_SUSPEND,
	RPM_REQ_AUTOSUSPEND,
	RPM_REQ_RESUME,
};
struct wakeup_source;
6738 | struct dev_pm_info { | |
6739 | pm_message_t power_state; | |
6740 | unsigned int can_wakeup:1; | |
6741 | unsigned int async_suspend:1; | |
6742 | bool is_prepared:1; | |
6743 | bool is_suspended:1; | |
6744 | spinlock_t lock; | |
6745 | struct list_head entry; | |
6746 | struct completion completion; | |
6747 | struct wakeup_source *wakeup; | |
6748 | struct timer_list suspend_timer; | |
6749 | unsigned long timer_expires; | |
6750 | struct work_struct work; | |
6751 | wait_queue_head_t wait_queue; | |
6752 | atomic_t usage_count; | |
6753 | atomic_t child_count; | |
6754 | unsigned int disable_depth:3; | |
6755 | unsigned int ignore_children:1; | |
6756 | unsigned int idle_notification:1; | |
6757 | unsigned int request_pending:1; | |
6758 | unsigned int deferred_resume:1; | |
6759 | unsigned int run_wake:1; | |
6760 | unsigned int runtime_auto:1; | |
6761 | unsigned int no_callbacks:1; | |
6762 | unsigned int irq_safe:1; | |
6763 | unsigned int use_autosuspend:1; | |
6764 | unsigned int timer_autosuspends:1; | |
6765 | enum rpm_request request; | |
6766 | enum rpm_status runtime_status; | |
6767 | int runtime_error; | |
6768 | int autosuspend_delay; | |
6769 | unsigned long last_busy; | |
6770 | unsigned long active_jiffies; | |
6771 | unsigned long suspended_jiffies; | |
6772 | unsigned long accounting_timestamp; | |
6773 | void *subsys_data; | |
6774 | }; | |
6775 | extern void update_pm_runtime_accounting(struct device *dev); | |
6776 | struct dev_power_domain { | |
6777 | struct dev_pm_ops ops; | |
6778 | }; | |
6779 | extern void device_pm_lock(void); | |
6780 | extern void dpm_resume_noirq(pm_message_t state); | |
6781 | extern void dpm_resume_end(pm_message_t state); | |
6782 | extern void dpm_resume(pm_message_t state); | |
6783 | extern void dpm_complete(pm_message_t state); | |
6784 | extern void device_pm_unlock(void); | |
6785 | extern int dpm_suspend_noirq(pm_message_t state); | |
6786 | extern int dpm_suspend_start(pm_message_t state); | |
6787 | extern int dpm_suspend(pm_message_t state); | |
6788 | extern int dpm_prepare(pm_message_t state); | |
6789 | extern void __suspend_report_result(const char *function, void *fn, int ret); | |
6790 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); | |
6791 | extern int pm_generic_prepare(struct device *dev); | |
6792 | extern int pm_generic_suspend(struct device *dev); | |
6793 | extern int pm_generic_resume(struct device *dev); | |
6794 | extern int pm_generic_freeze(struct device *dev); | |
6795 | extern int pm_generic_thaw(struct device *dev); | |
6796 | extern int pm_generic_restore(struct device *dev); | |
6797 | extern int pm_generic_poweroff(struct device *dev); | |
6798 | extern void pm_generic_complete(struct device *dev); | |
6799 | enum dpm_order { | |
6800 | DPM_ORDER_NONE, | |
6801 | DPM_ORDER_DEV_AFTER_PARENT, | |
6802 | DPM_ORDER_PARENT_BEFORE_DEV, | |
6803 | DPM_ORDER_DEV_LAST, | |
6804 | }; | |
6805 | typedef struct { unsigned long bits[((((1 << 0)) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } nodemask_t; | |
6806 | extern nodemask_t _unused_nodemask_arg_; | |
6807 | static inline __attribute__((always_inline)) void __node_set(int node, volatile nodemask_t *dstp) | |
6808 | { | |
6809 | set_bit(node, dstp->bits); | |
6810 | } | |
6811 | static inline __attribute__((always_inline)) void __node_clear(int node, volatile nodemask_t *dstp) | |
6812 | { | |
6813 | clear_bit(node, dstp->bits); | |
6814 | } | |
6815 | static inline __attribute__((always_inline)) void __nodes_setall(nodemask_t *dstp, int nbits) | |
6816 | { | |
6817 | bitmap_fill(dstp->bits, nbits); | |
6818 | } | |
6819 | static inline __attribute__((always_inline)) void __nodes_clear(nodemask_t *dstp, int nbits) | |
6820 | { | |
6821 | bitmap_zero(dstp->bits, nbits); | |
6822 | } | |
6823 | static inline __attribute__((always_inline)) int __node_test_and_set(int node, nodemask_t *addr) | |
6824 | { | |
6825 | return test_and_set_bit(node, addr->bits); | |
6826 | } | |
6827 | static inline __attribute__((always_inline)) void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p, | |
6828 | const nodemask_t *src2p, int nbits) | |
6829 | { | |
6830 | bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); | |
6831 | } | |
6832 | static inline __attribute__((always_inline)) void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p, | |
6833 | const nodemask_t *src2p, int nbits) | |
6834 | { | |
6835 | bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); | |
6836 | } | |
6837 | static inline __attribute__((always_inline)) void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p, | |
6838 | const nodemask_t *src2p, int nbits) | |
6839 | { | |
6840 | bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); | |
6841 | } | |
6842 | static inline __attribute__((always_inline)) void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p, | |
6843 | const nodemask_t *src2p, int nbits) | |
6844 | { | |
6845 | bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); | |
6846 | } | |
6847 | static inline __attribute__((always_inline)) void __nodes_complement(nodemask_t *dstp, | |
6848 | const nodemask_t *srcp, int nbits) | |
6849 | { | |
6850 | bitmap_complement(dstp->bits, srcp->bits, nbits); | |
6851 | } | |
6852 | static inline __attribute__((always_inline)) int __nodes_equal(const nodemask_t *src1p, | |
6853 | const nodemask_t *src2p, int nbits) | |
6854 | { | |
6855 | return bitmap_equal(src1p->bits, src2p->bits, nbits); | |
6856 | } | |
6857 | static inline __attribute__((always_inline)) int __nodes_intersects(const nodemask_t *src1p, | |
6858 | const nodemask_t *src2p, int nbits) | |
6859 | { | |
6860 | return bitmap_intersects(src1p->bits, src2p->bits, nbits); | |
6861 | } | |
6862 | static inline __attribute__((always_inline)) int __nodes_subset(const nodemask_t *src1p, | |
6863 | const nodemask_t *src2p, int nbits) | |
6864 | { | |
6865 | return bitmap_subset(src1p->bits, src2p->bits, nbits); | |
6866 | } | |
6867 | static inline __attribute__((always_inline)) int __nodes_empty(const nodemask_t *srcp, int nbits) | |
6868 | { | |
6869 | return bitmap_empty(srcp->bits, nbits); | |
6870 | } | |
6871 | static inline __attribute__((always_inline)) int __nodes_full(const nodemask_t *srcp, int nbits) | |
6872 | { | |
6873 | return bitmap_full(srcp->bits, nbits); | |
6874 | } | |
6875 | static inline __attribute__((always_inline)) int __nodes_weight(const nodemask_t *srcp, int nbits) | |
6876 | { | |
6877 | return bitmap_weight(srcp->bits, nbits); | |
6878 | } | |
6879 | static inline __attribute__((always_inline)) void __nodes_shift_right(nodemask_t *dstp, | |
6880 | const nodemask_t *srcp, int n, int nbits) | |
6881 | { | |
6882 | bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); | |
6883 | } | |
6884 | static inline __attribute__((always_inline)) void __nodes_shift_left(nodemask_t *dstp, | |
6885 | const nodemask_t *srcp, int n, int nbits) | |
6886 | { | |
6887 | bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); | |
6888 | } | |
6889 | static inline __attribute__((always_inline)) int __first_node(const nodemask_t *srcp) | |
6890 | { | |
6891 | return ({ int __min1 = ((1 << 0)); int __min2 = (find_first_bit(srcp->bits, (1 << 0))); __min1 < __min2 ? __min1: __min2; }); | |
6892 | } | |
6893 | static inline __attribute__((always_inline)) int __next_node(int n, const nodemask_t *srcp) | |
6894 | { | |
6895 | return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_bit(srcp->bits, (1 << 0), n+1)); __min1 < __min2 ? __min1: __min2; }); | |
6896 | } | |
6897 | static inline __attribute__((always_inline)) void init_nodemask_of_node(nodemask_t *mask, int node) | |
6898 | { | |
6899 | __nodes_clear(&(*mask), (1 << 0)); | |
6900 | __node_set((node), &(*mask)); | |
6901 | } | |
6902 | static inline __attribute__((always_inline)) int __first_unset_node(const nodemask_t *maskp) | |
6903 | { | |
6904 | return ({ int __min1 = ((1 << 0)); int __min2 = (find_first_zero_bit(maskp->bits, (1 << 0))); __min1 < __min2 ? __min1: __min2; }) | |
6905 | ; | |
6906 | } | |
6907 | static inline __attribute__((always_inline)) int __nodemask_scnprintf(char *buf, int len, | |
6908 | const nodemask_t *srcp, int nbits) | |
6909 | { | |
6910 | return bitmap_scnprintf(buf, len, srcp->bits, nbits); | |
6911 | } | |
6912 | static inline __attribute__((always_inline)) int __nodemask_parse_user(const char *buf, int len, | |
6913 | nodemask_t *dstp, int nbits) | |
6914 | { | |
6915 | return bitmap_parse_user(buf, len, dstp->bits, nbits); | |
6916 | } | |
6917 | static inline __attribute__((always_inline)) int __nodelist_scnprintf(char *buf, int len, | |
6918 | const nodemask_t *srcp, int nbits) | |
6919 | { | |
6920 | return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); | |
6921 | } | |
6922 | static inline __attribute__((always_inline)) int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits) | |
6923 | { | |
6924 | return bitmap_parselist(buf, dstp->bits, nbits); | |
6925 | } | |
6926 | static inline __attribute__((always_inline)) int __node_remap(int oldbit, | |
6927 | const nodemask_t *oldp, const nodemask_t *newp, int nbits) | |
6928 | { | |
6929 | return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); | |
6930 | } | |
6931 | static inline __attribute__((always_inline)) void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp, | |
6932 | const nodemask_t *oldp, const nodemask_t *newp, int nbits) | |
6933 | { | |
6934 | bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); | |
6935 | } | |
6936 | static inline __attribute__((always_inline)) void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp, | |
6937 | const nodemask_t *relmapp, int nbits) | |
6938 | { | |
6939 | bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); | |
6940 | } | |
6941 | static inline __attribute__((always_inline)) void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp, | |
6942 | int sz, int nbits) | |
6943 | { | |
6944 | bitmap_fold(dstp->bits, origp->bits, sz, nbits); | |
6945 | } | |
6946 | enum node_states { | |
6947 | N_POSSIBLE, | |
6948 | N_ONLINE, | |
6949 | N_NORMAL_MEMORY, | |
6950 | N_HIGH_MEMORY, | |
6951 | N_CPU, | |
6952 | NR_NODE_STATES | |
6953 | }; | |
6954 | extern nodemask_t node_states[NR_NODE_STATES]; | |
6955 | static inline __attribute__((always_inline)) int node_state(int node, enum node_states state) | |
6956 | { | |
6957 | return node == 0; | |
6958 | } | |
6959 | static inline __attribute__((always_inline)) void node_set_state(int node, enum node_states state) | |
6960 | { | |
6961 | } | |
6962 | static inline __attribute__((always_inline)) void node_clear_state(int node, enum node_states state) | |
6963 | { | |
6964 | } | |
6965 | static inline __attribute__((always_inline)) int num_node_state(enum node_states state) | |
6966 | { | |
6967 | return 1; | |
6968 | } | |
6969 | struct nodemask_scratch { | |
6970 | nodemask_t mask1; | |
6971 | nodemask_t mask2; | |
6972 | }; | |
6973 | static inline __attribute__((always_inline)) int numa_node_id(void) | |
6974 | { | |
6975 | return 0; | |
6976 | } | |
6977 | static inline __attribute__((always_inline)) int early_cpu_to_node(int cpu) | |
6978 | { | |
6979 | return 0; | |
6980 | } | |
6981 | static inline __attribute__((always_inline)) void setup_node_to_cpumask_map(void) { } | |
6982 | extern const struct cpumask *cpu_coregroup_mask(int cpu); | |
6983 | static inline __attribute__((always_inline)) void arch_fix_phys_package_id(int num, u32 slot) | |
6984 | { | |
6985 | } | |
6986 | struct pci_bus; | |
6987 | void x86_pci_root_bus_res_quirks(struct pci_bus *b); | |
6988 | static inline __attribute__((always_inline)) int get_mp_bus_to_node(int busnum) | |
6989 | { | |
6990 | return 0; | |
6991 | } | |
6992 | static inline __attribute__((always_inline)) void set_mp_bus_to_node(int busnum, int node) | |
6993 | { | |
6994 | } | |
6995 | static inline __attribute__((always_inline)) void set_apicid_to_node(int apicid, s16 node) | |
6996 | { | |
6997 | } | |
6998 | static inline __attribute__((always_inline)) int numa_cpu_node(int cpu) | |
6999 | { | |
7000 | return (-1); | |
7001 | } | |
7002 | extern void set_highmem_pages_init(void); | |
7003 | static inline __attribute__((always_inline)) void numa_set_node(int cpu, int node) { } | |
7004 | static inline __attribute__((always_inline)) void numa_clear_node(int cpu) { } | |
7005 | static inline __attribute__((always_inline)) void init_cpu_to_node(void) { } | |
7006 | static inline __attribute__((always_inline)) void numa_add_cpu(int cpu) { } | |
7007 | static inline __attribute__((always_inline)) void numa_remove_cpu(int cpu) { } | |
7008 | struct mutex { | |
7009 | atomic_t count; | |
7010 | spinlock_t wait_lock; | |
7011 | struct list_head wait_list; | |
7012 | struct task_struct *owner; | |
7013 | const char *name; | |
7014 | void *magic; | |
7015 | struct lockdep_map dep_map; | |
7016 | }; | |
7017 | struct mutex_waiter { | |
7018 | struct list_head list; | |
7019 | struct task_struct *task; | |
7020 | void *magic; | |
7021 | }; | |
7022 | extern void mutex_destroy(struct mutex *lock); | |
7023 | extern void __mutex_init(struct mutex *lock, const char *name, | |
7024 | struct lock_class_key *key); | |
7025 | static inline __attribute__((always_inline)) int mutex_is_locked(struct mutex *lock) | |
7026 | { | |
7027 | return atomic_read(&lock->count) != 1; | |
7028 | } | |
7029 | extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); | |
7030 | extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); | |
7031 | extern int __attribute__((warn_unused_result)) mutex_lock_interruptible_nested(struct mutex *lock, | |
7032 | unsigned int subclass); | |
7033 | extern int __attribute__((warn_unused_result)) mutex_lock_killable_nested(struct mutex *lock, | |
7034 | unsigned int subclass); | |
7035 | extern int mutex_trylock(struct mutex *lock); | |
7036 | extern void mutex_unlock(struct mutex *lock); | |
7037 | extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); | |
7038 | typedef struct { | |
7039 | void *ldt; | |
7040 | int size; | |
7041 | struct mutex lock; | |
7042 | void *vdso; | |
7043 | } mm_context_t; | |
7044 | void leave_mm(int cpu); | |
7045 | enum xen_domain_type { | |
7046 | XEN_NATIVE, | |
7047 | XEN_PV_DOMAIN, | |
7048 | XEN_HVM_DOMAIN, | |
7049 | }; | |
7050 | static inline __attribute__((always_inline)) unsigned char readb(const volatile void *addr) { unsigned char ret; asm volatile("mov" "b" " %1,%0":"=q" (ret) :"m" (*(volatile unsigned char *)addr) :"memory"); return ret; } | |
7051 | static inline __attribute__((always_inline)) unsigned short readw(const volatile void *addr) { unsigned short ret; asm volatile("mov" "w" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned short *)addr) :"memory"); return ret; } | |
7052 | static inline __attribute__((always_inline)) unsigned int readl(const volatile void *addr) { unsigned int ret; asm volatile("mov" "l" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned int *)addr) :"memory"); return ret; } | |
7053 | static inline __attribute__((always_inline)) unsigned char __readb(const volatile void *addr) { unsigned char ret; asm volatile("mov" "b" " %1,%0":"=q" (ret) :"m" (*(volatile unsigned char *)addr) ); return ret; } | |
7054 | static inline __attribute__((always_inline)) unsigned short __readw(const volatile void *addr) { unsigned short ret; asm volatile("mov" "w" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned short *)addr) ); return ret; } | |
7055 | static inline __attribute__((always_inline)) unsigned int __readl(const volatile void *addr) { unsigned int ret; asm volatile("mov" "l" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned int *)addr) ); return ret; } | |
7056 | static inline __attribute__((always_inline)) void writeb(unsigned char val, volatile void *addr) { asm volatile("mov" "b" " %0,%1": :"q" (val), "m" (*(volatile unsigned char *)addr) :"memory"); } | |
7057 | static inline __attribute__((always_inline)) void writew(unsigned short val, volatile void *addr) { asm volatile("mov" "w" " %0,%1": :"r" (val), "m" (*(volatile unsigned short *)addr) :"memory"); } | |
7058 | static inline __attribute__((always_inline)) void writel(unsigned int val, volatile void *addr) { asm volatile("mov" "l" " %0,%1": :"r" (val), "m" (*(volatile unsigned int *)addr) :"memory"); } | |
7059 | static inline __attribute__((always_inline)) void __writeb(unsigned char val, volatile void *addr) { asm volatile("mov" "b" " %0,%1": :"q" (val), "m" (*(volatile unsigned char *)addr) ); } | |
7060 | static inline __attribute__((always_inline)) void __writew(unsigned short val, volatile void *addr) { asm volatile("mov" "w" " %0,%1": :"r" (val), "m" (*(volatile unsigned short *)addr) ); } | |
7061 | static inline __attribute__((always_inline)) void __writel(unsigned int val, volatile void *addr) { asm volatile("mov" "l" " %0,%1": :"r" (val), "m" (*(volatile unsigned int *)addr) ); } | |
7062 | static inline __attribute__((always_inline)) phys_addr_t virt_to_phys(volatile void *address) | |
7063 | { | |
7064 | return (((unsigned long)(address)) - ((unsigned long)(0xC0000000UL))); | |
7065 | } | |
7066 | static inline __attribute__((always_inline)) void *phys_to_virt(phys_addr_t address) | |
7067 | { | |
7068 | return ((void *)((unsigned long)(address)+((unsigned long)(0xC0000000UL)))); | |
7069 | } | |
7070 | static inline __attribute__((always_inline)) unsigned int isa_virt_to_bus(volatile void *address) | |
7071 | { | |
7072 | return (unsigned int)virt_to_phys(address); | |
7073 | } | |
7074 | extern void *ioremap_nocache(resource_size_t offset, unsigned long size); | |
7075 | extern void *ioremap_cache(resource_size_t offset, unsigned long size); | |
7076 | extern void *ioremap_prot(resource_size_t offset, unsigned long size, | |
7077 | unsigned long prot_val); | |
7078 | static inline __attribute__((always_inline)) void *ioremap(resource_size_t offset, unsigned long size) | |
7079 | { | |
7080 | return ioremap_nocache(offset, size); | |
7081 | } | |
7082 | extern void iounmap(volatile void *addr); | |
7083 | extern void set_iounmap_nonlazy(void); | |
7084 | extern unsigned int ioread8(void *); | |
7085 | extern unsigned int ioread16(void *); | |
7086 | extern unsigned int ioread16be(void *); | |
7087 | extern unsigned int ioread32(void *); | |
7088 | extern unsigned int ioread32be(void *); | |
7089 | extern void iowrite8(u8, void *); | |
7090 | extern void iowrite16(u16, void *); | |
7091 | extern void iowrite16be(u16, void *); | |
7092 | extern void iowrite32(u32, void *); | |
7093 | extern void iowrite32be(u32, void *); | |
7094 | extern void ioread8_rep(void *port, void *buf, unsigned long count); | |
7095 | extern void ioread16_rep(void *port, void *buf, unsigned long count); | |
7096 | extern void ioread32_rep(void *port, void *buf, unsigned long count); | |
7097 | extern void iowrite8_rep(void *port, const void *buf, unsigned long count); | |
7098 | extern void iowrite16_rep(void *port, const void *buf, unsigned long count); | |
7099 | extern void iowrite32_rep(void *port, const void *buf, unsigned long count); | |
7100 | extern void *ioport_map(unsigned long port, unsigned int nr); | |
7101 | extern void ioport_unmap(void *); | |
7102 | struct pci_dev; | |
7103 | extern void *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); | |
7104 | extern void pci_iounmap(struct pci_dev *dev, void *); | |
7105 | struct vm_area_struct; | |
7106 | struct vm_struct { | |
7107 | struct vm_struct *next; | |
7108 | void *addr; | |
7109 | unsigned long size; | |
7110 | unsigned long flags; | |
7111 | struct page **pages; | |
7112 | unsigned int nr_pages; | |
7113 | phys_addr_t phys_addr; | |
7114 | void *caller; | |
7115 | }; | |
7116 | extern void vm_unmap_ram(const void *mem, unsigned int count); | |
7117 | extern void *vm_map_ram(struct page **pages, unsigned int count, | |
7118 | int node, pgprot_t prot); | |
7119 | extern void vm_unmap_aliases(void); | |
7120 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vmalloc_init(void); | |
7121 | extern void *vmalloc(unsigned long size); | |
7122 | extern void *vzalloc(unsigned long size); | |
7123 | extern void *vmalloc_user(unsigned long size); | |
7124 | extern void *vmalloc_node(unsigned long size, int node); | |
7125 | extern void *vzalloc_node(unsigned long size, int node); | |
7126 | extern void *vmalloc_exec(unsigned long size); | |
7127 | extern void *vmalloc_32(unsigned long size); | |
7128 | extern void *vmalloc_32_user(unsigned long size); | |
7129 | extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); | |
7130 | extern void *__vmalloc_node_range(unsigned long size, unsigned long align, | |
7131 | unsigned long start, unsigned long end, gfp_t gfp_mask, | |
7132 | pgprot_t prot, int node, void *caller); | |
7133 | extern void vfree(const void *addr); | |
7134 | extern void *vmap(struct page **pages, unsigned int count, | |
7135 | unsigned long flags, pgprot_t prot); | |
7136 | extern void vunmap(const void *addr); | |
7137 | extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, | |
7138 | unsigned long pgoff); | |
7139 | void vmalloc_sync_all(void); | |
7140 | static inline __attribute__((always_inline)) size_t get_vm_area_size(const struct vm_struct *area) | |
7141 | { | |
7142 | return area->size - ((1UL) << 12); | |
7143 | } | |
7144 | extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); | |
7145 | extern struct vm_struct *get_vm_area_caller(unsigned long size, | |
7146 | unsigned long flags, void *caller); | |
7147 | extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, | |
7148 | unsigned long start, unsigned long end); | |
7149 | extern struct vm_struct *__get_vm_area_caller(unsigned long size, | |
7150 | unsigned long flags, | |
7151 | unsigned long start, unsigned long end, | |
7152 | void *caller); | |
7153 | extern struct vm_struct *remove_vm_area(const void *addr); | |
7154 | extern int map_vm_area(struct vm_struct *area, pgprot_t prot, | |
7155 | struct page ***pages); | |
7156 | extern int map_kernel_range_noflush(unsigned long start, unsigned long size, | |
7157 | pgprot_t prot, struct page **pages); | |
7158 | extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); | |
7159 | extern void unmap_kernel_range(unsigned long addr, unsigned long size); | |
7160 | extern struct vm_struct *alloc_vm_area(size_t size); | |
7161 | extern void free_vm_area(struct vm_struct *area); | |
7162 | extern long vread(char *buf, char *addr, unsigned long count); | |
7163 | extern long vwrite(char *buf, char *addr, unsigned long count); | |
7164 | extern rwlock_t vmlist_lock; | |
7165 | extern struct vm_struct *vmlist; | |
7166 | extern __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) void vm_area_register_early(struct vm_struct *vm, size_t align); | |
7167 | struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, | |
7168 | const size_t *sizes, int nr_vms, | |
7169 | size_t align); | |
7170 | void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); | |
7171 | static inline __attribute__((always_inline)) void | |
7172 | memset_io(volatile void *addr, unsigned char val, size_t count) | |
7173 | { | |
7174 | __builtin_memset((void *)addr, val, count); | |
7175 | } | |
7176 | static inline __attribute__((always_inline)) void | |
7177 | memcpy_fromio(void *dst, const volatile void *src, size_t count) | |
7178 | { | |
7179 | __builtin_memcpy(dst, (const void *)src, count); | |
7180 | } | |
7181 | static inline __attribute__((always_inline)) void | |
7182 | memcpy_toio(volatile void *dst, const void *src, size_t count) | |
7183 | { | |
7184 | __builtin_memcpy((void *)dst, src, count); | |
7185 | } | |
7186 | static inline __attribute__((always_inline)) void flush_write_buffers(void) | |
7187 | { | |
7188 | } | |
7189 | extern void native_io_delay(void); | |
7190 | extern int io_delay_type; | |
7191 | extern void io_delay_init(void); | |
7192 | static inline __attribute__((always_inline)) void outb(unsigned char value, int port) { asm volatile("out" "b" " %" "b" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((always_inline)) unsigned char inb(int port) { unsigned char value; asm volatile("in" "b" " %w1, %" "b" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((always_inline)) void outb_p(unsigned char value, int port) { outb(value, port); slow_down_io(); } static inline __attribute__((always_inline)) unsigned char inb_p(int port) { unsigned char value = inb(port); slow_down_io(); return value; } static inline __attribute__((always_inline)) void outsb(int port, const void *addr, unsigned long count) { asm volatile("rep; outs" "b" : "+S"(addr), "+c"(count) : "d"(port)); } static inline __attribute__((always_inline)) void insb(int port, void *addr, unsigned long count) { asm volatile("rep; ins" "b" : "+D"(addr), "+c"(count) : "d"(port)); } | |
7193 | static inline __attribute__((always_inline)) void outw(unsigned short value, int port) { asm volatile("out" "w" " %" "w" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((always_inline)) unsigned short inw(int port) { unsigned short value; asm volatile("in" "w" " %w1, %" "w" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((always_inline)) void outw_p(unsigned short value, int port) { outw(value, port); slow_down_io(); } static inline __attribute__((always_inline)) unsigned short inw_p(int port) { unsigned short value = inw(port); slow_down_io(); return value; } static inline __attribute__((always_inline)) void outsw(int port, const void *addr, unsigned long count) { asm volatile("rep; outs" "w" : "+S"(addr), "+c"(count) : "d"(port)); } static inline __attribute__((always_inline)) void insw(int port, void *addr, unsigned long count) { asm volatile("rep; ins" "w" : "+D"(addr), "+c"(count) : "d"(port)); } | |
7194 | static inline __attribute__((always_inline)) void outl(unsigned int value, int port) { asm volatile("out" "l" " %" "" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((always_inline)) unsigned int inl(int port) { unsigned int value; asm volatile("in" "l" " %w1, %" "" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((always_inline)) void outl_p(unsigned int value, int port) { outl(value, port); slow_down_io(); } static inline __attribute__((always_inline)) unsigned int inl_p(int port) { unsigned int value = inl(port); slow_down_io(); return value; } static inline __attribute__((always_inline)) void outsl(int port, const void *addr, unsigned long count) { asm volatile("rep; outs" "l" : "+S"(addr), "+c"(count) : "d"(port)); } static inline __attribute__((always_inline)) void insl(int port, void *addr, unsigned long count) { asm volatile("rep; ins" "l" : "+D"(addr), "+c"(count) : "d"(port)); } | |
7195 | extern void *xlate_dev_mem_ptr(unsigned long phys); | |
7196 | extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr); | |
7197 | extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, | |
7198 | unsigned long prot_val); | |
7199 | extern void *ioremap_wc(resource_size_t offset, unsigned long size); | |
7200 | extern void early_ioremap_init(void); | |
7201 | extern void early_ioremap_reset(void); | |
7202 | extern void *early_ioremap(resource_size_t phys_addr, | |
7203 | unsigned long size); | |
7204 | extern void *early_memremap(resource_size_t phys_addr, | |
7205 | unsigned long size); | |
7206 | extern void early_iounmap(void *addr, unsigned long size); | |
7207 | extern void fixup_early_ioremap(void); | |
7208 | extern bool is_early_ioremap_ptep(pte_t *ptep); | |
7209 | extern const unsigned char x86_trampoline_start []; | |
7210 | extern const unsigned char x86_trampoline_end []; | |
7211 | extern unsigned char *x86_trampoline_base; | |
7212 | extern unsigned long init_rsp; | |
7213 | extern unsigned long initial_code; | |
7214 | extern unsigned long initial_gs; | |
7215 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_trampolines(void); | |
7216 | extern const unsigned char trampoline_data[]; | |
7217 | extern const unsigned char trampoline_status[]; | |
7218 | static inline __attribute__((always_inline)) unsigned long trampoline_address(void) | |
7219 | { | |
7220 | return virt_to_phys(((void *)(x86_trampoline_base + ((const unsigned char *)(trampoline_data) - x86_trampoline_start)))); | |
7221 | } | |
7222 | int __acpi_acquire_global_lock(unsigned int *lock); | |
7223 | int __acpi_release_global_lock(unsigned int *lock); | |
7224 | extern int acpi_lapic; | |
7225 | extern int acpi_ioapic; | |
7226 | extern int acpi_noirq; | |
7227 | extern int acpi_strict; | |
7228 | extern int acpi_disabled; | |
7229 | extern int acpi_pci_disabled; | |
7230 | extern int acpi_skip_timer_override; | |
7231 | extern int acpi_use_timer_override; | |
7232 | extern int acpi_fix_pin2_polarity; | |
7233 | extern u8 acpi_sci_flags; | |
7234 | extern int acpi_sci_override_gsi; | |
7235 | void acpi_pic_sci_set_trigger(unsigned int, u16); | |
7236 | extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi, | |
7237 | int trigger, int polarity); | |
7238 | static inline __attribute__((always_inline)) void disable_acpi(void) | |
7239 | { | |
7240 | acpi_disabled = 1; | |
7241 | acpi_pci_disabled = 1; | |
7242 | acpi_noirq = 1; | |
7243 | } | |
7244 | extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); | |
7245 | static inline __attribute__((always_inline)) void acpi_noirq_set(void) { acpi_noirq = 1; } | |
7246 | static inline __attribute__((always_inline)) void acpi_disable_pci(void) | |
7247 | { | |
7248 | acpi_pci_disabled = 1; | |
7249 | acpi_noirq_set(); | |
7250 | } | |
7251 | extern int acpi_suspend_lowlevel(void); | |
7252 | extern const unsigned char acpi_wakeup_code[]; | |
7253 | extern void acpi_reserve_wakeup_memory(void); | |
/*
 * Cap the deepest allowed ACPI C-state. The ternary wrappers below are the
 * preprocessed expansion of ftrace branch profiling (each branch bumps a
 * miss_hit counter in the _ftrace_branch section); the underlying logic is:
 *   - on family 0x0F, vendor 2, model <= 0x05, stepping < 0x0A CPUs
 *     (presumably early AMD K8 steppings - matches the upstream acpi.h
 *     erratum workaround), force C1 only;
 *   - if amd_e400_c1e_detected is set, force C1 only;
 *   - otherwise leave the caller's max_cstate unchanged.
 */
7254 | static inline __attribute__((always_inline)) unsigned int acpi_processor_cstate_check(unsigned int max_cstate) | |
7255 | { | |
7256 | if (__builtin_constant_p(((boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == 2 && boot_cpu_data.x86_model <= 0x05 && boot_cpu_data.x86_mask < 0x0A))) ? !!((boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == 2 && boot_cpu_data.x86_model <= 0x05 && boot_cpu_data.x86_mask < 0x0A)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
7257 | "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h" | |
7258 | , .line = | |
7259 | 140 | |
7260 | , }; ______r = !!((boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == 2 && boot_cpu_data.x86_model <= 0x05 && boot_cpu_data.x86_mask < 0x0A)); ______f.miss_hit[______r]++; ______r; })) | |
7261 | return 1; | |
7262 | else if (__builtin_constant_p(((amd_e400_c1e_detected))) ? !!((amd_e400_c1e_detected)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h", .line = 142, }; ______r = !!((amd_e400_c1e_detected)); ______f.miss_hit[______r]++; ______r; })) | |
7263 | return 1; | |
7264 | else | |
7265 | return max_cstate; | |
7266 | } | |
/*
 * Whether this arch should evaluate the ACPI _PDC method. The statement
 * expression is the expanded per_cpu() accessor fetching CPU 0's cpuinfo_x86
 * via __per_cpu_offset[0]. Returns true for x86_vendor 0 or 5 (numeric
 * vendor ids; upstream these are the Intel/Centaur cases - verify against
 * the X86_VENDOR_* definitions elsewhere in this file).
 */
7267 | static inline __attribute__((always_inline)) bool arch_has_acpi_pdc(void) | |
7268 | { | |
7269 | struct cpuinfo_x86 *c = &(*({ do { const void *__vpp_verify = (typeof((&(cpu_info))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_info))) *)(&(cpu_info)))); (typeof((typeof(*(&(cpu_info))) *)(&(cpu_info)))) (__ptr + (((__per_cpu_offset[0])))); }); })); | |
7270 | return (c->x86_vendor == 0 || | |
7271 | c->x86_vendor == 5); | |
7272 | } | |
/*
 * Populate the capability word (buf[2]) of the _PDC buffer according to CPU
 * features. The giant conditionals are preprocessed cpu_has()/boot_cpu_has()
 * tests wrapped in ftrace branch profiling; each one checks a single bit of
 * c->x86_capability (bit 4*32+7, 0*32+22, 4*32+3 respectively). Bits are
 * unconditionally OR-ed in first, then more are added if the feature bits
 * are present, and one bit (0x0200) is cleared if bit 4*32+3 is absent.
 */
7273 | static inline __attribute__((always_inline)) void arch_acpi_set_pdc_bits(u32 *buf) | |
7274 | { | |
/* per_cpu(cpu_info, 0): this CPU's feature data */
7275 | struct cpuinfo_x86 *c = &(*({ do { const void *__vpp_verify = (typeof((&(cpu_info))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_info))) *)(&(cpu_info)))); (typeof((typeof(*(&(cpu_info))) *)(&(cpu_info)))) (__ptr + (((__per_cpu_offset[0])))); }); })); | |
/* baseline capabilities always advertised */
7276 | buf[2] |= ((0x0010) | (0x0008) | (0x0002) | (0x0100) | (0x0200)); | |
/* if feature bit 4*32+7 is set, advertise additional capabilities */
7277 | if (__builtin_constant_p((((__builtin_constant_p((4*32+ 7)) && ( ((((4*32+ 7))>>5)==0 && (1UL<<(((4*32+ 7))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 7))>>5)==1 && (1UL<<(((4*32+ 7))&31) & (0|0))) || ((((4*32+ 7))>>5)==2 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==3 && (1UL<<(((4*32+ 7))&31) & (0))) || ((((4*32+ 7))>>5)==4 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==5 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==6 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==7 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==8 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==9 && (1UL<<(((4*32+ 7))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 7))) ? constant_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability)))))))) ? !!(((__builtin_constant_p((4*32+ 7)) && ( ((((4*32+ 7))>>5)==0 && (1UL<<(((4*32+ 7))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 7))>>5)==1 && (1UL<<(((4*32+ 7))&31) & (0|0))) || ((((4*32+ 7))>>5)==2 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==3 && (1UL<<(((4*32+ 7))&31) & (0))) || ((((4*32+ 7))>>5)==4 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==5 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==6 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==7 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==8 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==9 && (1UL<<(((4*32+ 7))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 7))) ? 
constant_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h", .line = 161, }; ______r = !!(((__builtin_constant_p((4*32+ 7)) && ( ((((4*32+ 7))>>5)==0 && (1UL<<(((4*32+ 7))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 7))>>5)==1 && (1UL<<(((4*32+ 7))&31) & (0|0))) || ((((4*32+ 7))>>5)==2 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==3 && (1UL<<(((4*32+ 7))&31) & (0))) || ((((4*32+ 7))>>5)==4 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==5 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==6 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==7 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==8 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==9 && (1UL<<(((4*32+ 7))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 7))) ? constant_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) | |
7278 | buf[2] |= ((0x0008) | (0x0002) | (0x0020) | (0x0800) | (0x0001)); | |
/* if feature bit 0*32+22 is set, advertise 0x0004 too */
7279 | if (__builtin_constant_p((((__builtin_constant_p((0*32+22)) && ( ((((0*32+22))>>5)==0 && (1UL<<(((0*32+22))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+22))>>5)==1 && (1UL<<(((0*32+22))&31) & (0|0))) || ((((0*32+22))>>5)==2 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==3 && (1UL<<(((0*32+22))&31) & (0))) || ((((0*32+22))>>5)==4 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==5 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==6 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==7 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==8 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==9 && (1UL<<(((0*32+22))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+22))) ? constant_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+22)) && ( ((((0*32+22))>>5)==0 && (1UL<<(((0*32+22))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+22))>>5)==1 && (1UL<<(((0*32+22))&31) & (0|0))) || ((((0*32+22))>>5)==2 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==3 && (1UL<<(((0*32+22))&31) & (0))) || ((((0*32+22))>>5)==4 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==5 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==6 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==7 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==8 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==9 && (1UL<<(((0*32+22))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+22))) ? 
constant_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h", .line = 164, }; ______r = !!(((__builtin_constant_p((0*32+22)) && ( ((((0*32+22))>>5)==0 && (1UL<<(((0*32+22))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+22))>>5)==1 && (1UL<<(((0*32+22))&31) & (0|0))) || ((((0*32+22))>>5)==2 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==3 && (1UL<<(((0*32+22))&31) & (0))) || ((((0*32+22))>>5)==4 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==5 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==6 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==7 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==8 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==9 && (1UL<<(((0*32+22))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+22))) ? constant_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) | |
7280 | buf[2] |= (0x0004); | |
/* if feature bit 4*32+3 is NOT set, withdraw the 0x0200 capability */
7281 | if (__builtin_constant_p(((!(__builtin_constant_p((4*32+ 3)) && ( ((((4*32+ 3))>>5)==0 && (1UL<<(((4*32+ 3))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 3))>>5)==1 && (1UL<<(((4*32+ 3))&31) & (0|0))) || ((((4*32+ 3))>>5)==2 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==3 && (1UL<<(((4*32+ 3))&31) & (0))) || ((((4*32+ 3))>>5)==4 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==5 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==6 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==7 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==8 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==9 && (1UL<<(((4*32+ 3))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 3))) ? constant_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability)))))))) ? !!((!(__builtin_constant_p((4*32+ 3)) && ( ((((4*32+ 3))>>5)==0 && (1UL<<(((4*32+ 3))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 3))>>5)==1 && (1UL<<(((4*32+ 3))&31) & (0|0))) || ((((4*32+ 3))>>5)==2 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==3 && (1UL<<(((4*32+ 3))&31) & (0))) || ((((4*32+ 3))>>5)==4 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==5 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==6 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==7 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==8 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==9 && (1UL<<(((4*32+ 3))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 3))) ? 
constant_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h", .line = 170, }; ______r = !!((!(__builtin_constant_p((4*32+ 3)) && ( ((((4*32+ 3))>>5)==0 && (1UL<<(((4*32+ 3))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 3))>>5)==1 && (1UL<<(((4*32+ 3))&31) & (0|0))) || ((((4*32+ 3))>>5)==2 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==3 && (1UL<<(((4*32+ 3))&31) & (0))) || ((((4*32+ 3))>>5)==4 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==5 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==6 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==7 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==8 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==9 && (1UL<<(((4*32+ 3))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 3))) ? constant_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) | |
7282 | buf[2] &= ~((0x0200)); | |
7283 | } | |
7284 | extern unsigned long __FIXADDR_TOP; | |
/*
 * Compile-time fixmap slot indices. Each index maps to a fixed virtual
 * address computed as __FIXADDR_TOP - (idx << 12) (see fix_to_virt below),
 * so higher indices sit at lower addresses.
 */
7285 | enum fixed_addresses { | |
7286 | FIX_HOLE, | |
7287 | FIX_VDSO, | |
7288 | FIX_DBGP_BASE, | |
7289 | FIX_EARLYCON_MEM_BASE, | |
/* MMIO windows for the local APIC and up to 64 I/O APICs */
7290 | FIX_APIC_BASE, | |
7291 | FIX_IO_APIC_BASE_0, | |
7292 | FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + 64 - 1, | |
/* KM_TYPE_NR kmap slots per CPU, 8 CPUs */
7293 | FIX_KMAP_BEGIN, | |
7294 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*8)-1, | |
7295 | FIX_PCIE_MCFG, | |
7296 | FIX_PARAVIRT_BOOTMAP, | |
7297 | FIX_TEXT_POKE1, | |
7298 | FIX_TEXT_POKE0, | |
7299 | __end_of_permanent_fixed_addresses, | |
/*
 * Boot-time mappings: 64*4 slots, aligned so the range does not cross a
 * 512-slot boundary (the XOR/mask expression tests for a crossing and,
 * if found, rounds FIX_BTMAP_END up to the next aligned position).
 */
7300 | FIX_BTMAP_END = | |
7301 | (__end_of_permanent_fixed_addresses ^ | |
7302 | (__end_of_permanent_fixed_addresses + (64 * 4) - 1)) & | |
7303 | -512 | |
7304 | ? __end_of_permanent_fixed_addresses + (64 * 4) - | |
7305 | (__end_of_permanent_fixed_addresses & ((64 * 4) - 1)) | |
7306 | : __end_of_permanent_fixed_addresses, | |
7307 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + (64 * 4) - 1, | |
7308 | FIX_WP_TEST, | |
7309 | __end_of_fixed_addresses | |
7310 | }; | |
7311 | extern void reserve_top_address(unsigned long reserve); | |
7312 | extern int fixmaps_set; | |
7313 | extern pte_t *kmap_pte; | |
7314 | extern pgprot_t kmap_prot; | |
7315 | extern pte_t *pkmap_page_table; | |
7316 | void __native_set_fixmap(enum fixed_addresses idx, pte_t pte); | |
7317 | void native_set_fixmap(enum fixed_addresses idx, | |
7318 | phys_addr_t phys, pgprot_t flags); | |
7319 | extern void __this_fixmap_does_not_exist(void); | |
/*
 * Translate a fixmap index to its fixed virtual address:
 * __FIXADDR_TOP - (idx << 12). The guard calls the deliberately-undefined
 * __this_fixmap_does_not_exist() for out-of-range idx; with a compile-time
 * constant idx the branch folds away and an invalid index becomes a link
 * error instead of a runtime check. The ternary wrapper is the expanded
 * ftrace branch profiler.
 */
7320 | static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long fix_to_virt(const unsigned int idx) | |
7321 | { | |
7322 | if (__builtin_constant_p(((idx >= __end_of_fixed_addresses))) ? !!((idx >= __end_of_fixed_addresses)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 210, }; ______r = !!((idx >= __end_of_fixed_addresses)); ______f.miss_hit[______r]++; ______r; })) | |
7323 | __this_fixmap_does_not_exist(); | |
7324 | return (((unsigned long)__FIXADDR_TOP) - ((idx) << 12)); | |
7325 | } | |
/*
 * Inverse of fix_to_virt: map a virtual address inside the fixmap region
 * back to its fixmap index. The do/while monster is the preprocessed
 * BUG_ON(vaddr out of range) - a ud2 trap plus a __bug_table entry - with
 * ftrace branch-profiling wrappers around the range test. The final line
 * masks vaddr down to its 4K page base before computing the index.
 */
7326 | static inline __attribute__((always_inline)) unsigned long virt_to_fix(const unsigned long vaddr) | |
7327 | { | |
7328 | do { if (__builtin_constant_p((((__builtin_constant_p(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) ? !!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 218, }; ______r = __builtin_expect(!!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) ? !!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 218, }; ______r = __builtin_expect(!!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 218, }; ______r = !!(((__builtin_constant_p(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) ? 
!!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 218, }; ______r = __builtin_expect(!!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h"), "i" (218), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0); | |
7329 | return ((((unsigned long)__FIXADDR_TOP) - ((vaddr)&(~(((1UL) << 12)-1)))) >> 12); | |
7330 | } | |
7331 | static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long | |
7332 | __set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) | |
7333 | { | |
7334 | __set_fixmap(idx, phys, flags); | |
7335 | return fix_to_virt(idx) + (phys & (((1UL) << 12) - 1)); | |
7336 | } | |
7337 | extern void generic_apic_probe(void); | |
7338 | extern unsigned int apic_verbosity; | |
7339 | extern int local_apic_timer_c2_ok; | |
7340 | extern int disable_apic; | |
7341 | extern void __inquire_remote_apic(int apicid); | |
/*
 * Query a remote APIC for debugging, but only at verbosity >= 2
 * (the condition is wrapped in the expanded ftrace branch profiler).
 */
7342 | static inline __attribute__((always_inline)) void default_inquire_remote_apic(int apicid) | |
7343 | { | |
7344 | if (__builtin_constant_p(((apic_verbosity >= 2))) ? !!((apic_verbosity >= 2)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/apic.h", .line = 63, }; ______r = !!((apic_verbosity >= 2)); ______f.miss_hit[______r]++; ______r; })) | |
7345 | __inquire_remote_apic(apicid); | |
7346 | } | |
7347 | static inline __attribute__((always_inline)) bool apic_from_smp_config(void) | |
7348 | { | |
7349 | return smp_found_config && !disable_apic; | |
7350 | } | |
/* vSMP support is compiled out in this configuration; always report "no". */
static inline __attribute__((always_inline)) int is_vsmp_box(void)
{
	return 0;
}
7355 | extern void xapic_wait_icr_idle(void); | |
7356 | extern u32 safe_xapic_wait_icr_idle(void); | |
7357 | extern void xapic_icr_write(u32, u32); | |
7358 | extern int setup_profiling_timer(unsigned int); | |
/*
 * Write a 32-bit local-APIC register through its MMIO fixmap window.
 * The inline asm is an expanded "alternative": the default instruction is
 * a plain movl, patched at boot to xchgl on CPUs with feature bit 3*32+19
 * (an APIC-write serialization workaround - see the upstream asm/apic.h).
 */
7359 | static inline __attribute__((always_inline)) void native_apic_mem_write(u32 reg, u32 v) | |
7360 | { | |
7361 | volatile u32 *addr = (volatile u32 *)((fix_to_virt(FIX_APIC_BASE)) + reg); | |
7362 | asm volatile ("661:\n\t" "movl %0, %1" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(3*32+19)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "xchgl %0, %1" "\n664:\n" ".previous" : "=r" (v), "=m" (*addr) : "i" (0), "0" (v), "m" (*addr)) | |
7363 | ; | |
7364 | } | |
7365 | static inline __attribute__((always_inline)) u32 native_apic_mem_read(u32 reg) | |
7366 | { | |
7367 | return *((volatile u32 *)((fix_to_virt(FIX_APIC_BASE)) + reg)); | |
7368 | } | |
7369 | extern void native_apic_wait_icr_idle(void); | |
7370 | extern u32 native_safe_apic_wait_icr_idle(void); | |
7371 | extern void native_apic_icr_write(u32 low, u32 id); | |
7372 | extern u64 native_apic_icr_read(void); | |
7373 | extern int x2apic_mode; | |
/* x2APIC support is compiled out: probes are no-ops, state reads as disabled. */
static inline __attribute__((always_inline)) void check_x2apic(void)
{
	/* nothing to probe */
}
static inline __attribute__((always_inline)) void enable_x2apic(void)
{
	/* nothing to enable */
}
static inline __attribute__((always_inline)) int x2apic_enabled(void)
{
	return 0;	/* never enabled in this configuration */
}
static inline __attribute__((always_inline)) void x2apic_force_phys(void)
{
	/* nothing to force */
}
7387 | extern void enable_IR_x2apic(void); | |
7388 | extern int get_physical_broadcast(void); | |
7389 | extern int lapic_get_maxlvt(void); | |
7390 | extern void clear_local_APIC(void); | |
7391 | extern void connect_bsp_APIC(void); | |
7392 | extern void disconnect_bsp_APIC(int virt_wire_setup); | |
7393 | extern void disable_local_APIC(void); | |
7394 | extern void lapic_shutdown(void); | |
7395 | extern int verify_local_APIC(void); | |
7396 | extern void sync_Arb_IDs(void); | |
7397 | extern void init_bsp_APIC(void); | |
7398 | extern void setup_local_APIC(void); | |
7399 | extern void end_local_APIC_setup(void); | |
7400 | extern void bsp_end_local_APIC_setup(void); | |
7401 | extern void init_apic_mappings(void); | |
7402 | void register_lapic_address(unsigned long address); | |
7403 | extern void setup_boot_APIC_clock(void); | |
7404 | extern void setup_secondary_APIC_clock(void); | |
7405 | extern int APIC_init_uniprocessor(void); | |
7406 | extern int apic_force_enable(unsigned long addr); | |
/* Clustered-APIC boxes are not supported in this configuration. */
static inline __attribute__((always_inline)) int apic_is_clustered_box(void)
{
	return 0;
}
7411 | extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask); | |
/*
 * Per-driver APIC operations table. One instance exists per supported APIC
 * flavor; the active one is selected into the global 'apic' pointer and
 * dispatched through by the apic_read/apic_write/... wrappers below.
 */
7412 | struct apic { | |
7413 | char *name; | |
/* driver selection and identification */
7414 | int (*probe)(void); | |
7415 | int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); | |
7416 | int (*apic_id_registered)(void); | |
7417 | u32 irq_delivery_mode; | |
7418 | u32 irq_dest_mode; | |
7419 | const struct cpumask *(*target_cpus)(void); | |
7420 | int disable_esr; | |
7421 | int dest_logical; | |
/* APIC-id bookkeeping against the physical-presence map */
7422 | unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid); | |
7423 | unsigned long (*check_apicid_present)(int apicid); | |
7424 | void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); | |
7425 | void (*init_apic_ldr)(void); | |
7426 | void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap); | |
7427 | void (*setup_apic_routing)(void); | |
7428 | int (*multi_timer_check)(int apic, int irq); | |
7429 | int (*cpu_present_to_apicid)(int mps_cpu); | |
7430 | void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap); | |
7431 | void (*setup_portio_remap)(void); | |
7432 | int (*check_phys_apicid_present)(int phys_apicid); | |
7433 | void (*enable_apic_mode)(void); | |
7434 | int (*phys_pkg_id)(int cpuid_apic, int index_msb); | |
7435 | int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid); | |
/* encode/decode of the hardware APIC-id register value */
7436 | unsigned int (*get_apic_id)(unsigned long x); | |
7437 | unsigned long (*set_apic_id)(unsigned int id); | |
7438 | unsigned long apic_id_mask; | |
/* cpumask -> IPI destination encoding */
7439 | unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); | |
7440 | unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, | |
7441 | const struct cpumask *andmask); | |
/* inter-processor interrupt senders */
7442 | void (*send_IPI_mask)(const struct cpumask *mask, int vector); | |
7443 | void (*send_IPI_mask_allbutself)(const struct cpumask *mask, | |
7444 | int vector); | |
7445 | void (*send_IPI_allbutself)(int vector); | |
7446 | void (*send_IPI_all)(int vector); | |
7447 | void (*send_IPI_self)(int vector); | |
/* secondary-CPU bringup hooks */
7448 | int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip); | |
7449 | int trampoline_phys_low; | |
7450 | int trampoline_phys_high; | |
7451 | void (*wait_for_init_deassert)(atomic_t *deassert); | |
7452 | void (*smp_callin_clear_local_apic)(void); | |
7453 | void (*inquire_remote_apic)(int apicid); | |
/* raw register access and ICR handling */
7454 | u32 (*read)(u32 reg); | |
7455 | void (*write)(u32 reg, u32 v); | |
7456 | u64 (*icr_read)(void); | |
7457 | void (*icr_write)(u32 low, u32 high); | |
7458 | void (*wait_icr_idle)(void); | |
7459 | u32 (*safe_wait_icr_idle)(void); | |
7460 | int (*x86_32_early_logical_apicid)(int cpu); | |
7461 | int (*x86_32_numa_cpu_node)(int cpu); | |
7462 | }; | |
7463 | extern struct apic *apic; | |
7464 | extern struct apic *__apicdrivers[], *__apicdrivers_end[]; | |
7465 | extern atomic_t init_deasserted; | |
7466 | extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip); | |
7467 | static inline __attribute__((always_inline)) u32 apic_read(u32 reg) | |
7468 | { | |
7469 | return apic->read(reg); | |
7470 | } | |
7471 | static inline __attribute__((always_inline)) void apic_write(u32 reg, u32 val) | |
7472 | { | |
7473 | apic->write(reg, val); | |
7474 | } | |
7475 | static inline __attribute__((always_inline)) u64 apic_icr_read(void) | |
7476 | { | |
7477 | return apic->icr_read(); | |
7478 | } | |
7479 | static inline __attribute__((always_inline)) void apic_icr_write(u32 low, u32 high) | |
7480 | { | |
7481 | apic->icr_write(low, high); | |
7482 | } | |
7483 | static inline __attribute__((always_inline)) void apic_wait_icr_idle(void) | |
7484 | { | |
7485 | apic->wait_icr_idle(); | |
7486 | } | |
7487 | static inline __attribute__((always_inline)) u32 safe_apic_wait_icr_idle(void) | |
7488 | { | |
7489 | return apic->safe_wait_icr_idle(); | |
7490 | } | |
/* Acknowledge the current interrupt by writing 0 to APIC register 0xB0
 * (the EOI register on x86 local APICs). */
static inline __attribute__((always_inline)) void ack_APIC_irq(void)
{
	apic_write(0xB0, 0);
}
/*
 * Extract the APIC id from a raw ID-register value. Reads the version
 * register (0x30) first; the monster conditional is the expanded ftrace
 * branch profiler around "version is integrated APIC (>= 0x14) or feature
 * bit 3*32+26 set": those get the full 8-bit id (bits 31:24), older
 * external APICs only 4 bits.
 */
7495 | static inline __attribute__((always_inline)) unsigned default_get_apic_id(unsigned long x) | |
7496 | { | |
7497 | unsigned int ver = ((apic_read(0x30)) & 0xFFu); | |
7498 | if (__builtin_constant_p(((((ver) >= 0x14) || (__builtin_constant_p((3*32+26)) && ( ((((3*32+26))>>5)==0 && (1UL<<(((3*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((3*32+26))>>5)==1 && (1UL<<(((3*32+26))&31) & (0|0))) || ((((3*32+26))>>5)==2 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==3 && (1UL<<(((3*32+26))&31) & (0))) || ((((3*32+26))>>5)==4 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==5 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==6 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==7 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==8 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==9 && (1UL<<(((3*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((3*32+26))) ? constant_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!((((ver) >= 0x14) || (__builtin_constant_p((3*32+26)) && ( ((((3*32+26))>>5)==0 && (1UL<<(((3*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((3*32+26))>>5)==1 && (1UL<<(((3*32+26))&31) & (0|0))) || ((((3*32+26))>>5)==2 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==3 && (1UL<<(((3*32+26))&31) & (0))) || ((((3*32+26))>>5)==4 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==5 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==6 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==7 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==8 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==9 && (1UL<<(((3*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((3*32+26))) ? 
constant_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/apic.h", .line = 468, }; ______r = !!((((ver) >= 0x14) || (__builtin_constant_p((3*32+26)) && ( ((((3*32+26))>>5)==0 && (1UL<<(((3*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((3*32+26))>>5)==1 && (1UL<<(((3*32+26))&31) & (0|0))) || ((((3*32+26))>>5)==2 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==3 && (1UL<<(((3*32+26))&31) & (0))) || ((((3*32+26))>>5)==4 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==5 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==6 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==7 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==8 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==9 && (1UL<<(((3*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((3*32+26))) ? constant_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) | |
7499 | return (x >> 24) & 0xFF; | |
7500 | else | |
7501 | return (x >> 24) & 0x0F; | |
7502 | } | |
7503 | static inline __attribute__((always_inline)) void default_wait_for_init_deassert(atomic_t *deassert) | |
7504 | { | |
7505 | while (!atomic_read(deassert)) | |
7506 | cpu_relax(); | |
7507 | return; | |
7508 | } | |
7509 | extern struct apic *generic_bigsmp_probe(void); | |
7510 | static inline __attribute__((always_inline)) const struct cpumask *default_target_cpus(void) | |
7511 | { | |
7512 | return cpu_online_mask; | |
7513 | } | |
7514 | extern __attribute__((section(".data..percpu" ""))) __typeof__(u16) x86_bios_cpu_apicid; extern __typeof__(u16) *x86_bios_cpu_apicid_early_ptr; extern __typeof__(u16) x86_bios_cpu_apicid_early_map[]; | |
7515 | static inline __attribute__((always_inline)) unsigned int read_apic_id(void) | |
7516 | { | |
7517 | unsigned int reg; | |
7518 | reg = apic_read(0x20); | |
7519 | return apic->get_apic_id(reg); | |
7520 | } | |
7521 | extern void default_setup_apic_routing(void); | |
7522 | extern struct apic apic_noop; | |
/* No-op APIC driver: report 0xFF (presumably BAD_APICID - verify against
 * the driver code) for every cpu. */
static inline __attribute__((always_inline)) int noop_x86_32_early_logical_apicid(int cpu)
{
	return 0xFFu;
}
7527 | extern void default_init_apic_ldr(void); | |
/*
 * Is this CPU's APIC id recorded in phys_cpu_present_map? The ternary is
 * the expanded test_bit(); note read_apic_id() appears in both arms and is
 * re-evaluated in whichever arm is taken.
 */
7528 | static inline __attribute__((always_inline)) int default_apic_id_registered(void) | |
7529 | { | |
7530 | return (__builtin_constant_p((read_apic_id())) ? constant_test_bit((read_apic_id()), ((phys_cpu_present_map).mask)) : variable_test_bit((read_apic_id()), ((phys_cpu_present_map).mask))); | |
7531 | } | |
/* Physical package id = the APIC id bits above bit position index_msb. */
static inline __attribute__((always_inline)) int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
	int pkg = cpuid_apic >> index_msb;

	return pkg;
}
7536 | static inline __attribute__((always_inline)) unsigned int | |
7537 | default_cpu_mask_to_apicid(const struct cpumask *cpumask) | |
7538 | { | |
7539 | return ((cpumask)->bits)[0] & 0xFFu; | |
7540 | } | |
7541 | static inline __attribute__((always_inline)) unsigned int | |
7542 | default_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | |
7543 | const struct cpumask *andmask) | |
7544 | { | |
7545 | unsigned long mask1 = ((cpumask)->bits)[0]; | |
7546 | unsigned long mask2 = ((andmask)->bits)[0]; | |
7547 | unsigned long mask3 = ((cpu_online_mask)->bits)[0]; | |
7548 | return (unsigned int)(mask1 & mask2 & mask3); | |
7549 | } | |
/* Is @apicid set in @map? (expanded test_bit() on the physid mask). */
7550 | static inline __attribute__((always_inline)) unsigned long default_check_apicid_used(physid_mask_t *map, int apicid) | |
7551 | { | |
7552 | return (__builtin_constant_p((apicid)) ? constant_test_bit((apicid), ((*map).mask)) : variable_test_bit((apicid), ((*map).mask))); | |
7553 | } | |
/* Is APIC id @bit present in phys_cpu_present_map? (expanded test_bit()). */
7554 | static inline __attribute__((always_inline)) unsigned long default_check_apicid_present(int bit) | |
7555 | { | |
7556 | return (__builtin_constant_p((bit)) ? constant_test_bit((bit), ((phys_cpu_present_map).mask)) : variable_test_bit((bit), ((phys_cpu_present_map).mask))); | |
7557 | } | |
7558 | static inline __attribute__((always_inline)) void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) | |
7559 | { | |
7560 | *retmap = *phys_map; | |
7561 | } | |
/*
 * Map an MP-table CPU number to its BIOS-reported APIC id. The condition
 * is the expanded "mps_cpu < nr_cpu_ids && cpumask_test_cpu(mps_cpu,
 * cpu_present_mask)" wrapped in ftrace branch profiling; the return line
 * is the expanded per_cpu(x86_bios_cpu_apicid, mps_cpu) accessor.
 * Returns 0xFF for CPUs that are out of range or not present.
 */
7562 | static inline __attribute__((always_inline)) int __default_cpu_present_to_apicid(int mps_cpu) | |
7563 | { | |
7564 | if (__builtin_constant_p(((mps_cpu < nr_cpu_ids && (__builtin_constant_p((cpumask_check((mps_cpu)))) ? constant_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits))) : variable_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits))))))) ? !!((mps_cpu < nr_cpu_ids && (__builtin_constant_p((cpumask_check((mps_cpu)))) ? constant_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits))) : variable_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits)))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/apic.h", .line = 594, }; ______r = !!((mps_cpu < nr_cpu_ids && (__builtin_constant_p((cpumask_check((mps_cpu)))) ? constant_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits))) : variable_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits)))))); ______f.miss_hit[______r]++; ______r; })) | |
7565 | return (int)(*({ do { const void *__vpp_verify = (typeof((&(x86_bios_cpu_apicid))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(x86_bios_cpu_apicid))) *)(&(x86_bios_cpu_apicid)))); (typeof((typeof(*(&(x86_bios_cpu_apicid))) *)(&(x86_bios_cpu_apicid)))) (__ptr + (((__per_cpu_offset[mps_cpu])))); }); })); | |
7566 | else | |
7567 | return 0xFFu; | |
7568 | } | |
7569 | static inline __attribute__((always_inline)) int | |
7570 | __default_check_phys_apicid_present(int phys_apicid) | |
7571 | { | |
7572 | return (__builtin_constant_p((phys_apicid)) ? constant_test_bit((phys_apicid), ((phys_cpu_present_map).mask)) : variable_test_bit((phys_apicid), ((phys_cpu_present_map).mask))); | |
7573 | } | |
7574 | static inline __attribute__((always_inline)) int default_cpu_present_to_apicid(int mps_cpu) | |
7575 | { | |
7576 | return __default_cpu_present_to_apicid(mps_cpu); | |
7577 | } | |
7578 | static inline __attribute__((always_inline)) int | |
7579 | default_check_phys_apicid_present(int phys_apicid) | |
7580 | { | |
7581 | return __default_check_phys_apicid_present(phys_apicid); | |
7582 | } | |
7583 | static inline __attribute__((always_inline)) int invalid_vm86_irq(int irq) | |
7584 | { | |
7585 | return irq < 3 || irq > 15; | |
7586 | } | |
7587 | union IO_APIC_reg_00 { | |
7588 | u32 raw; | |
7589 | struct { | |
7590 | u32 __reserved_2 : 14, | |
7591 | LTS : 1, | |
7592 | delivery_type : 1, | |
7593 | __reserved_1 : 8, | |
7594 | ID : 8; | |
7595 | } __attribute__ ((packed)) bits; | |
7596 | }; | |
7597 | union IO_APIC_reg_01 { | |
7598 | u32 raw; | |
7599 | struct { | |
7600 | u32 version : 8, | |
7601 | __reserved_2 : 7, | |
7602 | PRQ : 1, | |
7603 | entries : 8, | |
7604 | __reserved_1 : 8; | |
7605 | } __attribute__ ((packed)) bits; | |
7606 | }; | |
7607 | union IO_APIC_reg_02 { | |
7608 | u32 raw; | |
7609 | struct { | |
7610 | u32 __reserved_2 : 24, | |
7611 | arbitration : 4, | |
7612 | __reserved_1 : 4; | |
7613 | } __attribute__ ((packed)) bits; | |
7614 | }; | |
7615 | union IO_APIC_reg_03 { | |
7616 | u32 raw; | |
7617 | struct { | |
7618 | u32 boot_DT : 1, | |
7619 | __reserved_1 : 31; | |
7620 | } __attribute__ ((packed)) bits; | |
7621 | }; | |
7622 | struct IO_APIC_route_entry { | |
7623 | __u32 vector : 8, | |
7624 | delivery_mode : 3, | |
7625 | dest_mode : 1, | |
7626 | delivery_status : 1, | |
7627 | polarity : 1, | |
7628 | irr : 1, | |
7629 | trigger : 1, | |
7630 | mask : 1, | |
7631 | __reserved_2 : 15; | |
7632 | __u32 __reserved_3 : 24, | |
7633 | dest : 8; | |
7634 | } __attribute__ ((packed)); | |
7635 | struct IR_IO_APIC_route_entry { | |
7636 | __u64 vector : 8, | |
7637 | zero : 3, | |
7638 | index2 : 1, | |
7639 | delivery_status : 1, | |
7640 | polarity : 1, | |
7641 | irr : 1, | |
7642 | trigger : 1, | |
7643 | mask : 1, | |
7644 | reserved : 31, | |
7645 | format : 1, | |
7646 | index : 15; | |
7647 | } __attribute__ ((packed)); | |
7648 | extern int nr_ioapics; | |
7649 | extern int mpc_ioapic_id(int ioapic); | |
7650 | extern unsigned int mpc_ioapic_addr(int ioapic); | |
7651 | extern struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic); | |
7652 | extern int mp_irq_entries; | |
7653 | extern struct mpc_intsrc mp_irqs[256]; | |
7654 | extern int mpc_default_type; | |
7655 | extern int sis_apic_bug; | |
7656 | extern int skip_ioapic_setup; | |
7657 | extern int noioapicquirk; | |
7658 | extern int noioapicreroute; | |
7659 | extern int timer_through_8259; | |
7660 | struct io_apic_irq_attr; | |
7661 | extern int io_apic_set_pci_routing(struct device *dev, int irq, | |
7662 | struct io_apic_irq_attr *irq_attr); | |
7663 | void setup_IO_APIC_irq_extra(u32 gsi); | |
7664 | extern void ioapic_and_gsi_init(void); | |
7665 | extern void ioapic_insert_resources(void); | |
7666 | int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr); | |
7667 | extern int save_ioapic_entries(void); | |
7668 | extern void mask_ioapic_entries(void); | |
7669 | extern int restore_ioapic_entries(void); | |
7670 | extern int get_nr_irqs_gsi(void); | |
7671 | extern void setup_ioapic_ids_from_mpc(void); | |
7672 | extern void setup_ioapic_ids_from_mpc_nocheck(void); | |
7673 | struct mp_ioapic_gsi{ | |
7674 | u32 gsi_base; | |
7675 | u32 gsi_end; | |
7676 | }; | |
7677 | extern struct mp_ioapic_gsi mp_gsi_routing[]; | |
7678 | extern u32 gsi_top; | |
7679 | int mp_find_ioapic(u32 gsi); | |
7680 | int mp_find_ioapic_pin(int ioapic, u32 gsi); | |
7681 | void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) mp_register_ioapic(int id, u32 address, u32 gsi_base); | |
7682 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pre_init_apic_IRQ0(void); | |
7683 | extern void mp_save_irq(struct mpc_intsrc *m); | |
7684 | extern void disable_ioapic_support(void); | |
7685 | extern int smp_num_siblings; | |
7686 | extern unsigned int num_processors; | |
7687 | static inline __attribute__((always_inline)) bool cpu_has_ht_siblings(void) | |
7688 | { | |
7689 | bool has_siblings = false; | |
7690 | has_siblings = (__builtin_constant_p((0*32+28)) && ( ((((0*32+28))>>5)==0 && (1UL<<(((0*32+28))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+28))>>5)==1 && (1UL<<(((0*32+28))&31) & (0|0))) || ((((0*32+28))>>5)==2 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==3 && (1UL<<(((0*32+28))&31) & (0))) || ((((0*32+28))>>5)==4 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==5 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==6 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==7 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==8 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==9 && (1UL<<(((0*32+28))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+28))) ? constant_test_bit(((0*32+28)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+28)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) && smp_num_siblings > 1; | |
7691 | return has_siblings; | |
7692 | } | |
7693 | extern __attribute__((section(".data..percpu" ""))) __typeof__(cpumask_var_t) cpu_sibling_map; | |
7694 | extern __attribute__((section(".data..percpu" ""))) __typeof__(cpumask_var_t) cpu_core_map; | |
7695 | extern __attribute__((section(".data..percpu" ""))) __typeof__(cpumask_var_t) cpu_llc_shared_map; | |
7696 | extern __attribute__((section(".data..percpu" ""))) __typeof__(u16) cpu_llc_id; | |
7697 | extern __attribute__((section(".data..percpu" ""))) __typeof__(int) cpu_number; | |
7698 | static inline __attribute__((always_inline)) struct cpumask *cpu_sibling_mask(int cpu) | |
7699 | { | |
7700 | return (*({ do { const void *__vpp_verify = (typeof((&(cpu_sibling_map))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_sibling_map))) *)(&(cpu_sibling_map)))); (typeof((typeof(*(&(cpu_sibling_map))) *)(&(cpu_sibling_map)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })); | |
7701 | } | |
7702 | static inline __attribute__((always_inline)) struct cpumask *cpu_core_mask(int cpu) | |
7703 | { | |
7704 | return (*({ do { const void *__vpp_verify = (typeof((&(cpu_core_map))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_core_map))) *)(&(cpu_core_map)))); (typeof((typeof(*(&(cpu_core_map))) *)(&(cpu_core_map)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })); | |
7705 | } | |
7706 | static inline __attribute__((always_inline)) struct cpumask *cpu_llc_shared_mask(int cpu) | |
7707 | { | |
7708 | return (*({ do { const void *__vpp_verify = (typeof((&(cpu_llc_shared_map))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_llc_shared_map))) *)(&(cpu_llc_shared_map)))); (typeof((typeof(*(&(cpu_llc_shared_map))) *)(&(cpu_llc_shared_map)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })); | |
7709 | } | |
7710 | extern __attribute__((section(".data..percpu" ""))) __typeof__(u16) x86_cpu_to_apicid; extern __typeof__(u16) *x86_cpu_to_apicid_early_ptr; extern __typeof__(u16) x86_cpu_to_apicid_early_map[]; | |
7711 | extern __attribute__((section(".data..percpu" ""))) __typeof__(u16) x86_bios_cpu_apicid; extern __typeof__(u16) *x86_bios_cpu_apicid_early_ptr; extern __typeof__(u16) x86_bios_cpu_apicid_early_map[]; | |
7712 | extern __attribute__((section(".data..percpu" ""))) __typeof__(int) x86_cpu_to_logical_apicid; extern __typeof__(int) *x86_cpu_to_logical_apicid_early_ptr; extern __typeof__(int) x86_cpu_to_logical_apicid_early_map[]; | |
7713 | extern unsigned long stack_start; | |
7714 | struct smp_ops { | |
7715 | void (*smp_prepare_boot_cpu)(void); | |
7716 | void (*smp_prepare_cpus)(unsigned max_cpus); | |
7717 | void (*smp_cpus_done)(unsigned max_cpus); | |
7718 | void (*stop_other_cpus)(int wait); | |
7719 | void (*smp_send_reschedule)(int cpu); | |
7720 | int (*cpu_up)(unsigned cpu); | |
7721 | int (*cpu_disable)(void); | |
7722 | void (*cpu_die)(unsigned int cpu); | |
7723 | void (*play_dead)(void); | |
7724 | void (*send_call_func_ipi)(const struct cpumask *mask); | |
7725 | void (*send_call_func_single_ipi)(int cpu); | |
7726 | }; | |
7727 | extern void set_cpu_sibling_map(int cpu); | |
7728 | extern struct smp_ops smp_ops; | |
7729 | static inline __attribute__((always_inline)) void smp_send_stop(void) | |
7730 | { | |
7731 | smp_ops.stop_other_cpus(0); | |
7732 | } | |
7733 | static inline __attribute__((always_inline)) void stop_other_cpus(void) | |
7734 | { | |
7735 | smp_ops.stop_other_cpus(1); | |
7736 | } | |
7737 | static inline __attribute__((always_inline)) void smp_prepare_boot_cpu(void) | |
7738 | { | |
7739 | smp_ops.smp_prepare_boot_cpu(); | |
7740 | } | |
7741 | static inline __attribute__((always_inline)) void smp_prepare_cpus(unsigned int max_cpus) | |
7742 | { | |
7743 | smp_ops.smp_prepare_cpus(max_cpus); | |
7744 | } | |
7745 | static inline __attribute__((always_inline)) void smp_cpus_done(unsigned int max_cpus) | |
7746 | { | |
7747 | smp_ops.smp_cpus_done(max_cpus); | |
7748 | } | |
7749 | static inline __attribute__((always_inline)) int __cpu_up(unsigned int cpu) | |
7750 | { | |
7751 | return smp_ops.cpu_up(cpu); | |
7752 | } | |
7753 | static inline __attribute__((always_inline)) int __cpu_disable(void) | |
7754 | { | |
7755 | return smp_ops.cpu_disable(); | |
7756 | } | |
7757 | static inline __attribute__((always_inline)) void __cpu_die(unsigned int cpu) | |
7758 | { | |
7759 | smp_ops.cpu_die(cpu); | |
7760 | } | |
7761 | static inline __attribute__((always_inline)) void play_dead(void) | |
7762 | { | |
7763 | smp_ops.play_dead(); | |
7764 | } | |
7765 | static inline __attribute__((always_inline)) void smp_send_reschedule(int cpu) | |
7766 | { | |
7767 | smp_ops.smp_send_reschedule(cpu); | |
7768 | } | |
7769 | static inline __attribute__((always_inline)) void arch_send_call_function_single_ipi(int cpu) | |
7770 | { | |
7771 | smp_ops.send_call_func_single_ipi(cpu); | |
7772 | } | |
7773 | static inline __attribute__((always_inline)) void arch_send_call_function_ipi_mask(const struct cpumask *mask) | |
7774 | { | |
7775 | smp_ops.send_call_func_ipi(mask); | |
7776 | } | |
7777 | void cpu_disable_common(void); | |
7778 | void native_smp_prepare_boot_cpu(void); | |
7779 | void native_smp_prepare_cpus(unsigned int max_cpus); | |
7780 | void native_smp_cpus_done(unsigned int max_cpus); | |
7781 | int native_cpu_up(unsigned int cpunum); | |
7782 | int native_cpu_disable(void); | |
7783 | void native_cpu_die(unsigned int cpu); | |
7784 | void native_play_dead(void); | |
7785 | void play_dead_common(void); | |
7786 | void wbinvd_on_cpu(int cpu); | |
7787 | int wbinvd_on_all_cpus(void); | |
7788 | void native_send_call_func_ipi(const struct cpumask *mask); | |
7789 | void native_send_call_func_single_ipi(int cpu); | |
7790 | void smp_store_cpu_info(int id); | |
7791 | static inline __attribute__((always_inline)) int num_booting_cpus(void) | |
7792 | { | |
7793 | return cpumask_weight(cpu_callout_mask); | |
7794 | } | |
7795 | extern unsigned disabled_cpus __attribute__ ((__section__(".cpuinit.data"))); | |
7796 | extern int safe_smp_processor_id(void); | |
7797 | static inline __attribute__((always_inline)) int logical_smp_processor_id(void) | |
7798 | { | |
7799 | return (((apic_read(0xD0)) >> 24) & 0xFFu); | |
7800 | } | |
7801 | extern int hard_smp_processor_id(void); | |
7802 | extern void smp_send_stop(void); | |
7803 | extern void smp_send_reschedule(int cpu); | |
7804 | extern void smp_prepare_cpus(unsigned int max_cpus); | |
7805 | extern int __cpu_up(unsigned int cpunum); | |
7806 | extern void smp_cpus_done(unsigned int max_cpus); | |
7807 | int smp_call_function(smp_call_func_t func, void *info, int wait); | |
7808 | void smp_call_function_many(const struct cpumask *mask, | |
7809 | smp_call_func_t func, void *info, bool wait); | |
7810 | void __smp_call_function_single(int cpuid, struct call_single_data *data, | |
7811 | int wait); | |
7812 | int smp_call_function_any(const struct cpumask *mask, | |
7813 | smp_call_func_t func, void *info, int wait); | |
7814 | void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) call_function_init(void); | |
7815 | void generic_smp_call_function_single_interrupt(void); | |
7816 | void generic_smp_call_function_interrupt(void); | |
7817 | void ipi_call_lock(void); | |
7818 | void ipi_call_unlock(void); | |
7819 | void ipi_call_lock_irq(void); | |
7820 | void ipi_call_unlock_irq(void); | |
7821 | int on_each_cpu(smp_call_func_t func, void *info, int wait); | |
7822 | void smp_prepare_boot_cpu(void); | |
7823 | extern unsigned int setup_max_cpus; | |
7824 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_nr_cpu_ids(void); | |
7825 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) smp_init(void); | |
7826 | extern unsigned int debug_smp_processor_id(void); | |
7827 | extern void arch_disable_smp_support(void); | |
7828 | void smp_setup_processor_id(void); | |
7829 | enum pageblock_bits { | |
7830 | PB_migrate, | |
7831 | PB_migrate_end = PB_migrate + 3 - 1, | |
7832 | NR_PAGEBLOCK_BITS | |
7833 | }; | |
7834 | struct page; | |
7835 | unsigned long get_pageblock_flags_group(struct page *page, | |
7836 | int start_bitidx, int end_bitidx); | |
7837 | void set_pageblock_flags_group(struct page *page, unsigned long flags, | |
7838 | int start_bitidx, int end_bitidx); | |
7839 | extern int page_group_by_mobility_disabled; | |
7840 | static inline __attribute__((always_inline)) int get_pageblock_migratetype(struct page *page) | |
7841 | { | |
7842 | return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); | |
7843 | } | |
7844 | struct free_area { | |
7845 | struct list_head free_list[5]; | |
7846 | unsigned long nr_free; | |
7847 | }; | |
7848 | struct pglist_data; | |
7849 | struct zone_padding { | |
7850 | char x[0]; | |
7851 | } __attribute__((__aligned__(1 << (6)))); | |
7852 | enum zone_stat_item { | |
7853 | NR_FREE_PAGES, | |
7854 | NR_LRU_BASE, | |
7855 | NR_INACTIVE_ANON = NR_LRU_BASE, | |
7856 | NR_ACTIVE_ANON, | |
7857 | NR_INACTIVE_FILE, | |
7858 | NR_ACTIVE_FILE, | |
7859 | NR_UNEVICTABLE, | |
7860 | NR_MLOCK, | |
7861 | NR_ANON_PAGES, | |
7862 | NR_FILE_MAPPED, | |
7863 | NR_FILE_PAGES, | |
7864 | NR_FILE_DIRTY, | |
7865 | NR_WRITEBACK, | |
7866 | NR_SLAB_RECLAIMABLE, | |
7867 | NR_SLAB_UNRECLAIMABLE, | |
7868 | NR_PAGETABLE, | |
7869 | NR_KERNEL_STACK, | |
7870 | NR_UNSTABLE_NFS, | |
7871 | NR_BOUNCE, | |
7872 | NR_VMSCAN_WRITE, | |
7873 | NR_WRITEBACK_TEMP, | |
7874 | NR_ISOLATED_ANON, | |
7875 | NR_ISOLATED_FILE, | |
7876 | NR_SHMEM, | |
7877 | NR_DIRTIED, | |
7878 | NR_WRITTEN, | |
7879 | NR_ANON_TRANSPARENT_HUGEPAGES, | |
7880 | NR_VM_ZONE_STAT_ITEMS }; | |
7881 | enum lru_list { | |
7882 | LRU_INACTIVE_ANON = 0, | |
7883 | LRU_ACTIVE_ANON = 0 + 1, | |
7884 | LRU_INACTIVE_FILE = 0 + 2, | |
7885 | LRU_ACTIVE_FILE = 0 + 2 + 1, | |
7886 | LRU_UNEVICTABLE, | |
7887 | NR_LRU_LISTS | |
7888 | }; | |
7889 | static inline __attribute__((always_inline)) int is_file_lru(enum lru_list l) | |
7890 | { | |
7891 | return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE); | |
7892 | } | |
7893 | static inline __attribute__((always_inline)) int is_active_lru(enum lru_list l) | |
7894 | { | |
7895 | return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE); | |
7896 | } | |
7897 | static inline __attribute__((always_inline)) int is_unevictable_lru(enum lru_list l) | |
7898 | { | |
7899 | return (l == LRU_UNEVICTABLE); | |
7900 | } | |
7901 | enum zone_watermarks { | |
7902 | WMARK_MIN, | |
7903 | WMARK_LOW, | |
7904 | WMARK_HIGH, | |
7905 | NR_WMARK | |
7906 | }; | |
7907 | struct per_cpu_pages { | |
7908 | int count; | |
7909 | int high; | |
7910 | int batch; | |
7911 | struct list_head lists[3]; | |
7912 | }; | |
7913 | struct per_cpu_pageset { | |
7914 | struct per_cpu_pages pcp; | |
7915 | s8 stat_threshold; | |
7916 | s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; | |
7917 | }; | |
7918 | enum zone_type { | |
7919 | ZONE_DMA, | |
7920 | ZONE_NORMAL, | |
7921 | ZONE_HIGHMEM, | |
7922 | ZONE_MOVABLE, | |
7923 | __MAX_NR_ZONES | |
7924 | }; | |
7925 | struct zone_reclaim_stat { | |
7926 | unsigned long recent_rotated[2]; | |
7927 | unsigned long recent_scanned[2]; | |
7928 | }; | |
7929 | struct zone { | |
7930 | unsigned long watermark[NR_WMARK]; | |
7931 | unsigned long percpu_drift_mark; | |
7932 | unsigned long lowmem_reserve[4]; | |
7933 | struct per_cpu_pageset *pageset; | |
7934 | spinlock_t lock; | |
7935 | int all_unreclaimable; | |
7936 | struct free_area free_area[11]; | |
7937 | unsigned long *pageblock_flags; | |
7938 | unsigned int compact_considered; | |
7939 | unsigned int compact_defer_shift; | |
7940 | struct zone_padding _pad1_; | |
7941 | spinlock_t lru_lock; | |
7942 | struct zone_lru { | |
7943 | struct list_head list; | |
7944 | } lru[NR_LRU_LISTS]; | |
7945 | struct zone_reclaim_stat reclaim_stat; | |
7946 | unsigned long pages_scanned; | |
7947 | unsigned long flags; | |
7948 | atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; | |
7949 | unsigned int inactive_ratio; | |
7950 | struct zone_padding _pad2_; | |
7951 | wait_queue_head_t * wait_table; | |
7952 | unsigned long wait_table_hash_nr_entries; | |
7953 | unsigned long wait_table_bits; | |
7954 | struct pglist_data *zone_pgdat; | |
7955 | unsigned long zone_start_pfn; | |
7956 | unsigned long spanned_pages; | |
7957 | unsigned long present_pages; | |
7958 | const char *name; | |
7959 | } __attribute__((__aligned__(1 << (6)))); | |
7960 | typedef enum { | |
7961 | ZONE_RECLAIM_LOCKED, | |
7962 | ZONE_OOM_LOCKED, | |
7963 | ZONE_CONGESTED, | |
7964 | } zone_flags_t; | |
7965 | static inline __attribute__((always_inline)) void zone_set_flag(struct zone *zone, zone_flags_t flag) | |
7966 | { | |
7967 | set_bit(flag, &zone->flags); | |
7968 | } | |
7969 | static inline __attribute__((always_inline)) int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag) | |
7970 | { | |
7971 | return test_and_set_bit(flag, &zone->flags); | |
7972 | } | |
7973 | static inline __attribute__((always_inline)) void zone_clear_flag(struct zone *zone, zone_flags_t flag) | |
7974 | { | |
7975 | clear_bit(flag, &zone->flags); | |
7976 | } | |
7977 | static inline __attribute__((always_inline)) int zone_is_reclaim_congested(const struct zone *zone) | |
7978 | { | |
7979 | return (__builtin_constant_p((ZONE_CONGESTED)) ? constant_test_bit((ZONE_CONGESTED), (&zone->flags)) : variable_test_bit((ZONE_CONGESTED), (&zone->flags))); | |
7980 | } | |
7981 | static inline __attribute__((always_inline)) int zone_is_reclaim_locked(const struct zone *zone) | |
7982 | { | |
7983 | return (__builtin_constant_p((ZONE_RECLAIM_LOCKED)) ? constant_test_bit((ZONE_RECLAIM_LOCKED), (&zone->flags)) : variable_test_bit((ZONE_RECLAIM_LOCKED), (&zone->flags))); | |
7984 | } | |
7985 | static inline __attribute__((always_inline)) int zone_is_oom_locked(const struct zone *zone) | |
7986 | { | |
7987 | return (__builtin_constant_p((ZONE_OOM_LOCKED)) ? constant_test_bit((ZONE_OOM_LOCKED), (&zone->flags)) : variable_test_bit((ZONE_OOM_LOCKED), (&zone->flags))); | |
7988 | } | |
7989 | struct zonelist_cache; | |
7990 | struct zoneref { | |
7991 | struct zone *zone; | |
7992 | int zone_idx; | |
7993 | }; | |
7994 | struct zonelist { | |
7995 | struct zonelist_cache *zlcache_ptr; | |
7996 | struct zoneref _zonerefs[((1 << 0) * 4) + 1]; | |
7997 | }; | |
7998 | struct node_active_region { | |
7999 | unsigned long start_pfn; | |
8000 | unsigned long end_pfn; | |
8001 | int nid; | |
8002 | }; | |
8003 | extern struct page *mem_map; | |
8004 | struct bootmem_data; | |
8005 | typedef struct pglist_data { | |
8006 | struct zone node_zones[4]; | |
8007 | struct zonelist node_zonelists[1]; | |
8008 | int nr_zones; | |
8009 | struct page *node_mem_map; | |
8010 | unsigned long node_start_pfn; | |
8011 | unsigned long node_present_pages; | |
8012 | unsigned long node_spanned_pages; | |
8013 | int node_id; | |
8014 | wait_queue_head_t kswapd_wait; | |
8015 | struct task_struct *kswapd; | |
8016 | int kswapd_max_order; | |
8017 | enum zone_type classzone_idx; | |
8018 | } pg_data_t; | |
8019 | struct rw_semaphore; | |
8020 | struct rw_semaphore { | |
8021 | long count; | |
8022 | spinlock_t wait_lock; | |
8023 | struct list_head wait_list; | |
8024 | struct lockdep_map dep_map; | |
8025 | }; | |
8026 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | |
8027 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | |
8028 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); | |
8029 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | |
8030 | static inline __attribute__((always_inline)) void __down_read(struct rw_semaphore *sem) | |
8031 | { | |
8032 | asm volatile("# beginning down_read\n\t" | |
8033 | ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " " "incl" " " "(%1)\n\t" | |
8034 | " jns 1f\n" | |
8035 | " call call_rwsem_down_read_failed\n" | |
8036 | "1:\n\t" | |
8037 | "# ending down_read\n\t" | |
8038 | : "+m" (sem->count) | |
8039 | : "a" (sem) | |
8040 | : "memory", "cc"); | |
8041 | } | |
8042 | static inline __attribute__((always_inline)) int __down_read_trylock(struct rw_semaphore *sem) | |
8043 | { | |
8044 | long result, tmp; | |
8045 | asm volatile("# beginning __down_read_trylock\n\t" | |
8046 | " mov %0,%1\n\t" | |
8047 | "1:\n\t" | |
8048 | " mov %1,%2\n\t" | |
8049 | " add %3,%2\n\t" | |
8050 | " jle 2f\n\t" | |
8051 | ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " cmpxchg %2,%0\n\t" | |
8052 | " jnz 1b\n\t" | |
8053 | "2:\n\t" | |
8054 | "# ending __down_read_trylock\n\t" | |
8055 | : "+m" (sem->count), "=&a" (result), "=&r" (tmp) | |
8056 | : "i" (0x00000001L) | |
8057 | : "memory", "cc"); | |
8058 | return result >= 0 ? 1 : 0; | |
8059 | } | |
8060 | static inline __attribute__((always_inline)) void __down_write_nested(struct rw_semaphore *sem, int subclass) | |
8061 | { | |
8062 | long tmp; | |
8063 | asm volatile("# beginning down_write\n\t" | |
8064 | ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " xadd %1,(%2)\n\t" | |
8065 | " test %1,%1\n\t" | |
8066 | " jz 1f\n" | |
8067 | " call call_rwsem_down_write_failed\n" | |
8068 | "1:\n" | |
8069 | "# ending down_write" | |
8070 | : "+m" (sem->count), "=d" (tmp) | |
8071 | : "a" (sem), "1" (((-0x0000ffffL -1) + 0x00000001L)) | |
8072 | : "memory", "cc"); | |
8073 | } | |
8074 | static inline __attribute__((always_inline)) void __down_write(struct rw_semaphore *sem) | |
8075 | { | |
8076 | __down_write_nested(sem, 0); | |
8077 | } | |
8078 | static inline __attribute__((always_inline)) int __down_write_trylock(struct rw_semaphore *sem) | |
8079 | { | |
8080 | long ret = ({ __typeof__(*(((&sem->count)))) __ret; __typeof__(*(((&sem->count)))) __old = (((0x00000000L))); __typeof__(*(((&sem->count)))) __new = (((((-0x0000ffffL -1) + 0x00000001L)))); switch ((sizeof(*&sem->count))) { case 1: { volatile u8 *__ptr = (volatile u8 *)(((&sem->count))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)(((&sem->count))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)(((&sem->count))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; }) | |
8081 | ; | |
8082 | if (__builtin_constant_p(((ret == 0x00000000L))) ? !!((ret == 0x00000000L)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/rwsem.h", .line = 131, }; ______r = !!((ret == 0x00000000L)); ______f.miss_hit[______r]++; ______r; })) | |
8083 | return 1; | |
8084 | return 0; | |
8085 | } | |
8086 | static inline __attribute__((always_inline)) void __up_read(struct rw_semaphore *sem) | |
8087 | { | |
8088 | long tmp; | |
8089 | asm volatile("# beginning __up_read\n\t" | |
8090 | ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " xadd %1,(%2)\n\t" | |
8091 | " jns 1f\n\t" | |
8092 | " call call_rwsem_wake\n" | |
8093 | "1:\n" | |
8094 | "# ending __up_read\n" | |
8095 | : "+m" (sem->count), "=d" (tmp) | |
8096 | : "a" (sem), "1" (-0x00000001L) | |
8097 | : "memory", "cc"); | |
8098 | } | |
8099 | static inline __attribute__((always_inline)) void __up_write(struct rw_semaphore *sem) | |
8100 | { | |
8101 | long tmp; | |
8102 | asm volatile("# beginning __up_write\n\t" | |
8103 | ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " xadd %1,(%2)\n\t" | |
8104 | " jns 1f\n\t" | |
8105 | " call call_rwsem_wake\n" | |
8106 | "1:\n\t" | |
8107 | "# ending __up_write\n" | |
8108 | : "+m" (sem->count), "=d" (tmp) | |
8109 | : "a" (sem), "1" (-((-0x0000ffffL -1) + 0x00000001L)) | |
8110 | : "memory", "cc"); | |
8111 | } | |
8112 | static inline __attribute__((always_inline)) void __downgrade_write(struct rw_semaphore *sem) | |
8113 | { | |
8114 | asm volatile("# beginning __downgrade_write\n\t" | |
8115 | ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " " "addl" " " "%2,(%1)\n\t" | |
8116 | " jns 1f\n\t" | |
8117 | " call call_rwsem_downgrade_wake\n" | |
8118 | "1:\n\t" | |
8119 | "# ending __downgrade_write\n" | |
8120 | : "+m" (sem->count) | |
8121 | : "a" (sem), "er" (-(-0x0000ffffL -1)) | |
8122 | : "memory", "cc"); | |
8123 | } | |
8124 | static inline __attribute__((always_inline)) void rwsem_atomic_add(long delta, struct rw_semaphore *sem) | |
8125 | { | |
8126 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " " "addl" " " "%1,%0" | |
8127 | : "+m" (sem->count) | |
8128 | : "er" (delta)); | |
8129 | } | |
8130 | static inline __attribute__((always_inline)) long rwsem_atomic_update(long delta, struct rw_semaphore *sem) | |
8131 | { | |
8132 | long tmp = delta; | |
8133 | asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "xadd %0,%1" | |
8134 | : "+r" (tmp), "+m" (sem->count) | |
8135 | : : "memory"); | |
8136 | return tmp + delta; | |
8137 | } | |
8138 | static inline __attribute__((always_inline)) int rwsem_is_locked(struct rw_semaphore *sem) | |
8139 | { | |
8140 | return sem->count != 0; | |
8141 | } | |
8142 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | |
8143 | struct lock_class_key *key); | |
8144 | extern void down_read(struct rw_semaphore *sem); | |
8145 | extern int down_read_trylock(struct rw_semaphore *sem); | |
8146 | extern void down_write(struct rw_semaphore *sem); | |
8147 | extern int down_write_trylock(struct rw_semaphore *sem); | |
8148 | extern void up_read(struct rw_semaphore *sem); | |
8149 | extern void up_write(struct rw_semaphore *sem); | |
8150 | extern void downgrade_write(struct rw_semaphore *sem); | |
8151 | extern void down_read_nested(struct rw_semaphore *sem, int subclass); | |
8152 | extern void down_write_nested(struct rw_semaphore *sem, int subclass); | |
8153 | extern void down_read_non_owner(struct rw_semaphore *sem); | |
8154 | extern void up_read_non_owner(struct rw_semaphore *sem); | |
8155 | struct srcu_struct_array { | |
8156 | int c[2]; | |
8157 | }; | |
8158 | struct srcu_struct { | |
8159 | int completed; | |
8160 | struct srcu_struct_array *per_cpu_ref; | |
8161 | struct mutex mutex; | |
8162 | struct lockdep_map dep_map; | |
8163 | }; | |
8164 | int __init_srcu_struct(struct srcu_struct *sp, const char *name, | |
8165 | struct lock_class_key *key); | |
8166 | void cleanup_srcu_struct(struct srcu_struct *sp); | |
8167 | int __srcu_read_lock(struct srcu_struct *sp) ; | |
8168 | void __srcu_read_unlock(struct srcu_struct *sp, int idx) ; | |
8169 | void synchronize_srcu(struct srcu_struct *sp); | |
8170 | void synchronize_srcu_expedited(struct srcu_struct *sp); | |
8171 | long srcu_batches_completed(struct srcu_struct *sp); | |
8172 | static inline __attribute__((always_inline)) int srcu_read_lock_held(struct srcu_struct *sp) | |
8173 | { | |
8174 | if (__builtin_constant_p(((debug_locks))) ? !!((debug_locks)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/srcu.h", .line = 96, }; ______r = !!((debug_locks)); ______f.miss_hit[______r]++; ______r; })) | |
8175 | return lock_is_held(&sp->dep_map); | |
8176 | return 1; | |
8177 | } | |
8178 | static inline __attribute__((always_inline)) int srcu_read_lock(struct srcu_struct *sp) | |
8179 | { | |
8180 | int retval = __srcu_read_lock(sp); | |
8181 | lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; })); | |
8182 | return retval; | |
8183 | } | |
8184 | static inline __attribute__((always_inline)) void srcu_read_unlock(struct srcu_struct *sp, int idx) | |
8185 | { | |
8186 | lock_release(&(sp)->dep_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; })); | |
8187 | __srcu_read_unlock(sp, idx); | |
8188 | } | |
8189 | struct notifier_block { | |
8190 | int (*notifier_call)(struct notifier_block *, unsigned long, void *); | |
8191 | struct notifier_block *next; | |
8192 | int priority; | |
8193 | }; | |
8194 | struct atomic_notifier_head { | |
8195 | spinlock_t lock; | |
8196 | struct notifier_block *head; | |
8197 | }; | |
8198 | struct blocking_notifier_head { | |
8199 | struct rw_semaphore rwsem; | |
8200 | struct notifier_block *head; | |
8201 | }; | |
8202 | struct raw_notifier_head { | |
8203 | struct notifier_block *head; | |
8204 | }; | |
8205 | struct srcu_notifier_head { | |
8206 | struct mutex mutex; | |
8207 | struct srcu_struct srcu; | |
8208 | struct notifier_block *head; | |
8209 | }; | |
8210 | extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); | |
8211 | extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, | |
8212 | struct notifier_block *nb); | |
8213 | extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh, | |
8214 | struct notifier_block *nb); | |
8215 | extern int raw_notifier_chain_register(struct raw_notifier_head *nh, | |
8216 | struct notifier_block *nb); | |
8217 | extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh, | |
8218 | struct notifier_block *nb); | |
8219 | extern int blocking_notifier_chain_cond_register( | |
8220 | struct blocking_notifier_head *nh, | |
8221 | struct notifier_block *nb); | |
8222 | extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, | |
8223 | struct notifier_block *nb); | |
8224 | extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, | |
8225 | struct notifier_block *nb); | |
8226 | extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh, | |
8227 | struct notifier_block *nb); | |
8228 | extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, | |
8229 | struct notifier_block *nb); | |
8230 | extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh, | |
8231 | unsigned long val, void *v); | |
8232 | extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, | |
8233 | unsigned long val, void *v, int nr_to_call, int *nr_calls); | |
8234 | extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh, | |
8235 | unsigned long val, void *v); | |
8236 | extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, | |
8237 | unsigned long val, void *v, int nr_to_call, int *nr_calls); | |
8238 | extern int raw_notifier_call_chain(struct raw_notifier_head *nh, | |
8239 | unsigned long val, void *v); | |
8240 | extern int __raw_notifier_call_chain(struct raw_notifier_head *nh, | |
8241 | unsigned long val, void *v, int nr_to_call, int *nr_calls); | |
8242 | extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, | |
8243 | unsigned long val, void *v); | |
8244 | extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, | |
8245 | unsigned long val, void *v, int nr_to_call, int *nr_calls); | |
8246 | static inline __attribute__((always_inline)) int notifier_from_errno(int err) | |
8247 | { | |
8248 | if (__builtin_constant_p(((err))) ? !!((err)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/notifier.h", .line = 167, }; ______r = !!((err)); ______f.miss_hit[______r]++; ______r; })) | |
8249 | return 0x8000 | (0x0001 - err); | |
8250 | return 0x0001; | |
8251 | } | |
8252 | static inline __attribute__((always_inline)) int notifier_to_errno(int ret) | |
8253 | { | |
8254 | ret &= ~0x8000; | |
8255 | return ret > 0x0001 ? 0x0001 - ret : 0; | |
8256 | } | |
8257 | extern struct blocking_notifier_head reboot_notifier_list; | |
8258 | struct page; | |
8259 | struct zone; | |
8260 | struct pglist_data; | |
8261 | struct mem_section; | |
8262 | static inline __attribute__((always_inline)) void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} | |
8263 | static inline __attribute__((always_inline)) void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} | |
8264 | static inline __attribute__((always_inline)) void pgdat_resize_init(struct pglist_data *pgdat) {} | |
8265 | static inline __attribute__((always_inline)) unsigned zone_span_seqbegin(struct zone *zone) | |
8266 | { | |
8267 | return 0; | |
8268 | } | |
8269 | static inline __attribute__((always_inline)) int zone_span_seqretry(struct zone *zone, unsigned iv) | |
8270 | { | |
8271 | return 0; | |
8272 | } | |
8273 | static inline __attribute__((always_inline)) void zone_span_writelock(struct zone *zone) {} | |
8274 | static inline __attribute__((always_inline)) void zone_span_writeunlock(struct zone *zone) {} | |
8275 | static inline __attribute__((always_inline)) void zone_seqlock_init(struct zone *zone) {} | |
8276 | static inline __attribute__((always_inline)) int mhp_notimplemented(const char *func) | |
8277 | { | |
8278 | printk("<4>" "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func); | |
8279 | dump_stack(); | |
8280 | return -38; | |
8281 | } | |
8282 | static inline __attribute__((always_inline)) void register_page_bootmem_info_node(struct pglist_data *pgdat) | |
8283 | { | |
8284 | } | |
8285 | static inline __attribute__((always_inline)) void lock_memory_hotplug(void) {} | |
8286 | static inline __attribute__((always_inline)) void unlock_memory_hotplug(void) {} | |
8287 | static inline __attribute__((always_inline)) int is_mem_section_removable(unsigned long pfn, | |
8288 | unsigned long nr_pages) | |
8289 | { | |
8290 | return 0; | |
8291 | } | |
8292 | extern int mem_online_node(int nid); | |
8293 | extern int add_memory(int nid, u64 start, u64 size); | |
8294 | extern int arch_add_memory(int nid, u64 start, u64 size); | |
8295 | extern int remove_memory(u64 start, u64 size); | |
8296 | extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, | |
8297 | int nr_pages); | |
8298 | extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms); | |
8299 | extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, | |
8300 | unsigned long pnum); | |
8301 | extern struct mutex zonelists_mutex; | |
8302 | void build_all_zonelists(void *data); | |
8303 | void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); | |
8304 | bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, | |
8305 | int classzone_idx, int alloc_flags); | |
8306 | bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, | |
8307 | int classzone_idx, int alloc_flags); | |
8308 | enum memmap_context { | |
8309 | MEMMAP_EARLY, | |
8310 | MEMMAP_HOTPLUG, | |
8311 | }; | |
8312 | extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, | |
8313 | unsigned long size, | |
8314 | enum memmap_context context); | |
8315 | static inline __attribute__((always_inline)) void memory_present(int nid, unsigned long start, unsigned long end) {} | |
8316 | static inline __attribute__((always_inline)) int local_memory_node(int node_id) { return node_id; }; | |
8317 | static inline __attribute__((always_inline)) int populated_zone(struct zone *zone) | |
8318 | { | |
8319 | return (!!zone->present_pages); | |
8320 | } | |
8321 | extern int movable_zone; | |
8322 | static inline __attribute__((always_inline)) int zone_movable_is_highmem(void) | |
8323 | { | |
8324 | return movable_zone == ZONE_HIGHMEM; | |
8325 | } | |
8326 | static inline __attribute__((always_inline)) int is_highmem_idx(enum zone_type idx) | |
8327 | { | |
8328 | return (idx == ZONE_HIGHMEM || | |
8329 | (idx == ZONE_MOVABLE && zone_movable_is_highmem())); | |
8330 | } | |
8331 | static inline __attribute__((always_inline)) int is_normal_idx(enum zone_type idx) | |
8332 | { | |
8333 | return (idx == ZONE_NORMAL); | |
8334 | } | |
8335 | static inline __attribute__((always_inline)) int is_highmem(struct zone *zone) | |
8336 | { | |
8337 | int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones; | |
8338 | return zone_off == ZONE_HIGHMEM * sizeof(*zone) || | |
8339 | (zone_off == ZONE_MOVABLE * sizeof(*zone) && | |
8340 | zone_movable_is_highmem()); | |
8341 | } | |
8342 | static inline __attribute__((always_inline)) int is_normal(struct zone *zone) | |
8343 | { | |
8344 | return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; | |
8345 | } | |
8346 | static inline __attribute__((always_inline)) int is_dma32(struct zone *zone) | |
8347 | { | |
8348 | return 0; | |
8349 | } | |
8350 | static inline __attribute__((always_inline)) int is_dma(struct zone *zone) | |
8351 | { | |
8352 | return zone == zone->zone_pgdat->node_zones + ZONE_DMA; | |
8353 | } | |
8354 | struct ctl_table; | |
8355 | int min_free_kbytes_sysctl_handler(struct ctl_table *, int, | |
8356 | void *, size_t *, loff_t *); | |
8357 | extern int sysctl_lowmem_reserve_ratio[4 -1]; | |
8358 | int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, | |
8359 | void *, size_t *, loff_t *); | |
8360 | int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, | |
8361 | void *, size_t *, loff_t *); | |
8362 | int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, | |
8363 | void *, size_t *, loff_t *); | |
8364 | int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, | |
8365 | void *, size_t *, loff_t *); | |
8366 | extern int numa_zonelist_order_handler(struct ctl_table *, int, | |
8367 | void *, size_t *, loff_t *); | |
8368 | extern char numa_zonelist_order[]; | |
8369 | extern struct pglist_data contig_page_data; | |
8370 | extern struct pglist_data *first_online_pgdat(void); | |
8371 | extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); | |
8372 | extern struct zone *next_zone(struct zone *zone); | |
8373 | static inline __attribute__((always_inline)) struct zone *zonelist_zone(struct zoneref *zoneref) | |
8374 | { | |
8375 | return zoneref->zone; | |
8376 | } | |
8377 | static inline __attribute__((always_inline)) int zonelist_zone_idx(struct zoneref *zoneref) | |
8378 | { | |
8379 | return zoneref->zone_idx; | |
8380 | } | |
8381 | static inline __attribute__((always_inline)) int zonelist_node_idx(struct zoneref *zoneref) | |
8382 | { | |
8383 | return 0; | |
8384 | } | |
8385 | struct zoneref *next_zones_zonelist(struct zoneref *z, | |
8386 | enum zone_type highest_zoneidx, | |
8387 | nodemask_t *nodes, | |
8388 | struct zone **zone); | |
8389 | static inline __attribute__((always_inline)) struct zoneref *first_zones_zonelist(struct zonelist *zonelist, | |
8390 | enum zone_type highest_zoneidx, | |
8391 | nodemask_t *nodes, | |
8392 | struct zone **zone) | |
8393 | { | |
8394 | return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes, | |
8395 | zone); | |
8396 | } | |
8397 | void memory_present(int nid, unsigned long start, unsigned long end); | |
8398 | unsigned long __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) node_memmap_size_bytes(int, unsigned long, unsigned long); | |
8399 | static inline __attribute__((always_inline)) int memmap_valid_within(unsigned long pfn, | |
8400 | struct page *page, struct zone *zone) | |
8401 | { | |
8402 | return 1; | |
8403 | } | |
8404 | extern void *pcpu_base_addr; | |
8405 | extern const unsigned long *pcpu_unit_offsets; | |
8406 | struct pcpu_group_info { | |
8407 | int nr_units; | |
8408 | unsigned long base_offset; | |
8409 | unsigned int *cpu_map; | |
8410 | }; | |
8411 | struct pcpu_alloc_info { | |
8412 | size_t static_size; | |
8413 | size_t reserved_size; | |
8414 | size_t dyn_size; | |
8415 | size_t unit_size; | |
8416 | size_t atom_size; | |
8417 | size_t alloc_size; | |
8418 | size_t __ai_size; | |
8419 | int nr_groups; | |
8420 | struct pcpu_group_info groups[]; | |
8421 | }; | |
8422 | enum pcpu_fc { | |
8423 | PCPU_FC_AUTO, | |
8424 | PCPU_FC_EMBED, | |
8425 | PCPU_FC_PAGE, | |
8426 | PCPU_FC_NR, | |
8427 | }; | |
8428 | extern const char *pcpu_fc_names[PCPU_FC_NR]; | |
8429 | extern enum pcpu_fc pcpu_chosen_fc; | |
8430 | typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size, | |
8431 | size_t align); | |
8432 | typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size); | |
8433 | typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr); | |
8434 | typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to); | |
8435 | extern struct pcpu_alloc_info * __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_alloc_alloc_info(int nr_groups, | |
8436 | int nr_units); | |
8437 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_free_alloc_info(struct pcpu_alloc_info *ai); | |
8438 | extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |
8439 | void *base_addr); | |
8440 | extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, | |
8441 | size_t atom_size, | |
8442 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn, | |
8443 | pcpu_fc_alloc_fn_t alloc_fn, | |
8444 | pcpu_fc_free_fn_t free_fn); | |
8445 | extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_page_first_chunk(size_t reserved_size, | |
8446 | pcpu_fc_alloc_fn_t alloc_fn, | |
8447 | pcpu_fc_free_fn_t free_fn, | |
8448 | pcpu_fc_populate_pte_fn_t populate_pte_fn); | |
8449 | extern void *__alloc_reserved_percpu(size_t size, size_t align); | |
8450 | extern bool is_kernel_percpu_address(unsigned long addr); | |
8451 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) percpu_init_late(void); | |
8452 | extern void *__alloc_percpu(size_t size, size_t align); | |
8453 | extern void free_percpu(void *__pdata); | |
8454 | extern phys_addr_t per_cpu_ptr_to_phys(void *addr); | |
8455 | extern void __bad_size_call_parameter(void); | |
8456 | int arch_update_cpu_topology(void); | |
8457 | static inline __attribute__((always_inline)) int numa_mem_id(void) | |
8458 | { | |
8459 | return numa_node_id(); | |
8460 | } | |
8461 | struct vm_area_struct; | |
8462 | static inline __attribute__((always_inline)) int allocflags_to_migratetype(gfp_t gfp_flags) | |
8463 | { | |
8464 | ({ int __ret_warn_on = !!((gfp_flags & ((( gfp_t)0x80000u)|(( gfp_t)0x08u))) == ((( gfp_t)0x80000u)|(( gfp_t)0x08u))); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("include/linux/gfp.h", 152); (__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); }); | |
8465 | if (__builtin_constant_p((((__builtin_constant_p(page_group_by_mobility_disabled) ? !!(page_group_by_mobility_disabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 154, }; ______r = __builtin_expect(!!(page_group_by_mobility_disabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(page_group_by_mobility_disabled) ? !!(page_group_by_mobility_disabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 154, }; ______r = __builtin_expect(!!(page_group_by_mobility_disabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 154, }; ______r = !!(((__builtin_constant_p(page_group_by_mobility_disabled) ? !!(page_group_by_mobility_disabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 154, }; ______r = __builtin_expect(!!(page_group_by_mobility_disabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
8466 | return 0; | |
8467 | return (((gfp_flags & (( gfp_t)0x08u)) != 0) << 1) | | |
8468 | ((gfp_flags & (( gfp_t)0x80000u)) != 0); | |
8469 | } | |
8470 | static inline __attribute__((always_inline)) enum zone_type gfp_zone(gfp_t flags) | |
8471 | { | |
8472 | enum zone_type z; | |
8473 | int bit = ( int) (flags & ((( gfp_t)0x01u)|(( gfp_t)0x02u)|(( gfp_t)0x04u)|(( gfp_t)0x08u))); | |
8474 | z = (( (ZONE_NORMAL << 0 * 2) | (ZONE_DMA << 0x01u * 2) | (ZONE_HIGHMEM << 0x02u * 2) | (ZONE_NORMAL << 0x04u * 2) | (ZONE_NORMAL << 0x08u * 2) | (ZONE_DMA << (0x08u | 0x01u) * 2) | (ZONE_MOVABLE << (0x08u | 0x02u) * 2) | (ZONE_NORMAL << (0x08u | 0x04u) * 2) ) >> (bit * 2)) & | |
8475 | ((1 << 2) - 1); | |
8476 | do { (void)((( 1 << (0x01u | 0x02u) | 1 << (0x01u | 0x04u) | 1 << (0x04u | 0x02u) | 1 << (0x01u | 0x04u | 0x02u) | 1 << (0x08u | 0x02u | 0x01u) | 1 << (0x08u | 0x04u | 0x01u) | 1 << (0x08u | 0x04u | 0x02u) | 1 << (0x08u | 0x04u | 0x01u | 0x02u) ) >> bit) & 1); } while (0); | |
8477 | return z; | |
8478 | } | |
8479 | static inline __attribute__((always_inline)) int gfp_zonelist(gfp_t flags) | |
8480 | { | |
8481 | if (__builtin_constant_p(((0 && (__builtin_constant_p(flags & (( gfp_t)0x40000u)) ? !!(flags & (( gfp_t)0x40000u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 265, }; ______r = __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!((0 && (__builtin_constant_p(flags & (( gfp_t)0x40000u)) ? !!(flags & (( gfp_t)0x40000u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 265, }; ______r = __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 265, }; ______r = !!((0 && (__builtin_constant_p(flags & (( gfp_t)0x40000u)) ? !!(flags & (( gfp_t)0x40000u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 265, }; ______r = __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
8482 | return 1; | |
8483 | return 0; | |
8484 | } | |
8485 | static inline __attribute__((always_inline)) struct zonelist *node_zonelist(int nid, gfp_t flags) | |
8486 | { | |
8487 | return (&contig_page_data)->node_zonelists + gfp_zonelist(flags); | |
8488 | } | |
8489 | static inline __attribute__((always_inline)) void arch_free_page(struct page *page, int order) { } | |
8490 | static inline __attribute__((always_inline)) void arch_alloc_page(struct page *page, int order) { } | |
8491 | struct page * | |
8492 | __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, | |
8493 | struct zonelist *zonelist, nodemask_t *nodemask); | |
8494 | static inline __attribute__((always_inline)) struct page * | |
8495 | __alloc_pages(gfp_t gfp_mask, unsigned int order, | |
8496 | struct zonelist *zonelist) | |
8497 | { | |
8498 | return __alloc_pages_nodemask(gfp_mask, order, zonelist, ((void *)0)); | |
8499 | } | |
8500 | static inline __attribute__((always_inline)) struct page *alloc_pages_node(int nid, gfp_t gfp_mask, | |
8501 | unsigned int order) | |
8502 | { | |
8503 | if (__builtin_constant_p(((nid < 0))) ? !!((nid < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 307, }; ______r = !!((nid < 0)); ______f.miss_hit[______r]++; ______r; })) | |
8504 | nid = numa_node_id(); | |
8505 | return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); | |
8506 | } | |
8507 | static inline __attribute__((always_inline)) struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask, | |
8508 | unsigned int order) | |
8509 | { | |
8510 | do { (void)(nid < 0 || nid >= (1 << 0)); } while (0); | |
8511 | return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); | |
8512 | } | |
8513 | extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); | |
8514 | extern unsigned long get_zeroed_page(gfp_t gfp_mask); | |
8515 | void *alloc_pages_exact(size_t size, gfp_t gfp_mask); | |
8516 | void free_pages_exact(void *virt, size_t size); | |
8517 | void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); | |
8518 | extern void __free_pages(struct page *page, unsigned int order); | |
8519 | extern void free_pages(unsigned long addr, unsigned int order); | |
8520 | extern void free_hot_cold_page(struct page *page, int cold); | |
8521 | void page_alloc_init(void); | |
8522 | void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); | |
8523 | void drain_all_pages(void); | |
8524 | void drain_local_pages(void *dummy); | |
8525 | extern gfp_t gfp_allowed_mask; | |
8526 | extern void pm_restrict_gfp_mask(void); | |
8527 | extern void pm_restore_gfp_mask(void); | |
8528 | enum irqreturn { | |
8529 | IRQ_NONE = (0 << 0), | |
8530 | IRQ_HANDLED = (1 << 0), | |
8531 | IRQ_WAKE_THREAD = (1 << 1), | |
8532 | }; | |
8533 | typedef enum irqreturn irqreturn_t; | |
8534 | extern int nr_irqs; | |
8535 | extern struct irq_desc *irq_to_desc(unsigned int irq); | |
8536 | unsigned int irq_get_next_irq(unsigned int offset); | |
8537 | static inline __attribute__((always_inline)) int irq_canonicalize(int irq) | |
8538 | { | |
8539 | return ((irq == 2) ? 9 : irq); | |
8540 | } | |
8541 | extern void irq_ctx_init(int cpu); | |
8542 | extern void fixup_irqs(void); | |
8543 | extern void irq_force_complete_move(int); | |
8544 | extern void (*x86_platform_ipi_callback)(void); | |
8545 | extern void native_init_IRQ(void); | |
8546 | extern bool handle_irq(unsigned irq, struct pt_regs *regs); | |
8547 | extern unsigned int do_IRQ(struct pt_regs *regs); | |
8548 | extern unsigned long used_vectors[(((256) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; | |
8549 | extern int vector_used_by_percpu_irq(unsigned int vector); | |
8550 | extern void init_ISA_irqs(void); | |
8551 | extern __attribute__((section(".data..percpu" ""))) __typeof__(struct pt_regs *) irq_regs; | |
8552 | static inline __attribute__((always_inline)) struct pt_regs *get_irq_regs(void) | |
8553 | { | |
8554 | return ({ typeof(irq_regs) pfo_ret__; switch (sizeof(irq_regs)) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m" (irq_regs)); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_regs)); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_regs)); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_regs)); break; default: __bad_percpu_size(); } pfo_ret__; }); | |
8555 | } | |
8556 | static inline __attribute__((always_inline)) struct pt_regs *set_irq_regs(struct pt_regs *new_regs) | |
8557 | { | |
8558 | struct pt_regs *old_regs; | |
8559 | old_regs = get_irq_regs(); | |
8560 | do { typedef typeof(irq_regs) pto_T__; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/irq_regs.h", .line = 26, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pto_T__ pto_tmp__; pto_tmp__ = (new_regs); (void)pto_tmp__; } switch (sizeof(irq_regs)) { case 1: asm("mov" "b %1,""%%""fs"":" "%P" "0" : "+m" (irq_regs) : "qi" ((pto_T__)(new_regs))); break; case 2: asm("mov" "w %1,""%%""fs"":" "%P" "0" : "+m" (irq_regs) : "ri" ((pto_T__)(new_regs))); break; case 4: asm("mov" "l %1,""%%""fs"":" "%P" "0" : "+m" (irq_regs) : "ri" ((pto_T__)(new_regs))); break; case 8: asm("mov" "q %1,""%%""fs"":" "%P" "0" : "+m" (irq_regs) : "re" ((pto_T__)(new_regs))); break; default: __bad_percpu_size(); } } while (0); | |
8561 | return old_regs; | |
8562 | } | |
8563 | struct seq_file; | |
8564 | struct irq_desc; | |
8565 | struct irq_data; | |
8566 | typedef void (*irq_flow_handler_t)(unsigned int irq, | |
8567 | struct irq_desc *desc); | |
8568 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); | |
8569 | enum { | |
8570 | IRQ_TYPE_NONE = 0x00000000, | |
8571 | IRQ_TYPE_EDGE_RISING = 0x00000001, | |
8572 | IRQ_TYPE_EDGE_FALLING = 0x00000002, | |
8573 | IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), | |
8574 | IRQ_TYPE_LEVEL_HIGH = 0x00000004, | |
8575 | IRQ_TYPE_LEVEL_LOW = 0x00000008, | |
8576 | IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), | |
8577 | IRQ_TYPE_SENSE_MASK = 0x0000000f, | |
8578 | IRQ_TYPE_PROBE = 0x00000010, | |
8579 | IRQ_LEVEL = (1 << 8), | |
8580 | IRQ_PER_CPU = (1 << 9), | |
8581 | IRQ_NOPROBE = (1 << 10), | |
8582 | IRQ_NOREQUEST = (1 << 11), | |
8583 | IRQ_NOAUTOEN = (1 << 12), | |
8584 | IRQ_NO_BALANCING = (1 << 13), | |
8585 | IRQ_MOVE_PCNTXT = (1 << 14), | |
8586 | IRQ_NESTED_THREAD = (1 << 15), | |
8587 | IRQ_NOTHREAD = (1 << 16), | |
8588 | }; | |
8589 | static inline __attribute__((always_inline)) __attribute__((deprecated)) bool CHECK_IRQ_PER_CPU(unsigned int status) | |
8590 | { | |
8591 | return status & IRQ_PER_CPU; | |
8592 | } | |
8593 | enum { | |
8594 | IRQ_SET_MASK_OK = 0, | |
8595 | IRQ_SET_MASK_OK_NOCOPY, | |
8596 | }; | |
8597 | struct msi_desc; | |
8598 | struct irq_data { | |
8599 | unsigned int irq; | |
8600 | unsigned int node; | |
8601 | unsigned int state_use_accessors; | |
8602 | struct irq_chip *chip; | |
8603 | void *handler_data; | |
8604 | void *chip_data; | |
8605 | struct msi_desc *msi_desc; | |
8606 | cpumask_var_t affinity; | |
8607 | }; | |
8608 | enum { | |
8609 | IRQD_TRIGGER_MASK = 0xf, | |
8610 | IRQD_SETAFFINITY_PENDING = (1 << 8), | |
8611 | IRQD_NO_BALANCING = (1 << 10), | |
8612 | IRQD_PER_CPU = (1 << 11), | |
8613 | IRQD_AFFINITY_SET = (1 << 12), | |
8614 | IRQD_LEVEL = (1 << 13), | |
8615 | IRQD_WAKEUP_STATE = (1 << 14), | |
8616 | IRQD_MOVE_PCNTXT = (1 << 15), | |
8617 | IRQD_IRQ_DISABLED = (1 << 16), | |
8618 | IRQD_IRQ_MASKED = (1 << 17), | |
8619 | IRQD_IRQ_INPROGRESS = (1 << 18), | |
8620 | }; | |
8621 | static inline __attribute__((always_inline)) bool irqd_is_setaffinity_pending(struct irq_data *d) | |
8622 | { | |
8623 | return d->state_use_accessors & IRQD_SETAFFINITY_PENDING; | |
8624 | } | |
8625 | static inline __attribute__((always_inline)) bool irqd_is_per_cpu(struct irq_data *d) | |
8626 | { | |
8627 | return d->state_use_accessors & IRQD_PER_CPU; | |
8628 | } | |
8629 | static inline __attribute__((always_inline)) bool irqd_can_balance(struct irq_data *d) | |
8630 | { | |
8631 | return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING)); | |
8632 | } | |
8633 | static inline __attribute__((always_inline)) bool irqd_affinity_was_set(struct irq_data *d) | |
8634 | { | |
8635 | return d->state_use_accessors & IRQD_AFFINITY_SET; | |
8636 | } | |
8637 | static inline __attribute__((always_inline)) void irqd_mark_affinity_was_set(struct irq_data *d) | |
8638 | { | |
8639 | d->state_use_accessors |= IRQD_AFFINITY_SET; | |
8640 | } | |
8641 | static inline __attribute__((always_inline)) u32 irqd_get_trigger_type(struct irq_data *d) | |
8642 | { | |
8643 | return d->state_use_accessors & IRQD_TRIGGER_MASK; | |
8644 | } | |
8645 | static inline __attribute__((always_inline)) void irqd_set_trigger_type(struct irq_data *d, u32 type) | |
8646 | { | |
8647 | d->state_use_accessors &= ~IRQD_TRIGGER_MASK; | |
8648 | d->state_use_accessors |= type & IRQD_TRIGGER_MASK; | |
8649 | } | |
8650 | static inline __attribute__((always_inline)) bool irqd_is_level_type(struct irq_data *d) | |
8651 | { | |
8652 | return d->state_use_accessors & IRQD_LEVEL; | |
8653 | } | |
8654 | static inline __attribute__((always_inline)) bool irqd_is_wakeup_set(struct irq_data *d) | |
8655 | { | |
8656 | return d->state_use_accessors & IRQD_WAKEUP_STATE; | |
8657 | } | |
8658 | static inline __attribute__((always_inline)) bool irqd_can_move_in_process_context(struct irq_data *d) | |
8659 | { | |
8660 | return d->state_use_accessors & IRQD_MOVE_PCNTXT; | |
8661 | } | |
8662 | static inline __attribute__((always_inline)) bool irqd_irq_disabled(struct irq_data *d) | |
8663 | { | |
8664 | return d->state_use_accessors & IRQD_IRQ_DISABLED; | |
8665 | } | |
8666 | static inline __attribute__((always_inline)) bool irqd_irq_masked(struct irq_data *d) | |
8667 | { | |
8668 | return d->state_use_accessors & IRQD_IRQ_MASKED; | |
8669 | } | |
8670 | static inline __attribute__((always_inline)) bool irqd_irq_inprogress(struct irq_data *d) | |
8671 | { | |
8672 | return d->state_use_accessors & IRQD_IRQ_INPROGRESS; | |
8673 | } | |
8674 | static inline __attribute__((always_inline)) void irqd_set_chained_irq_inprogress(struct irq_data *d) | |
8675 | { | |
8676 | d->state_use_accessors |= IRQD_IRQ_INPROGRESS; | |
8677 | } | |
8678 | static inline __attribute__((always_inline)) void irqd_clr_chained_irq_inprogress(struct irq_data *d) | |
8679 | { | |
8680 | d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS; | |
8681 | } | |
/*
 * struct irq_chip - hardware interrupt controller descriptor.
 * A table of callbacks invoked to program the controller; every callback
 * receives the per-interrupt struct irq_data.  Field descriptions below
 * are inferred from the member names and signatures — NOTE(review):
 * confirm against the generic IRQ core before relying on them.
 */
struct irq_chip {
	const char *name;					/* chip identifier string */
	unsigned int (*irq_startup)(struct irq_data *data);	/* start up the interrupt */
	void (*irq_shutdown)(struct irq_data *data);		/* shut down the interrupt */
	void (*irq_enable)(struct irq_data *data);		/* enable delivery */
	void (*irq_disable)(struct irq_data *data);		/* disable delivery */
	void (*irq_ack)(struct irq_data *data);			/* acknowledge the interrupt */
	void (*irq_mask)(struct irq_data *data);		/* mask the source */
	void (*irq_mask_ack)(struct irq_data *data);		/* mask and acknowledge combined */
	void (*irq_unmask)(struct irq_data *data);		/* unmask the source */
	void (*irq_eoi)(struct irq_data *data);			/* signal end of interrupt */
	int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);	/* set CPU affinity */
	int (*irq_retrigger)(struct irq_data *data);		/* resend/retrigger the interrupt */
	int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);	/* set trigger type */
	int (*irq_set_wake)(struct irq_data *data, unsigned int on);	/* wakeup-source on/off */
	void (*irq_bus_lock)(struct irq_data *data);		/* lock access to chip (slow bus) */
	void (*irq_bus_sync_unlock)(struct irq_data *data);	/* sync state and unlock bus */
	void (*irq_cpu_online)(struct irq_data *data);		/* a CPU came online */
	void (*irq_cpu_offline)(struct irq_data *data);		/* a CPU is going offline */
	void (*irq_suspend)(struct irq_data *data);		/* system-suspend hook */
	void (*irq_resume)(struct irq_data *data);		/* system-resume hook */
	void (*irq_pm_shutdown)(struct irq_data *data);		/* power-management shutdown hook */
	void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);	/* dump chip info to a seq_file */
	unsigned long flags;					/* presumably the IRQCHIP_* bits declared below — confirm */
};
/*
 * irq_chip behaviour flag bits.  Single-bit values; presumably stored in
 * struct irq_chip::flags and tested by the generic IRQ core —
 * NOTE(review): the consumers are outside this file, confirm there.
 */
enum {
	IRQCHIP_SET_TYPE_MASKED = (1 << 0),
	IRQCHIP_EOI_IF_HANDLED = (1 << 1),
	IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
	IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
};
8713 | struct irq_affinity_notify; | |
8714 | struct proc_dir_entry; | |
8715 | struct timer_rand_state; | |
/*
 * struct irq_desc - per-interrupt descriptor.
 * Cache-line aligned (see the aligned attribute below).  Field notes are
 * inferred from names/types where the code here does not show usage —
 * NOTE(review): confirm against kernel/irq/ before relying on them.
 */
struct irq_desc {
	struct irq_data irq_data;			/* chip-facing per-IRQ data (embedded, see irq_desc_get_irq_data()) */
	struct timer_rand_state *timer_rand_state;	/* entropy-pool state, presumably for add_interrupt_randomness — confirm */
	unsigned int *kstat_irqs;			/* per-CPU interrupt counters, presumably — confirm */
	irq_flow_handler_t handle_irq;			/* flow handler invoked by generic_handle_irq_desc() */
	struct irqaction *action;			/* action chain; non-NULL means a handler is installed (see irq_has_action()) */
	unsigned int status_use_accessors;		/* IRQ_* status bits (see irq_balancing_disabled()) */
	unsigned int core_internal_state__do_not_mess_with_it;	/* core-private state; name says hands off */
	unsigned int depth;				/* nested disable depth, presumably — confirm */
	unsigned int wake_depth;			/* nested wakeup-enable depth, presumably — confirm */
	unsigned int irq_count;				/* interrupt occurrence counter */
	unsigned long last_unhandled;			/* timestamp/aging of last unhandled event, presumably — confirm */
	unsigned int irqs_unhandled;			/* count of unhandled (spurious) interrupts */
	raw_spinlock_t lock;				/* protects the descriptor (lockdep class set in irq_set_lockdep_class()) */
	const struct cpumask *affinity_hint;		/* affinity hint exported to userspace, presumably — confirm */
	struct irq_affinity_notify *affinity_notify;	/* affinity-change notification hook */
	cpumask_var_t pending_mask;			/* pending affinity change mask, presumably — confirm */
	unsigned long threads_oneshot;			/* bitmask for threaded oneshot handlers, presumably — confirm */
	atomic_t threads_active;			/* number of active threaded handlers */
	wait_queue_head_t wait_for_threads;		/* waitqueue for synchronize with threaded handlers */
	struct proc_dir_entry *dir;			/* /proc/irq directory entry */
	const char *name;				/* flow-handler name (set in __irq_set_chip_handler_name_locked()) */
} __attribute__((__aligned__(1 << (6))));		/* 64-byte (cache line) alignment */
8739 | extern struct irq_desc irq_desc[((32 * 8) < ( 32 * 64 ) ? (256 + (32 * 8)) : (256 + ( 32 * 64 )))]; | |
8740 | static inline __attribute__((always_inline)) struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) | |
8741 | { | |
8742 | return &desc->irq_data; | |
8743 | } | |
8744 | static inline __attribute__((always_inline)) struct irq_chip *irq_desc_get_chip(struct irq_desc *desc) | |
8745 | { | |
8746 | return desc->irq_data.chip; | |
8747 | } | |
8748 | static inline __attribute__((always_inline)) void *irq_desc_get_chip_data(struct irq_desc *desc) | |
8749 | { | |
8750 | return desc->irq_data.chip_data; | |
8751 | } | |
8752 | static inline __attribute__((always_inline)) void *irq_desc_get_handler_data(struct irq_desc *desc) | |
8753 | { | |
8754 | return desc->irq_data.handler_data; | |
8755 | } | |
8756 | static inline __attribute__((always_inline)) struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) | |
8757 | { | |
8758 | return desc->irq_data.msi_desc; | |
8759 | } | |
8760 | static inline __attribute__((always_inline)) void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) | |
8761 | { | |
8762 | desc->handle_irq(irq, desc); | |
8763 | } | |
8764 | int generic_handle_irq(unsigned int irq); | |
8765 | static inline __attribute__((always_inline)) int irq_has_action(unsigned int irq) | |
8766 | { | |
8767 | struct irq_desc *desc = irq_to_desc(irq); | |
8768 | return desc->action != ((void *)0); | |
8769 | } | |
8770 | static inline __attribute__((always_inline)) void __irq_set_handler_locked(unsigned int irq, | |
8771 | irq_flow_handler_t handler) | |
8772 | { | |
8773 | struct irq_desc *desc; | |
8774 | desc = irq_to_desc(irq); | |
8775 | desc->handle_irq = handler; | |
8776 | } | |
8777 | static inline __attribute__((always_inline)) void | |
8778 | __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, | |
8779 | irq_flow_handler_t handler, const char *name) | |
8780 | { | |
8781 | struct irq_desc *desc; | |
8782 | desc = irq_to_desc(irq); | |
8783 | irq_desc_get_irq_data(desc)->chip = chip; | |
8784 | desc->handle_irq = handler; | |
8785 | desc->name = name; | |
8786 | } | |
8787 | static inline __attribute__((always_inline)) int irq_balancing_disabled(unsigned int irq) | |
8788 | { | |
8789 | struct irq_desc *desc; | |
8790 | desc = irq_to_desc(irq); | |
8791 | return desc->status_use_accessors & (IRQ_PER_CPU | IRQ_NO_BALANCING); | |
8792 | } | |
/*
 * Assign a lockdep class to @irq's descriptor lock so lockdep can tell
 * different IRQ locks apart.  The huge condition below is the expanded
 * ftrace branch-profiling form of a plain `if (desc)` NULL check: it
 * records hit/miss counts in the _ftrace_branch section, then yields
 * the boolean.  Code left byte-identical; only comments added.
 */
static inline __attribute__((always_inline)) void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
{
	struct irq_desc *desc = irq_to_desc(irq);
	/* expanded: if (desc) — branch-profiling instrumentation */
	if (__builtin_constant_p(((desc))) ? !!((desc)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/irqdesc.h", .line = 159, }; ______r = !!((desc)); ______f.miss_hit[______r]++; ______r; }))
		lockdep_init_map(&(&desc->lock)->dep_map, "class", class, 0);
}
8800 | struct proc_dir_entry; | |
8801 | struct pt_regs; | |
8802 | struct notifier_block; | |
8803 | void create_prof_cpu_mask(struct proc_dir_entry *de); | |
8804 | int create_proc_profile(void); | |
8805 | enum profile_type { | |
8806 | PROFILE_TASK_EXIT, | |
8807 | PROFILE_MUNMAP | |
8808 | }; | |
8809 | extern int prof_on __attribute__((__section__(".data..read_mostly"))); | |
8810 | int profile_init(void); | |
8811 | int profile_setup(char *str); | |
8812 | void profile_tick(int type); | |
8813 | void profile_hits(int type, void *ip, unsigned int nr_hits); | |
/*
 * Record a single profiling hit at instruction pointer @ip when
 * profiling of @type is active.  The condition is the expanded ftrace
 * branch-profiling form of `if (unlikely(prof_on == type))`: the inner
 * ({...}) blocks record annotated-branch and branch hit/miss statistics,
 * then yield the boolean.  Code left byte-identical; only comments added.
 */
static inline __attribute__((always_inline)) void profile_hit(int type, void *ip)
{
	/* expanded: if (unlikely(prof_on == type)) */
	if (__builtin_constant_p((((__builtin_constant_p(prof_on == type) ? !!(prof_on == type) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/profile.h", .line = 61, }; ______r = __builtin_expect(!!(prof_on == type), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(prof_on == type) ? !!(prof_on == type) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/profile.h", .line = 61, }; ______r = __builtin_expect(!!(prof_on == type), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/profile.h", .line = 61, }; ______r = !!(((__builtin_constant_p(prof_on == type) ? !!(prof_on == type) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/profile.h", .line = 61, }; ______r = __builtin_expect(!!(prof_on == type), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
		profile_hits(type, ip, 1);
}
8819 | struct task_struct; | |
8820 | struct mm_struct; | |
8821 | void profile_task_exit(struct task_struct * task); | |
8822 | int profile_handoff_task(struct task_struct * task); | |
8823 | void profile_munmap(unsigned long addr); | |
8824 | int task_handoff_register(struct notifier_block * n); | |
8825 | int task_handoff_unregister(struct notifier_block * n); | |
8826 | int profile_event_register(enum profile_type, struct notifier_block * n); | |
8827 | int profile_event_unregister(enum profile_type, struct notifier_block * n); | |
8828 | int register_timer_hook(int (*hook)(struct pt_regs *)); | |
8829 | void unregister_timer_hook(int (*hook)(struct pt_regs *)); | |
8830 | struct pt_regs; | |
8831 | extern char _text[], _stext[], _etext[]; | |
8832 | extern char _data[], _sdata[], _edata[]; | |
8833 | extern char __bss_start[], __bss_stop[]; | |
8834 | extern char __init_begin[], __init_end[]; | |
8835 | extern char _sinittext[], _einittext[]; | |
8836 | extern char _end[]; | |
8837 | extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[]; | |
8838 | extern char __kprobes_text_start[], __kprobes_text_end[]; | |
8839 | extern char __entry_text_start[], __entry_text_end[]; | |
8840 | extern char __initdata_begin[], __initdata_end[]; | |
8841 | extern char __start_rodata[], __end_rodata[]; | |
8842 | extern char __ctors_start[], __ctors_end[]; | |
/*
 * Architecture hook: does @addr lie in arch-specific kernel text?
 * Stub on this architecture — always reports false.
 */
static inline __attribute__((always_inline)) int arch_is_kernel_text(unsigned long addr)
{
	(void)addr;	/* unused in the stub */
	return 0;
}
/*
 * Architecture hook: does @addr lie in arch-specific kernel data?
 * Stub on this architecture — always reports false.
 */
static inline __attribute__((always_inline)) int arch_is_kernel_data(unsigned long addr)
{
	(void)addr;	/* unused in the stub */
	return 0;
}
/*
 * One __ex_table entry: @insn is the address of a faulting instruction,
 * @fixup the address to resume at after the fault (used by
 * fixup_exception(); the uaccess asm below emits these entries).
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};
8854 | extern int fixup_exception(struct pt_regs *regs); | |
8855 | extern int __get_user_1(void); | |
8856 | extern int __get_user_2(void); | |
8857 | extern int __get_user_4(void); | |
8858 | extern int __get_user_8(void); | |
8859 | extern int __get_user_bad(void); | |
8860 | extern void __put_user_bad(void); | |
8861 | extern void __put_user_1(void); | |
8862 | extern void __put_user_2(void); | |
8863 | extern void __put_user_4(void); | |
8864 | extern void __put_user_8(void); | |
/* Dummy wide type used only to form "m" operands in the uaccess asm so the compiler assumes a large memory region is touched. */
struct __large_struct { unsigned long buf[100]; };
8866 | extern struct movsl_mask { | |
8867 | int mask; | |
8868 | } __attribute__((__aligned__((1 << (6))))) movsl_mask; | |
8869 | unsigned long __attribute__((warn_unused_result)) __copy_to_user_ll | |
8870 | (void *to, const void *from, unsigned long n); | |
8871 | unsigned long __attribute__((warn_unused_result)) __copy_from_user_ll | |
8872 | (void *to, const void *from, unsigned long n); | |
8873 | unsigned long __attribute__((warn_unused_result)) __copy_from_user_ll_nozero | |
8874 | (void *to, const void *from, unsigned long n); | |
8875 | unsigned long __attribute__((warn_unused_result)) __copy_from_user_ll_nocache | |
8876 | (void *to, const void *from, unsigned long n); | |
8877 | unsigned long __attribute__((warn_unused_result)) __copy_from_user_ll_nocache_nozero | |
8878 | (void *to, const void *from, unsigned long n); | |
/*
 * __copy_to_user_inatomic - copy @n bytes from kernel @from to user @to
 * without a might_fault() check (atomic-context variant).  For a
 * compile-time-constant @n of 1, 2 or 4 the copy is open-coded via the
 * expanded __put_user_size asm (exception-table fixup loads an error
 * value into ret on fault); otherwise it falls back to
 * __copy_to_user_ll().  Presumably returns the number of bytes not
 * copied, 0 on success — NOTE(review): confirm against uaccess docs.
 * Code left byte-identical; only comments added.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
__copy_to_user_inatomic(void *to, const void *from, unsigned long n)
{
	/* expanded: if (__builtin_constant_p(n)) — with branch profiling */
	if (__builtin_constant_p(((__builtin_constant_p(n)))) ? !!((__builtin_constant_p(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 46, }; ______r = !!((__builtin_constant_p(n))); ______f.miss_hit[______r]++; ______r; })) {
		unsigned long ret;
		/* open-coded single-store paths for the common small sizes */
		switch (n) {
		case 1:
			do { ret = 0; (void)0; switch (1) { case 1: asm volatile("1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "iq"(*(u8 *)from), "m" ((*(struct __large_struct *)((u8 *)to))), "i" (1), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u8 *)from), "m" ((*(struct __large_struct *)((u8 *)to))), "i" (1), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u8 *)from), "m" ((*(struct __large_struct *)((u8 *)to))), "i" (1), "0" (ret)); break; case 8: asm volatile("1: movl %%eax,0(%2)\n" "2: movl %%edx,4(%2)\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl %3,%0\n" " jmp 3b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "4b" "\n" " .previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "4b" "\n" " .previous\n" : "=r" (ret) : "A" ((__typeof__(*(u8 *)to))(*(u8 *)from)), "r" ((u8 *)to), "i" (1), "0" (ret)); break; default: __put_user_bad(); } } while (0)
			;
			return ret;
		case 2:
			do { ret = 0; (void)0; switch (2) { case 1: asm volatile("1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "iq"(*(u16 *)from), "m" ((*(struct __large_struct *)((u16 *)to))), "i" (2), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u16 *)from), "m" ((*(struct __large_struct *)((u16 *)to))), "i" (2), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u16 *)from), "m" ((*(struct __large_struct *)((u16 *)to))), "i" (2), "0" (ret)); break; case 8: asm volatile("1: movl %%eax,0(%2)\n" "2: movl %%edx,4(%2)\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl %3,%0\n" " jmp 3b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "4b" "\n" " .previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "4b" "\n" " .previous\n" : "=r" (ret) : "A" ((__typeof__(*(u16 *)to))(*(u16 *)from)), "r" ((u16 *)to), "i" (2), "0" (ret)); break; default: __put_user_bad(); } } while (0)
			;
			return ret;
		case 4:
			do { ret = 0; (void)0; switch (4) { case 1: asm volatile("1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "iq"(*(u32 *)from), "m" ((*(struct __large_struct *)((u32 *)to))), "i" (4), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u32 *)from), "m" ((*(struct __large_struct *)((u32 *)to))), "i" (4), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u32 *)from), "m" ((*(struct __large_struct *)((u32 *)to))), "i" (4), "0" (ret)); break; case 8: asm volatile("1: movl %%eax,0(%2)\n" "2: movl %%edx,4(%2)\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl %3,%0\n" " jmp 3b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "4b" "\n" " .previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "4b" "\n" " .previous\n" : "=r" (ret) : "A" ((__typeof__(*(u32 *)to))(*(u32 *)from)), "r" ((u32 *)to), "i" (4), "0" (ret)); break; default: __put_user_bad(); } } while (0)
			;
			return ret;
		}
	}
	/* generic length: defer to the out-of-line copy routine */
	return __copy_to_user_ll(to, from, n);
}
/*
 * __copy_to_user - copy @n bytes from kernel @from to user @to.
 * Same as __copy_to_user_inatomic() but first calls might_fault(),
 * i.e. it is the variant allowed to sleep/fault.  Return value is
 * whatever __copy_to_user_inatomic() returns.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
__copy_to_user(void *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
/*
 * __copy_from_user_inatomic - copy @n bytes from user @from to kernel
 * @to without a might_fault() check.  For constant @n of 1, 2 or 4 the
 * load is open-coded via the expanded __get_user_size asm (on fault the
 * fixup zeroes the destination register and loads an error into ret);
 * otherwise it falls back to __copy_from_user_ll_nozero() — the
 * "nozero" name suggests the tail of @to is not cleared on fault in the
 * generic path, NOTE(review): confirm.  Code left byte-identical;
 * only comments added.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long
__copy_from_user_inatomic(void *to, const void *from, unsigned long n)
{
	/* expanded: if (__builtin_constant_p(n)) — with branch profiling */
	if (__builtin_constant_p(((__builtin_constant_p(n)))) ? !!((__builtin_constant_p(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 96, }; ______r = !!((__builtin_constant_p(n))); ______f.miss_hit[______r]++; ______r; })) {
		unsigned long ret;
		switch (n) {
		case 1:
			do { ret = 0; (void)0; switch (1) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 8: (*(u8 *)to) = __get_user_bad(); break; default: (*(u8 *)to) = __get_user_bad(); } } while (0);
			return ret;
		case 2:
			do { ret = 0; (void)0; switch (2) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 8: (*(u16 *)to) = __get_user_bad(); break; default: (*(u16 *)to) = __get_user_bad(); } } while (0);
			return ret;
		case 4:
			do { ret = 0; (void)0; switch (4) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 8: (*(u32 *)to) = __get_user_bad(); break; default: (*(u32 *)to) = __get_user_bad(); } } while (0);
			return ret;
		}
	}
	return __copy_from_user_ll_nozero(to, from, n);
}
/*
 * __copy_from_user - copy @n bytes from user @from to kernel @to.
 * May-fault variant (calls might_fault()).  Constant sizes 1/2/4 are
 * open-coded via the expanded __get_user_size asm; the generic path is
 * __copy_from_user_ll().  Presumably returns the number of bytes not
 * copied, 0 on success — NOTE(review): confirm against uaccess docs.
 * Code left byte-identical; only comments added.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long
__copy_from_user(void *to, const void *from, unsigned long n)
{
	might_fault();
	/* expanded: if (__builtin_constant_p(n)) — with branch profiling */
	if (__builtin_constant_p(((__builtin_constant_p(n)))) ? !!((__builtin_constant_p(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 140, }; ______r = !!((__builtin_constant_p(n))); ______f.miss_hit[______r]++; ______r; })) {
		unsigned long ret;
		switch (n) {
		case 1:
			do { ret = 0; (void)0; switch (1) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 8: (*(u8 *)to) = __get_user_bad(); break; default: (*(u8 *)to) = __get_user_bad(); } } while (0);
			return ret;
		case 2:
			do { ret = 0; (void)0; switch (2) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 8: (*(u16 *)to) = __get_user_bad(); break; default: (*(u16 *)to) = __get_user_bad(); } } while (0);
			return ret;
		case 4:
			do { ret = 0; (void)0; switch (4) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 8: (*(u32 *)to) = __get_user_bad(); break; default: (*(u32 *)to) = __get_user_bad(); } } while (0);
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
/*
 * __copy_from_user_nocache - like __copy_from_user(), but the generic
 * path uses __copy_from_user_ll_nocache(); the "nocache" name suggests
 * the destination is written with cache-bypassing stores in the
 * out-of-line routine — NOTE(review): confirm, not visible here.
 * Constant sizes 1/2/4 still use the plain open-coded loads.
 * Code left byte-identical; only comments added.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long __copy_from_user_nocache(void *to,
				const void *from, unsigned long n)
{
	might_fault();
	/* expanded: if (__builtin_constant_p(n)) — with branch profiling */
	if (__builtin_constant_p(((__builtin_constant_p(n)))) ? !!((__builtin_constant_p(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 162, }; ______r = !!((__builtin_constant_p(n))); ______f.miss_hit[______r]++; ______r; })) {
		unsigned long ret;
		switch (n) {
		case 1:
			do { ret = 0; (void)0; switch (1) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 8: (*(u8 *)to) = __get_user_bad(); break; default: (*(u8 *)to) = __get_user_bad(); } } while (0);
			return ret;
		case 2:
			do { ret = 0; (void)0; switch (2) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 8: (*(u16 *)to) = __get_user_bad(); break; default: (*(u16 *)to) = __get_user_bad(); } } while (0);
			return ret;
		case 4:
			do { ret = 0; (void)0; switch (4) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 8: (*(u32 *)to) = __get_user_bad(); break; default: (*(u32 *)to) = __get_user_bad(); } } while (0);
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}
/*
 * __copy_from_user_inatomic_nocache - atomic-context, cache-bypassing
 * copy from user space; thin wrapper that defers directly to the
 * out-of-line __copy_from_user_ll_nocache_nozero() (no might_fault(),
 * no open-coded fast path).
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long
__copy_from_user_inatomic_nocache(void *to, const void *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}
8972 | unsigned long __attribute__((warn_unused_result)) copy_to_user(void *to, | |
8973 | const void *from, unsigned long n); | |
8974 | unsigned long __attribute__((warn_unused_result)) _copy_from_user(void *to, | |
8975 | const void *from, | |
8976 | unsigned long n); | |
8977 | extern void copy_from_user_overflow(void) | |
8978 | __attribute__((warning("copy_from_user() buffer size is not provably correct"))) | |
8979 | ; | |
/*
 * copy_from_user - hardened user->kernel copy.
 * Uses __builtin_object_size() on @to: when the destination size is
 * unknown (-1) or at least @n, it performs the copy via
 * _copy_from_user(); otherwise it calls copy_from_user_overflow(),
 * which carries a compile-time warning attribute flagging an
 * unprovable buffer size.  Returns @n (bytes not copied) as updated by
 * _copy_from_user().  The giant condition is the ftrace-expanded form
 * of `if (likely(sz == -1 || sz >= n))`.
 * NOTE(review): `sz` is int while `n` is unsigned long, so `sz >= n`
 * compares signed vs unsigned; the sz == -1 short-circuit guards the
 * usual case, but confirm behaviour for very large @n.
 * Code left byte-identical; only comments added.
 */
static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result)) copy_from_user(void *to,
					  const void *from,
					  unsigned long n)
{
	int sz = __builtin_object_size(to, 0);
	/* expanded: if (likely(sz == -1 || sz >= n)) */
	if (__builtin_constant_p((((__builtin_constant_p(sz == -1 || sz >= n) ? !!(sz == -1 || sz >= n) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 208, }; ______r = __builtin_expect(!!(sz == -1 || sz >= n), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(sz == -1 || sz >= n) ? !!(sz == -1 || sz >= n) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 208, }; ______r = __builtin_expect(!!(sz == -1 || sz >= n), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 208, }; ______r = !!(((__builtin_constant_p(sz == -1 || sz >= n) ? !!(sz == -1 || sz >= n) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 208, }; ______r = __builtin_expect(!!(sz == -1 || sz >= n), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
		n = _copy_from_user(to, from, n);
	else
		copy_from_user_overflow();
	return n;
}
8991 | long __attribute__((warn_unused_result)) strncpy_from_user(char *dst, const char *src, | |
8992 | long count); | |
8993 | long __attribute__((warn_unused_result)) __strncpy_from_user(char *dst, | |
8994 | const char *src, long count); | |
8995 | long strnlen_user(const char *str, long n); | |
8996 | unsigned long __attribute__((warn_unused_result)) clear_user(void *mem, unsigned long len); | |
8997 | unsigned long __attribute__((warn_unused_result)) __clear_user(void *mem, unsigned long len); | |
8998 | extern char __brk_base[], __brk_limit[]; | |
8999 | extern struct exception_table_entry __stop___ex_table[]; | |
9000 | extern void apic_timer_interrupt(void); | |
9001 | extern void x86_platform_ipi(void); | |
9002 | extern void error_interrupt(void); | |
9003 | extern void irq_work_interrupt(void); | |
9004 | extern void spurious_interrupt(void); | |
9005 | extern void thermal_interrupt(void); | |
9006 | extern void reschedule_interrupt(void); | |
9007 | extern void mce_self_interrupt(void); | |
9008 | extern void invalidate_interrupt(void); | |
9009 | extern void invalidate_interrupt0(void); | |
9010 | extern void invalidate_interrupt1(void); | |
9011 | extern void invalidate_interrupt2(void); | |
9012 | extern void invalidate_interrupt3(void); | |
9013 | extern void invalidate_interrupt4(void); | |
9014 | extern void invalidate_interrupt5(void); | |
9015 | extern void invalidate_interrupt6(void); | |
9016 | extern void invalidate_interrupt7(void); | |
9017 | extern void invalidate_interrupt8(void); | |
9018 | extern void invalidate_interrupt9(void); | |
9019 | extern void invalidate_interrupt10(void); | |
9020 | extern void invalidate_interrupt11(void); | |
9021 | extern void invalidate_interrupt12(void); | |
9022 | extern void invalidate_interrupt13(void); | |
9023 | extern void invalidate_interrupt14(void); | |
9024 | extern void invalidate_interrupt15(void); | |
9025 | extern void invalidate_interrupt16(void); | |
9026 | extern void invalidate_interrupt17(void); | |
9027 | extern void invalidate_interrupt18(void); | |
9028 | extern void invalidate_interrupt19(void); | |
9029 | extern void invalidate_interrupt20(void); | |
9030 | extern void invalidate_interrupt21(void); | |
9031 | extern void invalidate_interrupt22(void); | |
9032 | extern void invalidate_interrupt23(void); | |
9033 | extern void invalidate_interrupt24(void); | |
9034 | extern void invalidate_interrupt25(void); | |
9035 | extern void invalidate_interrupt26(void); | |
9036 | extern void invalidate_interrupt27(void); | |
9037 | extern void invalidate_interrupt28(void); | |
9038 | extern void invalidate_interrupt29(void); | |
9039 | extern void invalidate_interrupt30(void); | |
9040 | extern void invalidate_interrupt31(void); | |
9041 | extern void irq_move_cleanup_interrupt(void); | |
9042 | extern void reboot_interrupt(void); | |
9043 | extern void threshold_interrupt(void); | |
9044 | extern void call_function_interrupt(void); | |
9045 | extern void call_function_single_interrupt(void); | |
9046 | extern unsigned long io_apic_irqs; | |
9047 | extern void init_VISWS_APIC_irqs(void); | |
9048 | extern void setup_IO_APIC(void); | |
9049 | extern void disable_IO_APIC(void); | |
9050 | struct io_apic_irq_attr { | |
9051 | int ioapic; | |
9052 | int ioapic_pin; | |
9053 | int trigger; | |
9054 | int polarity; | |
9055 | }; | |
9056 | static inline __attribute__((always_inline)) void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr, | |
9057 | int ioapic, int ioapic_pin, | |
9058 | int trigger, int polarity) | |
9059 | { | |
9060 | irq_attr->ioapic = ioapic; | |
9061 | irq_attr->ioapic_pin = ioapic_pin; | |
9062 | irq_attr->trigger = trigger; | |
9063 | irq_attr->polarity = polarity; | |
9064 | } | |
9065 | struct irq_2_iommu { | |
9066 | struct intel_iommu *iommu; | |
9067 | u16 irte_index; | |
9068 | u16 sub_handle; | |
9069 | u8 irte_mask; | |
9070 | }; | |
9071 | struct irq_cfg { | |
9072 | struct irq_pin_list *irq_2_pin; | |
9073 | cpumask_var_t domain; | |
9074 | cpumask_var_t old_domain; | |
9075 | u8 vector; | |
9076 | u8 move_in_progress : 1; | |
9077 | }; | |
9078 | extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *); | |
9079 | extern void send_cleanup_vector(struct irq_cfg *); | |
9080 | struct irq_data; | |
9081 | int __ioapic_set_affinity(struct irq_data *, const struct cpumask *, | |
9082 | unsigned int *dest_id); | |
9083 | extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr); | |
9084 | extern void setup_ioapic_dest(void); | |
9085 | extern void enable_IO_APIC(void); | |
9086 | extern atomic_t irq_err_count; | |
9087 | extern atomic_t irq_mis_count; | |
9088 | extern void eisa_set_level_irq(unsigned int irq); | |
9089 | extern void smp_apic_timer_interrupt(struct pt_regs *); | |
9090 | extern void smp_spurious_interrupt(struct pt_regs *); | |
9091 | extern void smp_x86_platform_ipi(struct pt_regs *); | |
9092 | extern void smp_error_interrupt(struct pt_regs *); | |
9093 | extern __attribute__((regparm(0))) void smp_irq_move_cleanup_interrupt(void); | |
9094 | extern void smp_reschedule_interrupt(struct pt_regs *); | |
9095 | extern void smp_call_function_interrupt(struct pt_regs *); | |
9096 | extern void smp_call_function_single_interrupt(struct pt_regs *); | |
9097 | extern void smp_invalidate_interrupt(struct pt_regs *); | |
9098 | extern void (*__attribute__ ((__section__(".init.rodata"))) interrupt[256 -0x20])(void); | |
9099 | typedef int vector_irq_t[256]; | |
9100 | extern __attribute__((section(".data..percpu" ""))) __typeof__(vector_irq_t) vector_irq; | |
9101 | extern void setup_vector_irq(int cpu); | |
9102 | extern void lock_vector_lock(void); | |
9103 | extern void unlock_vector_lock(void); | |
9104 | extern void __setup_vector_irq(int cpu); | |
9105 | struct irqaction; | |
9106 | extern int setup_irq(unsigned int irq, struct irqaction *new); | |
9107 | extern void remove_irq(unsigned int irq, struct irqaction *act); | |
9108 | extern void irq_cpu_online(void); | |
9109 | extern void irq_cpu_offline(void); | |
9110 | extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask); | |
9111 | void irq_move_irq(struct irq_data *data); | |
9112 | void irq_move_masked_irq(struct irq_data *data); | |
9113 | extern int no_irq_affinity; | |
9114 | extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); | |
9115 | extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); | |
9116 | extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); | |
9117 | extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); | |
9118 | extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); | |
9119 | extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); | |
9120 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); | |
9121 | extern void handle_nested_irq(unsigned int irq); | |
9122 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, | |
9123 | irqreturn_t action_ret); | |
9124 | extern int noirqdebug_setup(char *str); | |
9125 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); | |
9126 | extern struct irq_chip no_irq_chip; | |
9127 | extern struct irq_chip dummy_irq_chip; | |
9128 | extern void | |
9129 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | |
9130 | irq_flow_handler_t handle, const char *name); | |
9131 | static inline __attribute__((always_inline)) void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, | |
9132 | irq_flow_handler_t handle) | |
9133 | { | |
9134 | irq_set_chip_and_handler_name(irq, chip, handle, ((void *)0)); | |
9135 | } | |
9136 | extern void | |
9137 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | |
9138 | const char *name); | |
9139 | static inline __attribute__((always_inline)) void | |
9140 | irq_set_handler(unsigned int irq, irq_flow_handler_t handle) | |
9141 | { | |
9142 | __irq_set_handler(irq, handle, 0, ((void *)0)); | |
9143 | } | |
9144 | static inline __attribute__((always_inline)) void | |
9145 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) | |
9146 | { | |
9147 | __irq_set_handler(irq, handle, 1, ((void *)0)); | |
9148 | } | |
9149 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); | |
9150 | static inline __attribute__((always_inline)) void irq_set_status_flags(unsigned int irq, unsigned long set) | |
9151 | { | |
9152 | irq_modify_status(irq, 0, set); | |
9153 | } | |
9154 | static inline __attribute__((always_inline)) void irq_clear_status_flags(unsigned int irq, unsigned long clr) | |
9155 | { | |
9156 | irq_modify_status(irq, clr, 0); | |
9157 | } | |
9158 | static inline __attribute__((always_inline)) void irq_set_noprobe(unsigned int irq) | |
9159 | { | |
9160 | irq_modify_status(irq, 0, IRQ_NOPROBE); | |
9161 | } | |
9162 | static inline __attribute__((always_inline)) void irq_set_probe(unsigned int irq) | |
9163 | { | |
9164 | irq_modify_status(irq, IRQ_NOPROBE, 0); | |
9165 | } | |
9166 | static inline __attribute__((always_inline)) void irq_set_nothread(unsigned int irq) | |
9167 | { | |
9168 | irq_modify_status(irq, 0, IRQ_NOTHREAD); | |
9169 | } | |
9170 | static inline __attribute__((always_inline)) void irq_set_thread(unsigned int irq) | |
9171 | { | |
9172 | irq_modify_status(irq, IRQ_NOTHREAD, 0); | |
9173 | } | |
9174 | static inline __attribute__((always_inline)) void irq_set_nested_thread(unsigned int irq, bool nest) | |
9175 | { | |
9176 | if (__builtin_constant_p(((nest))) ? !!((nest)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/irq.h", .line = 476, }; ______r = !!((nest)); ______f.miss_hit[______r]++; ______r; })) | |
9177 | irq_set_status_flags(irq, IRQ_NESTED_THREAD); | |
9178 | else | |
9179 | irq_clear_status_flags(irq, IRQ_NESTED_THREAD); | |
9180 | } | |
9181 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); | |
9182 | extern int create_irq(void); | |
9183 | extern void destroy_irq(unsigned int irq); | |
9184 | extern void dynamic_irq_cleanup(unsigned int irq); | |
9185 | static inline __attribute__((always_inline)) void dynamic_irq_init(unsigned int irq) | |
9186 | { | |
9187 | dynamic_irq_cleanup(irq); | |
9188 | } | |
9189 | extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); | |
9190 | extern int irq_set_handler_data(unsigned int irq, void *data); | |
9191 | extern int irq_set_chip_data(unsigned int irq, void *data); | |
9192 | extern int irq_set_irq_type(unsigned int irq, unsigned int type); | |
9193 | extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); | |
9194 | extern struct irq_data *irq_get_irq_data(unsigned int irq); | |
9195 | static inline __attribute__((always_inline)) struct irq_chip *irq_get_chip(unsigned int irq) | |
9196 | { | |
9197 | struct irq_data *d = irq_get_irq_data(irq); | |
9198 | return d ? d->chip : ((void *)0); | |
9199 | } | |
9200 | static inline __attribute__((always_inline)) struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) | |
9201 | { | |
9202 | return d->chip; | |
9203 | } | |
9204 | static inline __attribute__((always_inline)) void *irq_get_chip_data(unsigned int irq) | |
9205 | { | |
9206 | struct irq_data *d = irq_get_irq_data(irq); | |
9207 | return d ? d->chip_data : ((void *)0); | |
9208 | } | |
9209 | static inline __attribute__((always_inline)) void *irq_data_get_irq_chip_data(struct irq_data *d) | |
9210 | { | |
9211 | return d->chip_data; | |
9212 | } | |
9213 | static inline __attribute__((always_inline)) void *irq_get_handler_data(unsigned int irq) | |
9214 | { | |
9215 | struct irq_data *d = irq_get_irq_data(irq); | |
9216 | return d ? d->handler_data : ((void *)0); | |
9217 | } | |
9218 | static inline __attribute__((always_inline)) void *irq_data_get_irq_handler_data(struct irq_data *d) | |
9219 | { | |
9220 | return d->handler_data; | |
9221 | } | |
9222 | static inline __attribute__((always_inline)) struct msi_desc *irq_get_msi_desc(unsigned int irq) | |
9223 | { | |
9224 | struct irq_data *d = irq_get_irq_data(irq); | |
9225 | return d ? d->msi_desc : ((void *)0); | |
9226 | } | |
9227 | static inline __attribute__((always_inline)) struct msi_desc *irq_data_get_msi(struct irq_data *d) | |
9228 | { | |
9229 | return d->msi_desc; | |
9230 | } | |
9231 | int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); | |
9232 | void irq_free_descs(unsigned int irq, unsigned int cnt); | |
9233 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); | |
9234 | static inline __attribute__((always_inline)) int irq_alloc_desc(int node) | |
9235 | { | |
9236 | return irq_alloc_descs(-1, 0, 1, node); | |
9237 | } | |
9238 | static inline __attribute__((always_inline)) int irq_alloc_desc_at(unsigned int at, int node) | |
9239 | { | |
9240 | return irq_alloc_descs(at, at, 1, node); | |
9241 | } | |
9242 | static inline __attribute__((always_inline)) int irq_alloc_desc_from(unsigned int from, int node) | |
9243 | { | |
9244 | return irq_alloc_descs(-1, from, 1, node); | |
9245 | } | |
9246 | static inline __attribute__((always_inline)) void irq_free_desc(unsigned int irq) | |
9247 | { | |
9248 | irq_free_descs(irq, 1); | |
9249 | } | |
9250 | static inline __attribute__((always_inline)) int irq_reserve_irq(unsigned int irq) | |
9251 | { | |
9252 | return irq_reserve_irqs(irq, 1); | |
9253 | } | |
9254 | struct irq_chip_regs { | |
9255 | unsigned long enable; | |
9256 | unsigned long disable; | |
9257 | unsigned long mask; | |
9258 | unsigned long ack; | |
9259 | unsigned long eoi; | |
9260 | unsigned long type; | |
9261 | unsigned long polarity; | |
9262 | }; | |
9263 | struct irq_chip_type { | |
9264 | struct irq_chip chip; | |
9265 | struct irq_chip_regs regs; | |
9266 | irq_flow_handler_t handler; | |
9267 | u32 type; | |
9268 | }; | |
9269 | struct irq_chip_generic { | |
9270 | raw_spinlock_t lock; | |
9271 | void *reg_base; | |
9272 | unsigned int irq_base; | |
9273 | unsigned int irq_cnt; | |
9274 | u32 mask_cache; | |
9275 | u32 type_cache; | |
9276 | u32 polarity_cache; | |
9277 | u32 wake_enabled; | |
9278 | u32 wake_active; | |
9279 | unsigned int num_ct; | |
9280 | void *private; | |
9281 | struct list_head list; | |
9282 | struct irq_chip_type chip_types[0]; | |
9283 | }; | |
9284 | enum irq_gc_flags { | |
9285 | IRQ_GC_INIT_MASK_CACHE = 1 << 0, | |
9286 | IRQ_GC_INIT_NESTED_LOCK = 1 << 1, | |
9287 | }; | |
9288 | void irq_gc_noop(struct irq_data *d); | |
9289 | void irq_gc_mask_disable_reg(struct irq_data *d); | |
9290 | void irq_gc_mask_set_bit(struct irq_data *d); | |
9291 | void irq_gc_mask_clr_bit(struct irq_data *d); | |
9292 | void irq_gc_unmask_enable_reg(struct irq_data *d); | |
9293 | void irq_gc_ack_set_bit(struct irq_data *d); | |
9294 | void irq_gc_ack_clr_bit(struct irq_data *d); | |
9295 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); | |
9296 | void irq_gc_eoi(struct irq_data *d); | |
9297 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); | |
9298 | struct irq_chip_generic * | |
9299 | irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, | |
9300 | void *reg_base, irq_flow_handler_t handler); | |
9301 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, | |
9302 | enum irq_gc_flags flags, unsigned int clr, | |
9303 | unsigned int set); | |
9304 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type); | |
9305 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, | |
9306 | unsigned int clr, unsigned int set); | |
9307 | static inline __attribute__((always_inline)) struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) | |
9308 | { | |
9309 | return ({ const typeof( ((struct irq_chip_type *)0)->chip ) *__mptr = (d->chip); (struct irq_chip_type *)( (char *)__mptr - __builtin_offsetof(struct irq_chip_type,chip) );}); | |
9310 | } | |
9311 | static inline __attribute__((always_inline)) void irq_gc_lock(struct irq_chip_generic *gc) | |
9312 | { | |
9313 | _raw_spin_lock(&gc->lock); | |
9314 | } | |
9315 | static inline __attribute__((always_inline)) void irq_gc_unlock(struct irq_chip_generic *gc) | |
9316 | { | |
9317 | _raw_spin_unlock(&gc->lock); | |
9318 | } | |
9319 | typedef struct { | |
9320 | unsigned int __softirq_pending; | |
9321 | unsigned int __nmi_count; | |
9322 | unsigned int irq0_irqs; | |
9323 | unsigned int apic_timer_irqs; | |
9324 | unsigned int irq_spurious_count; | |
9325 | unsigned int x86_platform_ipis; | |
9326 | unsigned int apic_perf_irqs; | |
9327 | unsigned int apic_irq_work_irqs; | |
9328 | unsigned int irq_resched_count; | |
9329 | unsigned int irq_call_count; | |
9330 | unsigned int irq_tlb_count; | |
9331 | unsigned int irq_thermal_count; | |
9332 | unsigned int irq_threshold_count; | |
9333 | } __attribute__((__aligned__((1 << (6))))) irq_cpustat_t; | |
9334 | extern __attribute__((section(".data..percpu" ""))) __typeof__(irq_cpustat_t) irq_stat __attribute__((__aligned__((1 << (6))))); | |
9335 | extern void ack_bad_irq(unsigned int irq); | |
9336 | extern u64 arch_irq_stat_cpu(unsigned int cpu); | |
9337 | extern u64 arch_irq_stat(void); | |
9338 | extern void synchronize_irq(unsigned int irq); | |
9339 | struct task_struct; | |
9340 | extern void account_system_vtime(struct task_struct *tsk); | |
9341 | extern void irq_enter(void); | |
9342 | extern void irq_exit(void); | |
9343 | struct stat { | |
9344 | unsigned long st_dev; | |
9345 | unsigned long st_ino; | |
9346 | unsigned short st_mode; | |
9347 | unsigned short st_nlink; | |
9348 | unsigned short st_uid; | |
9349 | unsigned short st_gid; | |
9350 | unsigned long st_rdev; | |
9351 | unsigned long st_size; | |
9352 | unsigned long st_blksize; | |
9353 | unsigned long st_blocks; | |
9354 | unsigned long st_atime; | |
9355 | unsigned long st_atime_nsec; | |
9356 | unsigned long st_mtime; | |
9357 | unsigned long st_mtime_nsec; | |
9358 | unsigned long st_ctime; | |
9359 | unsigned long st_ctime_nsec; | |
9360 | unsigned long __unused4; | |
9361 | unsigned long __unused5; | |
9362 | }; | |
9363 | struct stat64 { | |
9364 | unsigned long long st_dev; | |
9365 | unsigned char __pad0[4]; | |
9366 | unsigned long __st_ino; | |
9367 | unsigned int st_mode; | |
9368 | unsigned int st_nlink; | |
9369 | unsigned long st_uid; | |
9370 | unsigned long st_gid; | |
9371 | unsigned long long st_rdev; | |
9372 | unsigned char __pad3[4]; | |
9373 | long long st_size; | |
9374 | unsigned long st_blksize; | |
9375 | unsigned long long st_blocks; | |
9376 | unsigned long st_atime; | |
9377 | unsigned long st_atime_nsec; | |
9378 | unsigned long st_mtime; | |
9379 | unsigned int st_mtime_nsec; | |
9380 | unsigned long st_ctime; | |
9381 | unsigned long st_ctime_nsec; | |
9382 | unsigned long long st_ino; | |
9383 | }; | |
9384 | struct __old_kernel_stat { | |
9385 | unsigned short st_dev; | |
9386 | unsigned short st_ino; | |
9387 | unsigned short st_mode; | |
9388 | unsigned short st_nlink; | |
9389 | unsigned short st_uid; | |
9390 | unsigned short st_gid; | |
9391 | unsigned short st_rdev; | |
9392 | unsigned long st_size; | |
9393 | unsigned long st_atime; | |
9394 | unsigned long st_mtime; | |
9395 | unsigned long st_ctime; | |
9396 | }; | |
9397 | struct kstat { | |
9398 | u64 ino; | |
9399 | dev_t dev; | |
9400 | umode_t mode; | |
9401 | unsigned int nlink; | |
9402 | uid_t uid; | |
9403 | gid_t gid; | |
9404 | dev_t rdev; | |
9405 | loff_t size; | |
9406 | struct timespec atime; | |
9407 | struct timespec mtime; | |
9408 | struct timespec ctime; | |
9409 | unsigned long blksize; | |
9410 | unsigned long long blocks; | |
9411 | }; | |
9412 | struct completion; | |
9413 | struct __sysctl_args { | |
9414 | int *name; | |
9415 | int nlen; | |
9416 | void *oldval; | |
9417 | size_t *oldlenp; | |
9418 | void *newval; | |
9419 | size_t newlen; | |
9420 | unsigned long __unused[4]; | |
9421 | }; | |
9422 | enum | |
9423 | { | |
9424 | CTL_KERN=1, | |
9425 | CTL_VM=2, | |
9426 | CTL_NET=3, | |
9427 | CTL_PROC=4, | |
9428 | CTL_FS=5, | |
9429 | CTL_DEBUG=6, | |
9430 | CTL_DEV=7, | |
9431 | CTL_BUS=8, | |
9432 | CTL_ABI=9, | |
9433 | CTL_CPU=10, | |
9434 | CTL_ARLAN=254, | |
9435 | CTL_S390DBF=5677, | |
9436 | CTL_SUNRPC=7249, | |
9437 | CTL_PM=9899, | |
9438 | CTL_FRV=9898, | |
9439 | }; | |
9440 | enum | |
9441 | { | |
9442 | CTL_BUS_ISA=1 | |
9443 | }; | |
9444 | enum | |
9445 | { | |
9446 | INOTIFY_MAX_USER_INSTANCES=1, | |
9447 | INOTIFY_MAX_USER_WATCHES=2, | |
9448 | INOTIFY_MAX_QUEUED_EVENTS=3 | |
9449 | }; | |
9450 | enum | |
9451 | { | |
9452 | KERN_OSTYPE=1, | |
9453 | KERN_OSRELEASE=2, | |
9454 | KERN_OSREV=3, | |
9455 | KERN_VERSION=4, | |
9456 | KERN_SECUREMASK=5, | |
9457 | KERN_PROF=6, | |
9458 | KERN_NODENAME=7, | |
9459 | KERN_DOMAINNAME=8, | |
9460 | KERN_PANIC=15, | |
9461 | KERN_REALROOTDEV=16, | |
9462 | KERN_SPARC_REBOOT=21, | |
9463 | KERN_CTLALTDEL=22, | |
9464 | KERN_PRINTK=23, | |
9465 | KERN_NAMETRANS=24, | |
9466 | KERN_PPC_HTABRECLAIM=25, | |
9467 | KERN_PPC_ZEROPAGED=26, | |
9468 | KERN_PPC_POWERSAVE_NAP=27, | |
9469 | KERN_MODPROBE=28, | |
9470 | KERN_SG_BIG_BUFF=29, | |
9471 | KERN_ACCT=30, | |
9472 | KERN_PPC_L2CR=31, | |
9473 | KERN_RTSIGNR=32, | |
9474 | KERN_RTSIGMAX=33, | |
9475 | KERN_SHMMAX=34, | |
9476 | KERN_MSGMAX=35, | |
9477 | KERN_MSGMNB=36, | |
9478 | KERN_MSGPOOL=37, | |
9479 | KERN_SYSRQ=38, | |
9480 | KERN_MAX_THREADS=39, | |
9481 | KERN_RANDOM=40, | |
9482 | KERN_SHMALL=41, | |
9483 | KERN_MSGMNI=42, | |
9484 | KERN_SEM=43, | |
9485 | KERN_SPARC_STOP_A=44, | |
9486 | KERN_SHMMNI=45, | |
9487 | KERN_OVERFLOWUID=46, | |
9488 | KERN_OVERFLOWGID=47, | |
9489 | KERN_SHMPATH=48, | |
9490 | KERN_HOTPLUG=49, | |
9491 | KERN_IEEE_EMULATION_WARNINGS=50, | |
9492 | KERN_S390_USER_DEBUG_LOGGING=51, | |
9493 | KERN_CORE_USES_PID=52, | |
9494 | KERN_TAINTED=53, | |
9495 | KERN_CADPID=54, | |
9496 | KERN_PIDMAX=55, | |
9497 | KERN_CORE_PATTERN=56, | |
9498 | KERN_PANIC_ON_OOPS=57, | |
9499 | KERN_HPPA_PWRSW=58, | |
9500 | KERN_HPPA_UNALIGNED=59, | |
9501 | KERN_PRINTK_RATELIMIT=60, | |
9502 | KERN_PRINTK_RATELIMIT_BURST=61, | |
9503 | KERN_PTY=62, | |
9504 | KERN_NGROUPS_MAX=63, | |
9505 | KERN_SPARC_SCONS_PWROFF=64, | |
9506 | KERN_HZ_TIMER=65, | |
9507 | KERN_UNKNOWN_NMI_PANIC=66, | |
9508 | KERN_BOOTLOADER_TYPE=67, | |
9509 | KERN_RANDOMIZE=68, | |
9510 | KERN_SETUID_DUMPABLE=69, | |
9511 | KERN_SPIN_RETRY=70, | |
9512 | KERN_ACPI_VIDEO_FLAGS=71, | |
9513 | KERN_IA64_UNALIGNED=72, | |
9514 | KERN_COMPAT_LOG=73, | |
9515 | KERN_MAX_LOCK_DEPTH=74, | |
9516 | KERN_NMI_WATCHDOG=75, | |
9517 | KERN_PANIC_ON_NMI=76, | |
9518 | }; | |
9519 | enum | |
9520 | { | |
9521 | VM_UNUSED1=1, | |
9522 | VM_UNUSED2=2, | |
9523 | VM_UNUSED3=3, | |
9524 | VM_UNUSED4=4, | |
9525 | VM_OVERCOMMIT_MEMORY=5, | |
9526 | VM_UNUSED5=6, | |
9527 | VM_UNUSED7=7, | |
9528 | VM_UNUSED8=8, | |
9529 | VM_UNUSED9=9, | |
9530 | VM_PAGE_CLUSTER=10, | |
9531 | VM_DIRTY_BACKGROUND=11, | |
9532 | VM_DIRTY_RATIO=12, | |
9533 | VM_DIRTY_WB_CS=13, | |
9534 | VM_DIRTY_EXPIRE_CS=14, | |
9535 | VM_NR_PDFLUSH_THREADS=15, | |
9536 | VM_OVERCOMMIT_RATIO=16, | |
9537 | VM_PAGEBUF=17, | |
9538 | VM_HUGETLB_PAGES=18, | |
9539 | VM_SWAPPINESS=19, | |
9540 | VM_LOWMEM_RESERVE_RATIO=20, | |
9541 | VM_MIN_FREE_KBYTES=21, | |
9542 | VM_MAX_MAP_COUNT=22, | |
9543 | VM_LAPTOP_MODE=23, | |
9544 | VM_BLOCK_DUMP=24, | |
9545 | VM_HUGETLB_GROUP=25, | |
9546 | VM_VFS_CACHE_PRESSURE=26, | |
9547 | VM_LEGACY_VA_LAYOUT=27, | |
9548 | VM_SWAP_TOKEN_TIMEOUT=28, | |
9549 | VM_DROP_PAGECACHE=29, | |
9550 | VM_PERCPU_PAGELIST_FRACTION=30, | |
9551 | VM_ZONE_RECLAIM_MODE=31, | |
9552 | VM_MIN_UNMAPPED=32, | |
9553 | VM_PANIC_ON_OOM=33, | |
9554 | VM_VDSO_ENABLED=34, | |
9555 | VM_MIN_SLAB=35, | |
9556 | }; | |
9557 | enum | |
9558 | { | |
9559 | NET_CORE=1, | |
9560 | NET_ETHER=2, | |
9561 | NET_802=3, | |
9562 | NET_UNIX=4, | |
9563 | NET_IPV4=5, | |
9564 | NET_IPX=6, | |
9565 | NET_ATALK=7, | |
9566 | NET_NETROM=8, | |
9567 | NET_AX25=9, | |
9568 | NET_BRIDGE=10, | |
9569 | NET_ROSE=11, | |
9570 | NET_IPV6=12, | |
9571 | NET_X25=13, | |
9572 | NET_TR=14, | |
9573 | NET_DECNET=15, | |
9574 | NET_ECONET=16, | |
9575 | NET_SCTP=17, | |
9576 | NET_LLC=18, | |
9577 | NET_NETFILTER=19, | |
9578 | NET_DCCP=20, | |
9579 | NET_IRDA=412, | |
9580 | }; | |
9581 | enum | |
9582 | { | |
9583 | RANDOM_POOLSIZE=1, | |
9584 | RANDOM_ENTROPY_COUNT=2, | |
9585 | RANDOM_READ_THRESH=3, | |
9586 | RANDOM_WRITE_THRESH=4, | |
9587 | RANDOM_BOOT_ID=5, | |
9588 | RANDOM_UUID=6 | |
9589 | }; | |
9590 | enum | |
9591 | { | |
9592 | PTY_MAX=1, | |
9593 | PTY_NR=2 | |
9594 | }; | |
9595 | enum | |
9596 | { | |
9597 | BUS_ISA_MEM_BASE=1, | |
9598 | BUS_ISA_PORT_BASE=2, | |
9599 | BUS_ISA_PORT_SHIFT=3 | |
9600 | }; | |
9601 | enum | |
9602 | { | |
9603 | NET_CORE_WMEM_MAX=1, | |
9604 | NET_CORE_RMEM_MAX=2, | |
9605 | NET_CORE_WMEM_DEFAULT=3, | |
9606 | NET_CORE_RMEM_DEFAULT=4, | |
9607 | NET_CORE_MAX_BACKLOG=6, | |
9608 | NET_CORE_FASTROUTE=7, | |
9609 | NET_CORE_MSG_COST=8, | |
9610 | NET_CORE_MSG_BURST=9, | |
9611 | NET_CORE_OPTMEM_MAX=10, | |
9612 | NET_CORE_HOT_LIST_LENGTH=11, | |
9613 | NET_CORE_DIVERT_VERSION=12, | |
9614 | NET_CORE_NO_CONG_THRESH=13, | |
9615 | NET_CORE_NO_CONG=14, | |
9616 | NET_CORE_LO_CONG=15, | |
9617 | NET_CORE_MOD_CONG=16, | |
9618 | NET_CORE_DEV_WEIGHT=17, | |
9619 | NET_CORE_SOMAXCONN=18, | |
9620 | NET_CORE_BUDGET=19, | |
9621 | NET_CORE_AEVENT_ETIME=20, | |
9622 | NET_CORE_AEVENT_RSEQTH=21, | |
9623 | NET_CORE_WARNINGS=22, | |
9624 | }; | |
9625 | enum | |
9626 | { | |
9627 | NET_UNIX_DESTROY_DELAY=1, | |
9628 | NET_UNIX_DELETE_DELAY=2, | |
9629 | NET_UNIX_MAX_DGRAM_QLEN=3, | |
9630 | }; | |
9631 | enum | |
9632 | { | |
9633 | NET_NF_CONNTRACK_MAX=1, | |
9634 | NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2, | |
9635 | NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3, | |
9636 | NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4, | |
9637 | NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5, | |
9638 | NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6, | |
9639 | NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7, | |
9640 | NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8, | |
9641 | NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9, | |
9642 | NET_NF_CONNTRACK_UDP_TIMEOUT=10, | |
9643 | NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11, | |
9644 | NET_NF_CONNTRACK_ICMP_TIMEOUT=12, | |
9645 | NET_NF_CONNTRACK_GENERIC_TIMEOUT=13, | |
9646 | NET_NF_CONNTRACK_BUCKETS=14, | |
9647 | NET_NF_CONNTRACK_LOG_INVALID=15, | |
9648 | NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16, | |
9649 | NET_NF_CONNTRACK_TCP_LOOSE=17, | |
9650 | NET_NF_CONNTRACK_TCP_BE_LIBERAL=18, | |
9651 | NET_NF_CONNTRACK_TCP_MAX_RETRANS=19, | |
9652 | NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20, | |
9653 | NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21, | |
9654 | NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22, | |
9655 | NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23, | |
9656 | NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24, | |
9657 | NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25, | |
9658 | NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26, | |
9659 | NET_NF_CONNTRACK_COUNT=27, | |
9660 | NET_NF_CONNTRACK_ICMPV6_TIMEOUT=28, | |
9661 | NET_NF_CONNTRACK_FRAG6_TIMEOUT=29, | |
9662 | NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30, | |
9663 | NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31, | |
9664 | NET_NF_CONNTRACK_CHECKSUM=32, | |
9665 | }; | |
9666 | enum | |
9667 | { | |
9668 | NET_IPV4_FORWARD=8, | |
9669 | NET_IPV4_DYNADDR=9, | |
9670 | NET_IPV4_CONF=16, | |
9671 | NET_IPV4_NEIGH=17, | |
9672 | NET_IPV4_ROUTE=18, | |
9673 | NET_IPV4_FIB_HASH=19, | |
9674 | NET_IPV4_NETFILTER=20, | |
9675 | NET_IPV4_TCP_TIMESTAMPS=33, | |
9676 | NET_IPV4_TCP_WINDOW_SCALING=34, | |
9677 | NET_IPV4_TCP_SACK=35, | |
9678 | NET_IPV4_TCP_RETRANS_COLLAPSE=36, | |
9679 | NET_IPV4_DEFAULT_TTL=37, | |
9680 | NET_IPV4_AUTOCONFIG=38, | |
9681 | NET_IPV4_NO_PMTU_DISC=39, | |
9682 | NET_IPV4_TCP_SYN_RETRIES=40, | |
9683 | NET_IPV4_IPFRAG_HIGH_THRESH=41, | |
9684 | NET_IPV4_IPFRAG_LOW_THRESH=42, | |
9685 | NET_IPV4_IPFRAG_TIME=43, | |
9686 | NET_IPV4_TCP_MAX_KA_PROBES=44, | |
9687 | NET_IPV4_TCP_KEEPALIVE_TIME=45, | |
9688 | NET_IPV4_TCP_KEEPALIVE_PROBES=46, | |
9689 | NET_IPV4_TCP_RETRIES1=47, | |
9690 | NET_IPV4_TCP_RETRIES2=48, | |
9691 | NET_IPV4_TCP_FIN_TIMEOUT=49, | |
9692 | NET_IPV4_IP_MASQ_DEBUG=50, | |
9693 | NET_TCP_SYNCOOKIES=51, | |
9694 | NET_TCP_STDURG=52, | |
9695 | NET_TCP_RFC1337=53, | |
9696 | NET_TCP_SYN_TAILDROP=54, | |
9697 | NET_TCP_MAX_SYN_BACKLOG=55, | |
9698 | NET_IPV4_LOCAL_PORT_RANGE=56, | |
9699 | NET_IPV4_ICMP_ECHO_IGNORE_ALL=57, | |
9700 | NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS=58, | |
9701 | NET_IPV4_ICMP_SOURCEQUENCH_RATE=59, | |
9702 | NET_IPV4_ICMP_DESTUNREACH_RATE=60, | |
9703 | NET_IPV4_ICMP_TIMEEXCEED_RATE=61, | |
9704 | NET_IPV4_ICMP_PARAMPROB_RATE=62, | |
9705 | NET_IPV4_ICMP_ECHOREPLY_RATE=63, | |
9706 | NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES=64, | |
9707 | NET_IPV4_IGMP_MAX_MEMBERSHIPS=65, | |
9708 | NET_TCP_TW_RECYCLE=66, | |
9709 | NET_IPV4_ALWAYS_DEFRAG=67, | |
9710 | NET_IPV4_TCP_KEEPALIVE_INTVL=68, | |
9711 | NET_IPV4_INET_PEER_THRESHOLD=69, | |
9712 | NET_IPV4_INET_PEER_MINTTL=70, | |
9713 | NET_IPV4_INET_PEER_MAXTTL=71, | |
9714 | NET_IPV4_INET_PEER_GC_MINTIME=72, | |
9715 | NET_IPV4_INET_PEER_GC_MAXTIME=73, | |
9716 | NET_TCP_ORPHAN_RETRIES=74, | |
9717 | NET_TCP_ABORT_ON_OVERFLOW=75, | |
9718 | NET_TCP_SYNACK_RETRIES=76, | |
9719 | NET_TCP_MAX_ORPHANS=77, | |
9720 | NET_TCP_MAX_TW_BUCKETS=78, | |
9721 | NET_TCP_FACK=79, | |
9722 | NET_TCP_REORDERING=80, | |
9723 | NET_TCP_ECN=81, | |
9724 | NET_TCP_DSACK=82, | |
9725 | NET_TCP_MEM=83, | |
9726 | NET_TCP_WMEM=84, | |
9727 | NET_TCP_RMEM=85, | |
9728 | NET_TCP_APP_WIN=86, | |
9729 | NET_TCP_ADV_WIN_SCALE=87, | |
9730 | NET_IPV4_NONLOCAL_BIND=88, | |
9731 | NET_IPV4_ICMP_RATELIMIT=89, | |
9732 | NET_IPV4_ICMP_RATEMASK=90, | |
9733 | NET_TCP_TW_REUSE=91, | |
9734 | NET_TCP_FRTO=92, | |
9735 | NET_TCP_LOW_LATENCY=93, | |
9736 | NET_IPV4_IPFRAG_SECRET_INTERVAL=94, | |
9737 | NET_IPV4_IGMP_MAX_MSF=96, | |
9738 | NET_TCP_NO_METRICS_SAVE=97, | |
9739 | NET_TCP_DEFAULT_WIN_SCALE=105, | |
9740 | NET_TCP_MODERATE_RCVBUF=106, | |
9741 | NET_TCP_TSO_WIN_DIVISOR=107, | |
9742 | NET_TCP_BIC_BETA=108, | |
9743 | NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109, | |
9744 | NET_TCP_CONG_CONTROL=110, | |
9745 | NET_TCP_ABC=111, | |
9746 | NET_IPV4_IPFRAG_MAX_DIST=112, | |
9747 | NET_TCP_MTU_PROBING=113, | |
9748 | NET_TCP_BASE_MSS=114, | |
9749 | NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115, | |
9750 | NET_TCP_DMA_COPYBREAK=116, | |
9751 | NET_TCP_SLOW_START_AFTER_IDLE=117, | |
9752 | NET_CIPSOV4_CACHE_ENABLE=118, | |
9753 | NET_CIPSOV4_CACHE_BUCKET_SIZE=119, | |
9754 | NET_CIPSOV4_RBM_OPTFMT=120, | |
9755 | NET_CIPSOV4_RBM_STRICTVALID=121, | |
9756 | NET_TCP_AVAIL_CONG_CONTROL=122, | |
9757 | NET_TCP_ALLOWED_CONG_CONTROL=123, | |
9758 | NET_TCP_MAX_SSTHRESH=124, | |
9759 | NET_TCP_FRTO_RESPONSE=125, | |
9760 | }; | |
/*
 * Apparently the binary-sysctl ID numbers for the net.ipv4.route.* table
 * (ctl_table numbering, cf. struct ctl_table later in this file).
 * These values are a userspace ABI: never renumber or reuse them.
 */
enum {
        NET_IPV4_ROUTE_FLUSH = 1,
        NET_IPV4_ROUTE_MIN_DELAY = 2,
        NET_IPV4_ROUTE_MAX_DELAY = 3,
        NET_IPV4_ROUTE_GC_THRESH = 4,
        NET_IPV4_ROUTE_MAX_SIZE = 5,
        NET_IPV4_ROUTE_GC_MIN_INTERVAL = 6,
        NET_IPV4_ROUTE_GC_TIMEOUT = 7,
        NET_IPV4_ROUTE_GC_INTERVAL = 8,
        NET_IPV4_ROUTE_REDIRECT_LOAD = 9,
        NET_IPV4_ROUTE_REDIRECT_NUMBER = 10,
        NET_IPV4_ROUTE_REDIRECT_SILENCE = 11,
        NET_IPV4_ROUTE_ERROR_COST = 12,
        NET_IPV4_ROUTE_ERROR_BURST = 13,
        NET_IPV4_ROUTE_GC_ELASTICITY = 14,
        NET_IPV4_ROUTE_MTU_EXPIRES = 15,
        NET_IPV4_ROUTE_MIN_PMTU = 16,
        NET_IPV4_ROUTE_MIN_ADVMSS = 17,
        NET_IPV4_ROUTE_SECRET_INTERVAL = 18,
        NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS = 19,
};
/* Wildcard per-protocol config selectors (negative so they can't collide
 * with real interface indices). */
enum {
        NET_PROTO_CONF_ALL = -2,
        NET_PROTO_CONF_DEFAULT = -3
};
/* Per-device net.ipv4.conf.* entry IDs — ABI values, do not renumber. */
enum {
        NET_IPV4_CONF_FORWARDING = 1,
        NET_IPV4_CONF_MC_FORWARDING = 2,
        NET_IPV4_CONF_PROXY_ARP = 3,
        NET_IPV4_CONF_ACCEPT_REDIRECTS = 4,
        NET_IPV4_CONF_SECURE_REDIRECTS = 5,
        NET_IPV4_CONF_SEND_REDIRECTS = 6,
        NET_IPV4_CONF_SHARED_MEDIA = 7,
        NET_IPV4_CONF_RP_FILTER = 8,
        NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE = 9,
        NET_IPV4_CONF_BOOTP_RELAY = 10,
        NET_IPV4_CONF_LOG_MARTIANS = 11,
        NET_IPV4_CONF_TAG = 12,
        NET_IPV4_CONF_ARPFILTER = 13,
        NET_IPV4_CONF_MEDIUM_ID = 14,
        NET_IPV4_CONF_NOXFRM = 15,
        NET_IPV4_CONF_NOPOLICY = 16,
        NET_IPV4_CONF_FORCE_IGMP_VERSION = 17,
        NET_IPV4_CONF_ARP_ANNOUNCE = 18,
        NET_IPV4_CONF_ARP_IGNORE = 19,
        NET_IPV4_CONF_PROMOTE_SECONDARIES = 20,
        NET_IPV4_CONF_ARP_ACCEPT = 21,
        NET_IPV4_CONF_ARP_NOTIFY = 22,
};
/* net.ipv4.netfilter conntrack tunable IDs — ABI values, do not renumber. */
enum {
        NET_IPV4_NF_CONNTRACK_MAX = 1,
        NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT = 2,
        NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV = 3,
        NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED = 4,
        NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT = 5,
        NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT = 6,
        NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK = 7,
        NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT = 8,
        NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE = 9,
        NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT = 10,
        NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM = 11,
        NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT = 12,
        NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT = 13,
        NET_IPV4_NF_CONNTRACK_BUCKETS = 14,
        NET_IPV4_NF_CONNTRACK_LOG_INVALID = 15,
        NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS = 16,
        NET_IPV4_NF_CONNTRACK_TCP_LOOSE = 17,
        NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL = 18,
        NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS = 19,
        NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED = 20,
        NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT = 21,
        NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED = 22,
        NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED = 23,
        NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT = 24,
        NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD = 25,
        NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT = 26,
        NET_IPV4_NF_CONNTRACK_COUNT = 27,
        NET_IPV4_NF_CONNTRACK_CHECKSUM = 28,
};
/* Top-level net.ipv6 entry IDs — ABI values, do not renumber. */
enum {
        NET_IPV6_CONF = 16,
        NET_IPV6_NEIGH = 17,
        NET_IPV6_ROUTE = 18,
        NET_IPV6_ICMP = 19,
        NET_IPV6_BINDV6ONLY = 20,
        NET_IPV6_IP6FRAG_HIGH_THRESH = 21,
        NET_IPV6_IP6FRAG_LOW_THRESH = 22,
        NET_IPV6_IP6FRAG_TIME = 23,
        NET_IPV6_IP6FRAG_SECRET_INTERVAL = 24,
        NET_IPV6_MLD_MAX_MSF = 25,
};
/* net.ipv6.route.* entry IDs. */
enum {
        NET_IPV6_ROUTE_FLUSH = 1,
        NET_IPV6_ROUTE_GC_THRESH = 2,
        NET_IPV6_ROUTE_MAX_SIZE = 3,
        NET_IPV6_ROUTE_GC_MIN_INTERVAL = 4,
        NET_IPV6_ROUTE_GC_TIMEOUT = 5,
        NET_IPV6_ROUTE_GC_INTERVAL = 6,
        NET_IPV6_ROUTE_GC_ELASTICITY = 7,
        NET_IPV6_ROUTE_MTU_EXPIRES = 8,
        NET_IPV6_ROUTE_MIN_ADVMSS = 9,
        NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS = 10
};
/* Per-device net.ipv6.conf.* entry IDs.  Note 24 is intentionally unused
 * (ACCEPT_SOURCE_ROUTE jumps from 23 to 25); __NET_IPV6_MAX is one past
 * the highest assigned ID. */
enum {
        NET_IPV6_FORWARDING = 1,
        NET_IPV6_HOP_LIMIT = 2,
        NET_IPV6_MTU = 3,
        NET_IPV6_ACCEPT_RA = 4,
        NET_IPV6_ACCEPT_REDIRECTS = 5,
        NET_IPV6_AUTOCONF = 6,
        NET_IPV6_DAD_TRANSMITS = 7,
        NET_IPV6_RTR_SOLICITS = 8,
        NET_IPV6_RTR_SOLICIT_INTERVAL = 9,
        NET_IPV6_RTR_SOLICIT_DELAY = 10,
        NET_IPV6_USE_TEMPADDR = 11,
        NET_IPV6_TEMP_VALID_LFT = 12,
        NET_IPV6_TEMP_PREFERED_LFT = 13,
        NET_IPV6_REGEN_MAX_RETRY = 14,
        NET_IPV6_MAX_DESYNC_FACTOR = 15,
        NET_IPV6_MAX_ADDRESSES = 16,
        NET_IPV6_FORCE_MLD_VERSION = 17,
        NET_IPV6_ACCEPT_RA_DEFRTR = 18,
        NET_IPV6_ACCEPT_RA_PINFO = 19,
        NET_IPV6_ACCEPT_RA_RTR_PREF = 20,
        NET_IPV6_RTR_PROBE_INTERVAL = 21,
        NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN = 22,
        NET_IPV6_PROXY_NDP = 23,
        NET_IPV6_ACCEPT_SOURCE_ROUTE = 25,
        __NET_IPV6_MAX
};
/* net.ipv6.icmp.* entry IDs. */
enum {
        NET_IPV6_ICMP_RATELIMIT = 1
};
/* Per-device neighbour-table (ARP/NDISC) tunable IDs — ABI values. */
enum {
        NET_NEIGH_MCAST_SOLICIT = 1,
        NET_NEIGH_UCAST_SOLICIT = 2,
        NET_NEIGH_APP_SOLICIT = 3,
        NET_NEIGH_RETRANS_TIME = 4,
        NET_NEIGH_REACHABLE_TIME = 5,
        NET_NEIGH_DELAY_PROBE_TIME = 6,
        NET_NEIGH_GC_STALE_TIME = 7,
        NET_NEIGH_UNRES_QLEN = 8,
        NET_NEIGH_PROXY_QLEN = 9,
        NET_NEIGH_ANYCAST_DELAY = 10,
        NET_NEIGH_PROXY_DELAY = 11,
        NET_NEIGH_LOCKTIME = 12,
        NET_NEIGH_GC_INTERVAL = 13,
        NET_NEIGH_GC_THRESH1 = 14,
        NET_NEIGH_GC_THRESH2 = 15,
        NET_NEIGH_GC_THRESH3 = 16,
        NET_NEIGH_RETRANS_TIME_MS = 17,
        NET_NEIGH_REACHABLE_TIME_MS = 18,
};
/* net.dccp.* entry IDs. */
enum {
        NET_DCCP_DEFAULT = 1,
};
/* net.ipx.* entry IDs. */
enum {
        NET_IPX_PPROP_BROADCASTING = 1,
        NET_IPX_FORWARDING = 2
};
/* net.llc.* subtree selectors. */
enum {
        NET_LLC2 = 1,
        NET_LLC_STATION = 2,
};
/* net.llc.llc2.* subtree selector. */
enum {
        NET_LLC2_TIMEOUT = 1,
};
/* net.llc.station.* entry IDs. */
enum {
        NET_LLC_STATION_ACK_TIMEOUT = 1,
};
/* net.llc.llc2.timeout.* entry IDs. */
enum {
        NET_LLC2_ACK_TIMEOUT = 1,
        NET_LLC2_P_TIMEOUT = 2,
        NET_LLC2_REJ_TIMEOUT = 3,
        NET_LLC2_BUSY_TIMEOUT = 4,
};
/* net.appletalk.* entry IDs — ABI values, do not renumber. */
enum {
        NET_ATALK_AARP_EXPIRY_TIME = 1,
        NET_ATALK_AARP_TICK_TIME = 2,
        NET_ATALK_AARP_RETRANSMIT_LIMIT = 3,
        NET_ATALK_AARP_RESOLVE_TIME = 4
};
/* net.netrom.* entry IDs. */
enum {
        NET_NETROM_DEFAULT_PATH_QUALITY = 1,
        NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER = 2,
        NET_NETROM_NETWORK_TTL_INITIALISER = 3,
        NET_NETROM_TRANSPORT_TIMEOUT = 4,
        NET_NETROM_TRANSPORT_MAXIMUM_TRIES = 5,
        NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY = 6,
        NET_NETROM_TRANSPORT_BUSY_DELAY = 7,
        NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE = 8,
        NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT = 9,
        NET_NETROM_ROUTING_CONTROL = 10,
        NET_NETROM_LINK_FAILS_COUNT = 11,
        NET_NETROM_RESET = 12
};
/* net.ax25.* entry IDs. */
enum {
        NET_AX25_IP_DEFAULT_MODE = 1,
        NET_AX25_DEFAULT_MODE = 2,
        NET_AX25_BACKOFF_TYPE = 3,
        NET_AX25_CONNECT_MODE = 4,
        NET_AX25_STANDARD_WINDOW = 5,
        NET_AX25_EXTENDED_WINDOW = 6,
        NET_AX25_T1_TIMEOUT = 7,
        NET_AX25_T2_TIMEOUT = 8,
        NET_AX25_T3_TIMEOUT = 9,
        NET_AX25_IDLE_TIMEOUT = 10,
        NET_AX25_N2 = 11,
        NET_AX25_PACLEN = 12,
        NET_AX25_PROTOCOL = 13,
        NET_AX25_DAMA_SLAVE_TIMEOUT = 14
};
/* net.rose.* entry IDs. */
enum {
        NET_ROSE_RESTART_REQUEST_TIMEOUT = 1,
        NET_ROSE_CALL_REQUEST_TIMEOUT = 2,
        NET_ROSE_RESET_REQUEST_TIMEOUT = 3,
        NET_ROSE_CLEAR_REQUEST_TIMEOUT = 4,
        NET_ROSE_ACK_HOLD_BACK_TIMEOUT = 5,
        NET_ROSE_ROUTING_CONTROL = 6,
        NET_ROSE_LINK_FAIL_TIMEOUT = 7,
        NET_ROSE_MAX_VCS = 8,
        NET_ROSE_WINDOW_SIZE = 9,
        NET_ROSE_NO_ACTIVITY_TIMEOUT = 10
};
/* net.x25.* entry IDs. */
enum {
        NET_X25_RESTART_REQUEST_TIMEOUT = 1,
        NET_X25_CALL_REQUEST_TIMEOUT = 2,
        NET_X25_RESET_REQUEST_TIMEOUT = 3,
        NET_X25_CLEAR_REQUEST_TIMEOUT = 4,
        NET_X25_ACK_HOLD_BACK_TIMEOUT = 5,
        NET_X25_FORWARD = 6
};
/* net.token-ring.* entry IDs. */
enum {
        NET_TR_RIF_TIMEOUT = 1
};
/* net.decnet.* entry IDs — ABI values, do not renumber. */
enum {
        NET_DECNET_NODE_TYPE = 1,
        NET_DECNET_NODE_ADDRESS = 2,
        NET_DECNET_NODE_NAME = 3,
        NET_DECNET_DEFAULT_DEVICE = 4,
        NET_DECNET_TIME_WAIT = 5,
        NET_DECNET_DN_COUNT = 6,
        NET_DECNET_DI_COUNT = 7,
        NET_DECNET_DR_COUNT = 8,
        NET_DECNET_DST_GC_INTERVAL = 9,
        NET_DECNET_CONF = 10,
        NET_DECNET_NO_FC_MAX_CWND = 11,
        NET_DECNET_MEM = 12,
        NET_DECNET_RMEM = 13,
        NET_DECNET_WMEM = 14,
        NET_DECNET_DEBUG_LEVEL = 255
};
/* net.decnet.conf.<class> selectors (negative so they can't collide with
 * real device indices). */
enum {
        NET_DECNET_CONF_LOOPBACK = -2,
        NET_DECNET_CONF_DDCMP = -3,
        NET_DECNET_CONF_PPP = -4,
        NET_DECNET_CONF_X25 = -5,
        NET_DECNET_CONF_GRE = -6,
        NET_DECNET_CONF_ETHER = -7
};
/* Per-device net.decnet.conf.<dev>.* entry IDs. */
enum {
        NET_DECNET_CONF_DEV_PRIORITY = 1,
        NET_DECNET_CONF_DEV_T1 = 2,
        NET_DECNET_CONF_DEV_T2 = 3,
        NET_DECNET_CONF_DEV_T3 = 4,
        NET_DECNET_CONF_DEV_FORWARDING = 5,
        NET_DECNET_CONF_DEV_BLKSIZE = 6,
        NET_DECNET_CONF_DEV_STATE = 7
};
/* net.sctp.* entry IDs — ABI values, do not renumber. */
enum {
        NET_SCTP_RTO_INITIAL = 1,
        NET_SCTP_RTO_MIN = 2,
        NET_SCTP_RTO_MAX = 3,
        NET_SCTP_RTO_ALPHA = 4,
        NET_SCTP_RTO_BETA = 5,
        NET_SCTP_VALID_COOKIE_LIFE = 6,
        NET_SCTP_ASSOCIATION_MAX_RETRANS = 7,
        NET_SCTP_PATH_MAX_RETRANS = 8,
        NET_SCTP_MAX_INIT_RETRANSMITS = 9,
        NET_SCTP_HB_INTERVAL = 10,
        NET_SCTP_PRESERVE_ENABLE = 11,
        NET_SCTP_MAX_BURST = 12,
        NET_SCTP_ADDIP_ENABLE = 13,
        NET_SCTP_PRSCTP_ENABLE = 14,
        NET_SCTP_SNDBUF_POLICY = 15,
        NET_SCTP_SACK_TIMEOUT = 16,
        NET_SCTP_RCVBUF_POLICY = 17,
};
/* net.bridge.* netfilter-call entry IDs. */
enum {
        NET_BRIDGE_NF_CALL_ARPTABLES = 1,
        NET_BRIDGE_NF_CALL_IPTABLES = 2,
        NET_BRIDGE_NF_CALL_IP6TABLES = 3,
        NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
        NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
};
/* net.irda.* entry IDs. */
enum {
        NET_IRDA_DISCOVERY = 1,
        NET_IRDA_DEVNAME = 2,
        NET_IRDA_DEBUG = 3,
        NET_IRDA_FAST_POLL = 4,
        NET_IRDA_DISCOVERY_SLOTS = 5,
        NET_IRDA_DISCOVERY_TIMEOUT = 6,
        NET_IRDA_SLOT_TIMEOUT = 7,
        NET_IRDA_MAX_BAUD_RATE = 8,
        NET_IRDA_MIN_TX_TURN_TIME = 9,
        NET_IRDA_MAX_TX_DATA_SIZE = 10,
        NET_IRDA_MAX_TX_WINDOW = 11,
        NET_IRDA_MAX_NOREPLY_TIME = 12,
        NET_IRDA_WARN_NOREPLY_TIME = 13,
        NET_IRDA_LAP_KEEPALIVE_TIME = 14,
};
/* fs.* sysctl entry IDs — ABI values.  Note the jump to 988 for FS_OCFS2. */
enum {
        FS_NRINODE = 1,
        FS_STATINODE = 2,
        FS_MAXINODE = 3,
        FS_NRDQUOT = 4,
        FS_MAXDQUOT = 5,
        FS_NRFILE = 6,
        FS_MAXFILE = 7,
        FS_DENTRY = 8,
        FS_NRSUPER = 9,
        FS_MAXSUPER = 10,
        FS_OVERFLOWUID = 11,
        FS_OVERFLOWGID = 12,
        FS_LEASES = 13,
        FS_DIR_NOTIFY = 14,
        FS_LEASE_TIME = 15,
        FS_DQSTATS = 16,
        FS_XFS = 17,
        FS_AIO_NR = 18,
        FS_AIO_MAX_NR = 19,
        FS_INOTIFY = 20,
        FS_OCFS2 = 988,
};
/* fs.quota.* (disk-quota statistics) entry IDs. */
enum {
        FS_DQ_LOOKUPS = 1,
        FS_DQ_DROPS = 2,
        FS_DQ_READS = 3,
        FS_DQ_WRITES = 4,
        FS_DQ_CACHE_HITS = 5,
        FS_DQ_ALLOCATED = 6,
        FS_DQ_FREE = 7,
        FS_DQ_SYNCS = 8,
        FS_DQ_WARNINGS = 9,
};
/* dev.* sysctl subtree selectors — ABI values, do not renumber. */
enum {
        DEV_CDROM = 1,
        DEV_HWMON = 2,
        DEV_PARPORT = 3,
        DEV_RAID = 4,
        DEV_MAC_HID = 5,
        DEV_SCSI = 6,
        DEV_IPMI = 7,
};
/* dev.cdrom.* entry IDs. */
enum {
        DEV_CDROM_INFO = 1,
        DEV_CDROM_AUTOCLOSE = 2,
        DEV_CDROM_AUTOEJECT = 3,
        DEV_CDROM_DEBUG = 4,
        DEV_CDROM_LOCK = 5,
        DEV_CDROM_CHECK_MEDIA = 6
};
/* dev.parport default-settings selector (negative: not a real port index). */
enum {
        DEV_PARPORT_DEFAULT = -3
};
/* dev.raid.* entry IDs. */
enum {
        DEV_RAID_SPEED_LIMIT_MIN = 1,
        DEV_RAID_SPEED_LIMIT_MAX = 2
};
/* dev.parport.default.* entry IDs. */
enum {
        DEV_PARPORT_DEFAULT_TIMESLICE = 1,
        DEV_PARPORT_DEFAULT_SPINTIME = 2
};
/* Per-port dev.parport.<port>.* entry IDs (AUTOPROBE occupies 16 upward). */
enum {
        DEV_PARPORT_SPINTIME = 1,
        DEV_PARPORT_BASE_ADDR = 2,
        DEV_PARPORT_IRQ = 3,
        DEV_PARPORT_DMA = 4,
        DEV_PARPORT_MODES = 5,
        DEV_PARPORT_DEVICES = 6,
        DEV_PARPORT_AUTOPROBE = 16
};
/* dev.parport.<port>.devices selector (negative: not a device index). */
enum {
        DEV_PARPORT_DEVICES_ACTIVE = -3,
};
/* dev.parport.<port>.devices.<dev>.* entry IDs. */
enum {
        DEV_PARPORT_DEVICE_TIMESLICE = 1,
};
/* dev.mac_hid.* entry IDs. */
enum {
        DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES = 1,
        DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES = 2,
        DEV_MAC_HID_MOUSE_BUTTON_EMULATION = 3,
        DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE = 4,
        DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE = 5,
        DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES = 6
};
/* dev.scsi.* entry IDs. */
enum {
        DEV_SCSI_LOGGING_LEVEL = 1,
};
/* dev.ipmi.* entry IDs. */
enum {
        DEV_IPMI_POWEROFF_POWERCYCLE = 1,
};
/* abi.* (binary-personality emulation) entry IDs. */
enum {
        ABI_DEFHANDLER_COFF = 1,
        ABI_DEFHANDLER_ELF = 2,
        ABI_DEFHANDLER_LCALL7 = 3,
        ABI_DEFHANDLER_LIBCSO = 4,
        ABI_TRACE = 5,
        ABI_FAKE_UTSNAME = 6,
};
/*
 * RCU (Read-Copy-Update) core declarations — apparently preprocessed from
 * include/linux/rcupdate.h (that path appears in expanded macros below).
 */
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
/*
 * Callback node embedded in RCU-protected objects; func is invoked on the
 * node once a grace period has elapsed.
 */
struct rcu_head {
        struct rcu_head *next;
        void (*func)(struct rcu_head *head);
};
extern void call_rcu_sched(struct rcu_head *head,
                void (*func)(struct rcu_head *rcu));
extern void synchronize_sched(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
/* RCU-bh read-side entry: implemented by disabling bottom halves. */
static inline __attribute__((always_inline)) void __rcu_read_lock_bh(void)
{
        local_bh_disable();
}
/* RCU-bh read-side exit: re-enable bottom halves. */
static inline __attribute__((always_inline)) void __rcu_read_unlock_bh(void)
{
        local_bh_enable();
}
extern void __rcu_read_lock(void);
extern void __rcu_read_unlock(void);
void synchronize_rcu(void);
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
/* No-op stubs: in this configuration nohz entry/exit needs no RCU work. */
static inline __attribute__((always_inline)) void rcu_enter_nohz(void)
{
}
static inline __attribute__((always_inline)) void rcu_exit_nohz(void)
{
}
extern void rcu_init(void);
extern void rcu_note_context_switch(int cpu);
extern int rcu_needs_cpu(int cpu);
extern void rcu_cpu_stall_reset(void);
/* Guest context switch is reported to RCU exactly like a host one. */
static inline __attribute__((always_inline)) void rcu_virt_note_context_switch(int cpu)
{
        rcu_note_context_switch(cpu);
}
extern void exit_rcu(void);
extern void synchronize_rcu_bh(void);
extern void synchronize_sched_expedited(void);
extern void synchronize_rcu_expedited(void);
/*
 * Expedited RCU-bh grace period: forwarded to the sched-expedited variant
 * (sufficient in this configuration).
 */
static inline __attribute__((always_inline)) void synchronize_rcu_bh_expedited(void)
{
        synchronize_sched_expedited();
}
extern void rcu_barrier(void);
/* rcutorture bookkeeping counters. */
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
/* Grace-period completion counters for the three RCU flavors. */
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
extern long rcu_batches_completed_sched(void);
extern void rcu_force_quiescent_state(void);
extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);
/*
 * With only one CPU online, any blocking primitive is itself a grace
 * period: no other CPU can hold an RCU read-side critical section.
 */
static inline __attribute__((always_inline)) int rcu_blocking_is_gp(void)
{
        return cpumask_weight(cpu_online_mask) == 1;
}
extern void rcu_scheduler_starting(void);
/* Read-mostly flag: set once the scheduler is running (placed in
 * .data..read_mostly to avoid cache-line sharing with written data). */
extern int rcu_scheduler_active __attribute__((__section__(".data..read_mostly")));
/* No-op stubs: on-stack rcu_head debugging is compiled out here. */
static inline __attribute__((always_inline)) void init_rcu_head_on_stack(struct rcu_head *head)
{
}
static inline __attribute__((always_inline)) void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
/* Lockdep maps tracking the three RCU read-side "locks". */
extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern int debug_lockdep_rcu_enabled(void);
/*
 * Lockdep query: are we inside rcu_read_lock()?  Returns 1 (i.e. "assume
 * held") when lockdep-RCU debugging is disabled.  The __builtin_constant_p
 * ternaries are the expansion of branch-profiling instrumentation
 * (ftrace_branch_data counters); the underlying condition is just
 * !debug_lockdep_rcu_enabled().
 */
static inline __attribute__((always_inline)) int rcu_read_lock_held(void)
{
        if (__builtin_constant_p(((!debug_lockdep_rcu_enabled()))) ? !!((!debug_lockdep_rcu_enabled())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 216, }; ______r = !!((!debug_lockdep_rcu_enabled())); ______f.miss_hit[______r]++; ______r; }))
                return 1;
        return lock_is_held(&rcu_lock_map);
}
extern int rcu_read_lock_bh_held(void);
/*
 * Lockdep query for RCU-sched: held if lockdep says so, or if preemption
 * is disabled (nonzero preempt_count), or if local interrupts are off —
 * any of these blocks an RCU-sched grace period.
 */
static inline __attribute__((always_inline)) int rcu_read_lock_sched_held(void)
{
        int lockdep_opinion = 0;
        if (__builtin_constant_p(((!debug_lockdep_rcu_enabled()))) ? !!((!debug_lockdep_rcu_enabled())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 247, }; ______r = !!((!debug_lockdep_rcu_enabled())); ______f.miss_hit[______r]++; ______r; }))
                return 1;
        if (__builtin_constant_p(((debug_locks))) ? !!((debug_locks)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 249, }; ______r = !!((debug_locks)); ______f.miss_hit[______r]++; ______r; }))
                lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
        return lockdep_opinion || (current_thread_info()->preempt_count) != 0 || ({ unsigned long _flags; do { ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _flags = arch_local_save_flags(); } while (0); ({ ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(_flags); }); });
}
/*
 * RCU read-side critical-section entry/exit for the three flavors
 * (classic, bh, sched), with lockdep acquire/release annotations.
 * The (void)0 statements are compiled-out debug hooks; the
 * ({ __label__ __here; ... }) expressions capture the caller's IP for
 * lockdep.  Expanded from include/linux/rcupdate.h.
 */
static inline __attribute__((always_inline)) void rcu_read_lock(void)
{
        __rcu_read_lock();
        (void)0;
        lock_acquire(&rcu_lock_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
}
static inline __attribute__((always_inline)) void rcu_read_unlock(void)
{
        lock_release(&rcu_lock_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
        (void)0;
        __rcu_read_unlock();
}
static inline __attribute__((always_inline)) void rcu_read_lock_bh(void)
{
        __rcu_read_lock_bh();
        (void)0;
        lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
}
static inline __attribute__((always_inline)) void rcu_read_unlock_bh(void)
{
        lock_release(&rcu_bh_lock_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
        (void)0;
        __rcu_read_unlock_bh();
}
/* RCU-sched entry: expanded preempt_disable() (count bump + barrier). */
static inline __attribute__((always_inline)) void rcu_read_lock_sched(void)
{
        do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
        (void)0;
        lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
}
/* notrace variant: bumps preempt_count directly, no lockdep annotation. */
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void rcu_read_lock_sched_notrace(void)
{
        do { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } while (0);
        (void)0;
}
/* RCU-sched exit: expanded preempt_enable(), including the reschedule
 * check (test of TIF flag 3, branch-profiled, then preempt_schedule()). */
static inline __attribute__((always_inline)) void rcu_read_unlock_sched(void)
{
        lock_release(&rcu_sched_lock_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
        (void)0;
        do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 681, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 681, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 681, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 681, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/* notrace exit variant, same expansion without the lockdep release. */
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void rcu_read_unlock_sched_notrace(void)
{
        (void)0;
        do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 688, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 688, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 688, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 688, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * Helper pairing an rcu_head with a completion; wakeme_after_rcu() is the
 * callback that completes it after a grace period.
 */
struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};
extern void wakeme_after_rcu(struct rcu_head *head);
extern void call_rcu(struct rcu_head *head,
                void (*func)(struct rcu_head *head));
extern void call_rcu_bh(struct rcu_head *head,
                void (*func)(struct rcu_head *head));
/* No-op stubs: rcu_head debug-objects tracking is compiled out here. */
static inline __attribute__((always_inline)) void debug_rcu_head_queue(struct rcu_head *head)
{
}
static inline __attribute__((always_inline)) void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
/*
 * Distinguish kfree_rcu() callbacks from real function callbacks: the
 * rcu_head "func" slot holds a kfree offset when it is smaller than one
 * page (__rcu_reclaim below relies on this to call kfree instead).
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) bool __is_kfree_rcu_offset(unsigned long offset)
{
        /* Offsets within the first 4096 bytes encode a kfree() offset. */
        enum { KFREE_RCU_OFFSET_LIMIT = 4096 };
        return offset < KFREE_RCU_OFFSET_LIMIT;
}
/*
 * kfree_rcu() back end: queues a grace-period-deferred kfree by smuggling
 * the rcu_head offset through the callback pointer.  The two do{}while(0)
 * statements are expanded BUILD_BUG_ON checks: offset must be a
 * compile-time constant and must satisfy __is_kfree_rcu_offset().
 */
static inline __attribute__((always_inline)) __attribute__((always_inline))
void __kfree_rcu(struct rcu_head *head, unsigned long offset)
{
        typedef void (*rcu_callback)(struct rcu_head *);
        do { ((void)sizeof(char[1 - 2*!!(!__builtin_constant_p(offset))])); if (__builtin_constant_p(((!__builtin_constant_p(offset)))) ? !!((!__builtin_constant_p(offset))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 822, }; ______r = !!((!__builtin_constant_p(offset))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0);
        do { ((void)sizeof(char[1 - 2*!!(!__is_kfree_rcu_offset(offset))])); if (__builtin_constant_p(((!__is_kfree_rcu_offset(offset)))) ? !!((!__is_kfree_rcu_offset(offset))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 825, }; ______r = !!((!__is_kfree_rcu_offset(offset))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0);
        /* The offset is cast to a "callback"; decoded by __rcu_reclaim(). */
        call_rcu(head, (rcu_callback)offset);
}
extern void kfree(const void *);
/*
 * Invoke one RCU callback after its grace period.  If head->func is
 * really a small offset (see __is_kfree_rcu_offset), the enclosing
 * object starts at (head - offset) and is simply kfree()d; otherwise
 * the real callback function is called.  The ternary is expanded
 * branch-profiling around that single test.
 */
static inline __attribute__((always_inline)) void __rcu_reclaim(struct rcu_head *head)
{
        unsigned long offset = (unsigned long)head->func;
        if (__builtin_constant_p(((__is_kfree_rcu_offset(offset)))) ? !!((__is_kfree_rcu_offset(offset))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 836, }; ______r = !!((__is_kfree_rcu_offset(offset))); ______f.miss_hit[______r]++; ______r; }))
                kfree((void *)head - offset);
        else
                head->func(head);
}
/* --- sysctl core declarations (apparently from include/linux/sysctl.h) --- */
struct ctl_table;
struct nsproxy;
struct ctl_table_root;
/*
 * A set of sysctl tables; is_seen decides visibility of the set for the
 * current (namespace) context.
 */
struct ctl_table_set {
        struct list_head list;
        struct ctl_table_set *parent;
        int (*is_seen)(struct ctl_table_set *);
};
extern void setup_sysctl_set(struct ctl_table_set *p,
        struct ctl_table_set *parent,
        int (*is_seen)(struct ctl_table_set *));
struct ctl_table_header;
/* Header refcounting / traversal API. */
extern void sysctl_head_get(struct ctl_table_header *);
extern void sysctl_head_put(struct ctl_table_header *);
extern int sysctl_is_seen(struct ctl_table_header *);
extern struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *);
extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev);
extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
                struct ctl_table_header *prev);
extern void sysctl_head_finish(struct ctl_table_header *prev);
extern int sysctl_perm(struct ctl_table_root *root,
                struct ctl_table *table, int op);
typedef struct ctl_table ctl_table;
/* Signature of a /proc/sys read/write handler (write != 0 means write). */
typedef int proc_handler (struct ctl_table *ctl, int write,
        void *buffer, size_t *lenp, loff_t *ppos);
/* Stock handlers for the common data types (strings, int vectors, jiffies
 * conversions, long vectors, bitmaps). */
extern int proc_dostring(struct ctl_table *, int,
        void *, size_t *, loff_t *);
extern int proc_dointvec(struct ctl_table *, int,
        void *, size_t *, loff_t *);
extern int proc_dointvec_minmax(struct ctl_table *, int,
        void *, size_t *, loff_t *);
extern int proc_dointvec_jiffies(struct ctl_table *, int,
        void *, size_t *, loff_t *);
extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
        void *, size_t *, loff_t *);
extern int proc_dointvec_ms_jiffies(struct ctl_table *, int,
        void *, size_t *, loff_t *);
extern int proc_doulongvec_minmax(struct ctl_table *, int,
        void *, size_t *, loff_t *);
extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
        void *, size_t *, loff_t *);
extern int proc_do_large_bitmap(struct ctl_table *, int,
        void *, size_t *, loff_t *);
/*
 * One sysctl entry: procname is the /proc/sys name; data/maxlen describe
 * the backing value; proc_handler formats/parses it; child links a
 * sub-directory table; extra1/extra2 are handler-specific (e.g. min/max
 * bounds for proc_dointvec_minmax).
 */
struct ctl_table
{
        const char *procname;
        void *data;
        int maxlen;
        mode_t mode;
        struct ctl_table *child;
        struct ctl_table *parent;
        proc_handler *proc_handler;
        void *extra1;
        void *extra2;
};
/*
 * A root of sysctl tables; lookup/permissions hooks make the hierarchy
 * namespace-aware.
 */
struct ctl_table_root {
        struct list_head root_list;
        struct ctl_table_set default_set;
        struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
                struct nsproxy *namespaces);
        int (*permissions)(struct ctl_table_root *root,
                struct nsproxy *namespaces, struct ctl_table *table);
};
/*
 * Registration record for a table.  The anonymous union overlays the
 * live bookkeeping (list linkage, use/ref counts) with the rcu_head used
 * to free the header after a grace period.
 */
struct ctl_table_header
{
        union {
                struct {
                        struct ctl_table *ctl_table;
                        struct list_head ctl_entry;
                        int used;
                        int count;
                };
                struct rcu_head rcu;
        };
        struct completion *unregistering;
        struct ctl_table *ctl_table_arg;
        struct ctl_table_root *root;
        struct ctl_table_set *set;
        struct ctl_table *attached_by;
        struct ctl_table *attached_to;
        struct ctl_table_header *parent;
};
/* One component of a sysctl directory path used at registration time. */
struct ctl_path {
        const char *procname;
};
/* sysctl table (un)registration API. */
void register_sysctl_root(struct ctl_table_root *root);
struct ctl_table_header *__register_sysctl_paths(
        struct ctl_table_root *root, struct nsproxy *namespaces,
        const struct ctl_path *path, struct ctl_table *table);
struct ctl_table_header *register_sysctl_table(struct ctl_table * table);
struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
                struct ctl_table *table);
void unregister_sysctl_table(struct ctl_table_header * table);
int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table);
/* --- usermode-helper / module loader declarations (kmod) --- */
extern char modprobe_path[];
/* printf-style request to load a module; wait selects sync vs async. */
extern int __request_module(bool wait, const char *name, ...)
        __attribute__((format(printf, 2, 3)));
struct cred;
struct file;
/* How long call_usermodehelper_exec() blocks the caller. */
enum umh_wait {
	UMH_NO_WAIT = -1,	/* don't wait at all */
	UMH_WAIT_EXEC = 0,	/* wait until the helper has exec'd */
	UMH_WAIT_PROC = 1,	/* wait until the helper process exits */
};
/*
 * Describes one usermode-helper request; processed from a workqueue.
 */
struct subprocess_info {
	struct work_struct work;	/* queued onto the khelper workqueue */
	struct completion *complete;	/* signalled per the 'wait' policy */
	char *path;			/* program to execute */
	char **argv;
	char **envp;
	enum umh_wait wait;		/* caller's blocking policy */
	int retval;			/* helper's exit/exec status */
	/* Optional: runs in the child before exec (e.g. set credentials). */
	int (*init)(struct subprocess_info *info, struct cred *new);
	/* Optional: releases caller-owned resources when info is freed. */
	void (*cleanup)(struct subprocess_info *info);
	void *data;			/* opaque cookie for init/cleanup */
};
10464 | struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, | |
10465 | char **envp, gfp_t gfp_mask); | |
10466 | void call_usermodehelper_setfns(struct subprocess_info *info, | |
10467 | int (*init)(struct subprocess_info *info, struct cred *new), | |
10468 | void (*cleanup)(struct subprocess_info *info), | |
10469 | void *data); | |
10470 | int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait); | |
10471 | void call_usermodehelper_freeinfo(struct subprocess_info *info); | |
/*
 * Allocate, configure and launch a usermode helper in one call.
 * Returns 0 (or the helper's status, per 'wait') on success, -12
 * (-ENOMEM) if the request could not be allocated.
 *
 * The gfp literals are pre-expanded macros: 0x20u is GFP_ATOMIC-style
 * (no sleeping, needed when the caller cannot block) and
 * 0x10u|0x40u|0x80u is GFP_KERNEL (__GFP_WAIT|__GFP_IO|__GFP_FS).
 * The huge conditional below is the CONFIG_PROFILE_ALL_BRANCHES
 * expansion of a plain `if (info == NULL)`.
 */
static inline __attribute__((always_inline)) int
call_usermodehelper_fns(char *path, char **argv, char **envp,
			enum umh_wait wait,
			int (*init)(struct subprocess_info *info, struct cred *new),
			void (*cleanup)(struct subprocess_info *), void *data)
{
	struct subprocess_info *info;
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? ((( gfp_t)0x20u)) : ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u));
	info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
	if (__builtin_constant_p(((info == ((void *)0)))) ? !!((info == ((void *)0))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kmod.h", .line = 98, }; ______r = !!((info == ((void *)0))); ______f.miss_hit[______r]++; ______r; }))
		return -12;	/* -ENOMEM */
	call_usermodehelper_setfns(info, init, cleanup, data);
	return call_usermodehelper_exec(info, wait);
}
/*
 * Convenience wrapper: run a userspace program with no init/cleanup
 * callbacks and no private data.
 */
static inline __attribute__((always_inline)) int
call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
{
	return call_usermodehelper_fns(path, argv, envp, wait,
				       ((void *)0), ((void *)0), ((void *)0));
}
10492 | extern struct ctl_table usermodehelper_table[]; | |
10493 | extern void usermodehelper_init(void); | |
10494 | extern int usermodehelper_disable(void); | |
10495 | extern void usermodehelper_enable(void); | |
10496 | extern bool usermodehelper_is_disabled(void); | |
/*
 * Classic x87 FPU state as exported to userspace (ptrace/core dumps).
 * Layout is ABI — do not change.
 */
struct user_i387_struct {
	long cwd;		/* control word */
	long swd;		/* status word */
	long twd;		/* tag word */
	long fip;		/* instruction pointer */
	long fcs;		/* instruction pointer code segment */
	long foo;		/* operand offset */
	long fos;		/* operand segment */
	long st_space[20];	/* 8 x 10-byte ST registers, packed */
};
/*
 * FXSAVE-format FPU/SSE state as exported to userspace. Layout is ABI.
 */
struct user_fxsr_struct {
	unsigned short cwd;	/* control word */
	unsigned short swd;	/* status word */
	unsigned short twd;	/* abridged tag word */
	unsigned short fop;	/* last opcode */
	long fip;		/* instruction pointer */
	long fcs;
	long foo;		/* operand offset */
	long fos;
	long mxcsr;		/* SSE control/status */
	long reserved;
	long st_space[32];	/* 8 x 16-byte ST/MMX slots */
	long xmm_space[32];	/* 8 x 16-byte XMM registers */
	long padding[56];
};
/*
 * General-purpose register set as seen through ptrace (32-bit x86
 * naming; ax/bx/... are the full 32-bit registers). Layout is ABI.
 */
struct user_regs_struct {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	unsigned long gs;
	unsigned long orig_ax;	/* ax at syscall entry (for restarting) */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};
/*
 * a.out-style core-dump header consumed by debuggers. Layout is ABI.
 */
struct user{
	struct user_regs_struct regs;	/* GP registers at dump time */
	int u_fpvalid;			/* nonzero if i387 below is valid */
	struct user_i387_struct i387;	/* FPU state */
	unsigned long int u_tsize;	/* text size (pages) */
	unsigned long int u_dsize;	/* data size (pages) */
	unsigned long int u_ssize;	/* stack size (pages) */
	unsigned long start_code;	/* text start address */
	unsigned long start_stack;	/* stack start address */
	long int signal;		/* signal that caused the dump */
	int reserved;
	unsigned long u_ar0;		/* offset of regs within the dump */
	struct user_i387_struct *u_fpstate;	/* offset of FPU state */
	unsigned long magic;
	char u_comm[32];		/* command name */
	int u_debugreg[8];		/* hardware debug registers */
};
/* High 128 bits of the 16 YMM registers (AVX state for ptrace). */
struct user_ymmh_regs {
	__u32 ymmh_space[64];	/* 16 registers x 16 bytes */
};
/* XSAVE header: bitmap of which state components are present. */
struct user_xsave_hdr {
	__u64 xstate_bv;	/* component bit vector */
	__u64 reserved1[2];
	__u64 reserved2[5];
};
/*
 * Full XSAVE area exported to userspace: legacy FXSAVE image, the
 * xsave header, then the extended (YMM-high) state.
 */
struct user_xstateregs {
	struct {
		__u64 fpx_space[58];	/* legacy i387/FX region */
		__u64 xstate_fx_sw[6];	/* software-usable bytes of FX area */
	} i387;
	struct user_xsave_hdr xsave_hdr;
	struct user_ymmh_regs ymmh;
};
10574 | typedef unsigned long elf_greg_t; | |
10575 | typedef elf_greg_t elf_gregset_t[(sizeof(struct user_regs_struct) / sizeof(elf_greg_t))]; | |
10576 | typedef struct user_i387_struct elf_fpregset_t; | |
10577 | typedef struct user_fxsr_struct elf_fpxregset_t; | |
10578 | extern const char VDSO32_PRELINK[]; | |
10579 | extern void __kernel_sigreturn; | |
10580 | extern void __kernel_rt_sigreturn; | |
10581 | extern const char vdso32_int80_start, vdso32_int80_end; | |
10582 | extern const char vdso32_syscall_start, vdso32_syscall_end; | |
10583 | extern const char vdso32_sysenter_start, vdso32_sysenter_end; | |
10584 | extern unsigned int vdso_enabled; | |
/*
 * Userspace view of a segment descriptor, as passed to
 * set_thread_area()/modify_ldt(). Layout is ABI.
 */
struct user_desc {
	unsigned int entry_number;	/* GDT/LDT slot, or -1 for "pick one" */
	unsigned int base_addr;
	unsigned int limit;
	unsigned int seg_32bit:1;
	unsigned int contents:2;	/* data/expand-down/code flavor */
	unsigned int read_exec_only:1;
	unsigned int limit_in_pages:1;	/* granularity bit */
	unsigned int seg_not_present:1;
	unsigned int useable:1;		/* AVL bit */
};
10596 | static inline __attribute__((always_inline)) void fill_ldt(struct desc_struct *desc, const struct user_desc *info) | |
10597 | { | |
10598 | desc->limit0 = info->limit & 0x0ffff; | |
10599 | desc->base0 = (info->base_addr & 0x0000ffff); | |
10600 | desc->base1 = (info->base_addr & 0x00ff0000) >> 16; | |
10601 | desc->type = (info->read_exec_only ^ 1) << 1; | |
10602 | desc->type |= info->contents << 2; | |
10603 | desc->s = 1; | |
10604 | desc->dpl = 0x3; | |
10605 | desc->p = info->seg_not_present ^ 1; | |
10606 | desc->limit = (info->limit & 0xf0000) >> 16; | |
10607 | desc->avl = info->useable; | |
10608 | desc->d = info->seg_32bit; | |
10609 | desc->g = info->limit_in_pages; | |
10610 | desc->base2 = (info->base_addr & 0xff000000) >> 24; | |
10611 | desc->l = 0; | |
10612 | } | |
10613 | extern struct desc_ptr idt_descr; | |
10614 | extern gate_desc idt_table[]; | |
/* Page-aligned container for one CPU's 32-entry GDT (per-CPU instance
 * declared below). */
struct gdt_page {
	struct desc_struct gdt[32];
} __attribute__((aligned(((1UL) << 12))));
10618 | extern __attribute__((section(".data..percpu" "..page_aligned"))) __typeof__(struct gdt_page) gdt_page __attribute__((aligned(((1UL) << 12)))); | |
/*
 * Return a pointer to @cpu's GDT.  The expression below is the expanded
 * per_cpu(gdt_page, cpu) accessor: a compile-time type check followed by
 * adding the CPU's per-cpu offset to &gdt_page.
 */
static inline __attribute__((always_inline)) struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
	return (*({ do { const void *__vpp_verify = (typeof((&(gdt_page))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(gdt_page))) *)(&(gdt_page)))); (typeof((typeof(*(&(gdt_page))) *)(&(gdt_page)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).gdt;
}
10623 | static inline __attribute__((always_inline)) void pack_gate(gate_desc *gate, unsigned char type, | |
10624 | unsigned long base, unsigned dpl, unsigned flags, | |
10625 | unsigned short seg) | |
10626 | { | |
10627 | gate->a = (seg << 16) | (base & 0xffff); | |
10628 | gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8); | |
10629 | } | |
/*
 * Return nonzero iff the 8-byte descriptor at @ptr is all zero
 * (i.e. the slot is unused).
 */
static inline __attribute__((always_inline)) int desc_empty(const void *ptr)
{
	const unsigned int *words = ptr;

	return (words[0] | words[1]) == 0;
}
10635 | static inline __attribute__((always_inline)) void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate) | |
10636 | { | |
10637 | __builtin_memcpy(&idt[entry], gate, sizeof(*gate)); | |
10638 | } | |
10639 | static inline __attribute__((always_inline)) void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) | |
10640 | { | |
10641 | __builtin_memcpy(&ldt[entry], desc, 8); | |
10642 | } | |
10643 | static inline __attribute__((always_inline)) void | |
10644 | native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type) | |
10645 | { | |
10646 | unsigned int size; | |
10647 | switch (type) { | |
10648 | case DESC_TSS: size = sizeof(tss_desc); break; | |
10649 | case DESC_LDT: size = sizeof(ldt_desc); break; | |
10650 | default: size = sizeof(*gdt); break; | |
10651 | } | |
10652 | __builtin_memcpy(&gdt[entry], desc, size); | |
10653 | } | |
10654 | static inline __attribute__((always_inline)) void pack_descriptor(struct desc_struct *desc, unsigned long base, | |
10655 | unsigned long limit, unsigned char type, | |
10656 | unsigned char flags) | |
10657 | { | |
10658 | desc->a = ((base & 0xffff) << 16) | (limit & 0xffff); | |
10659 | desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | | |
10660 | (limit & 0x000f0000) | ((type & 0xff) << 8) | | |
10661 | ((flags & 0xf) << 20); | |
10662 | desc->p = 1; | |
10663 | } | |
/*
 * Build a TSS/LDT system descriptor at @d: base @addr, byte limit @size,
 * access byte = present (0x80) | @type.
 */
static inline __attribute__((always_inline)) void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
{
	pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
}
/*
 * Install @addr as the TSS for @cpu in GDT slot @entry.  The limit covers
 * the tss_struct up to and including its 64 KiB I/O bitmap.
 */
static inline __attribute__((always_inline)) void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
{
	struct desc_struct *d = get_cpu_gdt_table(cpu);
	tss_desc tss;
	set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
			      __builtin_offsetof(struct tss_struct,io_bitmap) + (65536/8) +
			      sizeof(unsigned long) - 1);	/* limit = size - 1 */
	write_gdt_entry(d, entry, &tss, DESC_TSS);
}
/*
 * Point the CPU at a new LDT.  entries == 0 loads a null LDT selector;
 * otherwise a descriptor for @addr is written into this CPU's GDT
 * (slot 12+5) and loaded.  The conditional below is the branch-profiling
 * expansion of likely(entries == 0).
 */
static inline __attribute__((always_inline)) void native_set_ldt(const void *addr, unsigned int entries)
{
	if (__builtin_constant_p((((__builtin_constant_p(entries == 0) ? !!(entries == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 194, }; ______r = __builtin_expect(!!(entries == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(entries == 0) ? !!(entries == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 194, }; ______r = __builtin_expect(!!(entries == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 194, }; ______r = !!(((__builtin_constant_p(entries == 0) ? !!(entries == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 194, }; ______r = __builtin_expect(!!(entries == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
		/* No LDT: load the null selector. */
		asm volatile("lldt %w0"::"q" (0));
	else {
		unsigned cpu = debug_smp_processor_id();
		ldt_desc ldt;
		/* 8 bytes per entry; descriptor limit is size - 1. */
		set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
				      entries * 8 - 1);
		write_gdt_entry(get_cpu_gdt_table(cpu), ((12)+5),
				&ldt, DESC_LDT);
		/* Load the selector for GDT slot 12+5. */
		asm volatile("lldt %w0"::"q" (((12)+5)*8));
	}
}
/* Load the task register with the TSS selector (GDT slot 12+4). */
static inline __attribute__((always_inline)) void native_load_tr_desc(void)
{
	asm volatile("ltr %w0"::"q" (((12)+4)*8));
}
/* Load the GDT register from a pseudo-descriptor (base + limit). */
static inline __attribute__((always_inline)) void native_load_gdt(const struct desc_ptr *dtr)
{
	asm volatile("lgdt %0"::"m" (*dtr));
}
/* Load the IDT register from a pseudo-descriptor (base + limit). */
static inline __attribute__((always_inline)) void native_load_idt(const struct desc_ptr *dtr)
{
	asm volatile("lidt %0"::"m" (*dtr));
}
/* Read the current GDT register into *dtr. */
static inline __attribute__((always_inline)) void native_store_gdt(struct desc_ptr *dtr)
{
	asm volatile("sgdt %0":"=m" (*dtr));
}
/* Read the current IDT register into *dtr. */
static inline __attribute__((always_inline)) void native_store_idt(struct desc_ptr *dtr)
{
	asm volatile("sidt %0":"=m" (*dtr));
}
/* Return the current task register (TSS selector). */
static inline __attribute__((always_inline)) unsigned long native_store_tr(void)
{
	unsigned long tr;
	asm volatile("str %0":"=r" (tr));
	return tr;
}
10717 | static inline __attribute__((always_inline)) void native_load_tls(struct thread_struct *t, unsigned int cpu) | |
10718 | { | |
10719 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | |
10720 | unsigned int i; | |
10721 | for (i = 0; i < 3; i++) | |
10722 | gdt[6 + i] = t->tls_array[i]; | |
10723 | } | |
/* Switch to an empty LDT. */
static inline __attribute__((always_inline)) void clear_LDT(void)
{
	set_ldt(((void *)0), 0);
}
/* Load a context's LDT; load_LDT() below wraps this with preemption
 * disabled, so callers here must already be non-preemptible. */
static inline __attribute__((always_inline)) void load_LDT_nolock(mm_context_t *pc)
{
	set_ldt(pc->ldt, pc->size);
}
/*
 * Load a context's LDT with preemption disabled around the update.
 * The two do/while blocks are inlined preempt_disable() and
 * preempt_enable(): bump/drop the preempt count with compiler barriers,
 * then reschedule if the need-resched thread flag (bit 3) is set; the
 * giant conditional is its branch-profiling expansion.
 */
static inline __attribute__((always_inline)) void load_LDT(mm_context_t *pc)
{
	do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
	load_LDT_nolock(pc);
	do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 284, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 284, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 284, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 284, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
10738 | static inline __attribute__((always_inline)) unsigned long get_desc_base(const struct desc_struct *desc) | |
10739 | { | |
10740 | return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); | |
10741 | } | |
10742 | static inline __attribute__((always_inline)) void set_desc_base(struct desc_struct *desc, unsigned long base) | |
10743 | { | |
10744 | desc->base0 = base & 0xffff; | |
10745 | desc->base1 = (base >> 16) & 0xff; | |
10746 | desc->base2 = (base >> 24) & 0xff; | |
10747 | } | |
10748 | static inline __attribute__((always_inline)) unsigned long get_desc_limit(const struct desc_struct *desc) | |
10749 | { | |
10750 | return desc->limit0 | (desc->limit << 16); | |
10751 | } | |
10752 | static inline __attribute__((always_inline)) void set_desc_limit(struct desc_struct *desc, unsigned long limit) | |
10753 | { | |
10754 | desc->limit0 = limit & 0xffff; | |
10755 | desc->limit = (limit >> 16) & 0xf; | |
10756 | } | |
/*
 * Common helper: build a gate descriptor and install it at IDT slot
 * @gate.  @ist is ignored by the 32-bit pack_gate() above.
 */
static inline __attribute__((always_inline)) void _set_gate(int gate, unsigned type, void *addr,
			     unsigned dpl, unsigned ist, unsigned seg)
{
	gate_desc s;
	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
	write_idt_entry(idt_table, gate, &s);
}
/*
 * Install a kernel-only (DPL 0) interrupt gate for vector @n, using the
 * kernel code segment selector (GDT slot 12).  The first statement is the
 * branch-profiling expansion of BUG_ON(n > 0xFF).
 */
static inline __attribute__((always_inline)) void set_intr_gate(unsigned int n, void *addr)
{
	do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 331, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 331, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 331, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 331, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (331), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
	_set_gate(n, GATE_INTERRUPT, addr, 0, 0, (((12)+0)*8));
}
10769 | extern int first_system_vector; | |
10770 | extern unsigned long used_vectors[]; | |
/*
 * Claim @vector in the used_vectors bitmap, tracking the lowest vector
 * handed to system interrupts; BUG() if it was already taken.  The giant
 * conditionals are branch-profiling expansions of the plain tests.
 */
static inline __attribute__((always_inline)) void alloc_system_vector(int vector)
{
	if (__builtin_constant_p(((!(__builtin_constant_p((vector)) ? constant_test_bit((vector), (used_vectors)) : variable_test_bit((vector), (used_vectors)))))) ? !!((!(__builtin_constant_p((vector)) ? constant_test_bit((vector), (used_vectors)) : variable_test_bit((vector), (used_vectors))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 341, }; ______r = !!((!(__builtin_constant_p((vector)) ? constant_test_bit((vector), (used_vectors)) : variable_test_bit((vector), (used_vectors))))); ______f.miss_hit[______r]++; ______r; })) {
		set_bit(vector, used_vectors);
		/* Remember the lowest system vector allocated so far. */
		if (__builtin_constant_p(((first_system_vector > vector))) ? !!((first_system_vector > vector)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 343, }; ______r = !!((first_system_vector > vector)); ______f.miss_hit[______r]++; ______r; }))
			first_system_vector = vector;
	} else {
		/* Vector already in use: expanded BUG(). */
		do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (346), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0);
	}
}
/* Reserve vector @n in the bitmap, then install @addr as its handler. */
static inline __attribute__((always_inline)) void alloc_intr_gate(unsigned int n, void *addr)
{
	alloc_system_vector(n);
	set_intr_gate(n, addr);
}
/*
 * Install a user-callable (DPL 3) interrupt gate for vector @n.  The
 * first statement is the branch-profiling expansion of BUG_ON(n > 0xFF).
 */
static inline __attribute__((always_inline)) void set_system_intr_gate(unsigned int n, void *addr)
{
	do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 361, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 361, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 361, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 361, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (361), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
	_set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, (((12)+0)*8));
}
/*
 * Install a user-callable (DPL 3) trap gate for vector @n (interrupts
 * stay enabled in the handler).  First statement is expanded
 * BUG_ON(n > 0xFF).
 */
static inline __attribute__((always_inline)) void set_system_trap_gate(unsigned int n, void *addr)
{
	do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 367, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 367, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 367, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 367, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (367), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
	_set_gate(n, GATE_TRAP, addr, 0x3, 0, (((12)+0)*8));
}
/*
 * Install a kernel-only (DPL 0) trap gate for vector @n.  First
 * statement is expanded BUG_ON(n > 0xFF).
 */
static inline __attribute__((always_inline)) void set_trap_gate(unsigned int n, void *addr)
{
	do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 373, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 373, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 373, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 373, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (373), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
	_set_gate(n, GATE_TRAP, addr, 0, 0, (((12)+0)*8));
}
/*
 * Install a task gate for vector @n referring to the TSS descriptor at
 * GDT slot @gdt_entry (selector = slot << 3); the offset field is unused
 * for task gates.  First statement is expanded BUG_ON(n > 0xFF).
 */
static inline __attribute__((always_inline)) void set_task_gate(unsigned int n, unsigned int gdt_entry)
{
	do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 379, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 379, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 379, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 379, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (379), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
	_set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
}
/*
 * Install a kernel-only interrupt gate with an IST index (the @ist
 * argument is meaningful on 64-bit; the 32-bit pack_gate() ignores it).
 * First statement is expanded BUG_ON(n > 0xFF).
 */
static inline __attribute__((always_inline)) void set_intr_gate_ist(int n, void *addr, unsigned ist)
{
	do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 385, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 385, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 385, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 385, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (385), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
	_set_gate(n, GATE_INTERRUPT, addr, 0, ist, (((12)+0)*8));
}
10811 | static inline __attribute__((always_inline)) void set_system_intr_gate_ist(int n, void *addr, unsigned ist) | |
10812 | { | |
10813 | do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 391, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 391, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 391, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 391, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (391), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0); | |
10814 | _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, (((12)+0)*8)); | |
10815 | } | |
10816 | struct task_struct; | |
10817 | struct linux_binprm; | |
10818 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | |
10819 | int uses_interp); | |
10820 | extern int syscall32_setup_pages(struct linux_binprm *, int exstack); | |
10821 | extern unsigned long arch_randomize_brk(struct mm_struct *mm); | |
10822 | struct file; | |
10823 | typedef __u32 Elf32_Addr; | |
10824 | typedef __u16 Elf32_Half; | |
10825 | typedef __u32 Elf32_Off; | |
10826 | typedef __s32 Elf32_Sword; | |
10827 | typedef __u32 Elf32_Word; | |
10828 | typedef __u64 Elf64_Addr; | |
10829 | typedef __u16 Elf64_Half; | |
10830 | typedef __s16 Elf64_SHalf; | |
10831 | typedef __u64 Elf64_Off; | |
10832 | typedef __s32 Elf64_Sword; | |
10833 | typedef __u32 Elf64_Word; | |
10834 | typedef __u64 Elf64_Xword; | |
10835 | typedef __s64 Elf64_Sxword; | |
10836 | typedef struct dynamic{ | |
10837 | Elf32_Sword d_tag; | |
10838 | union{ | |
10839 | Elf32_Sword d_val; | |
10840 | Elf32_Addr d_ptr; | |
10841 | } d_un; | |
10842 | } Elf32_Dyn; | |
10843 | typedef struct { | |
10844 | Elf64_Sxword d_tag; | |
10845 | union { | |
10846 | Elf64_Xword d_val; | |
10847 | Elf64_Addr d_ptr; | |
10848 | } d_un; | |
10849 | } Elf64_Dyn; | |
10850 | typedef struct elf32_rel { | |
10851 | Elf32_Addr r_offset; | |
10852 | Elf32_Word r_info; | |
10853 | } Elf32_Rel; | |
10854 | typedef struct elf64_rel { | |
10855 | Elf64_Addr r_offset; | |
10856 | Elf64_Xword r_info; | |
10857 | } Elf64_Rel; | |
10858 | typedef struct elf32_rela{ | |
10859 | Elf32_Addr r_offset; | |
10860 | Elf32_Word r_info; | |
10861 | Elf32_Sword r_addend; | |
10862 | } Elf32_Rela; | |
10863 | typedef struct elf64_rela { | |
10864 | Elf64_Addr r_offset; | |
10865 | Elf64_Xword r_info; | |
10866 | Elf64_Sxword r_addend; | |
10867 | } Elf64_Rela; | |
10868 | typedef struct elf32_sym{ | |
10869 | Elf32_Word st_name; | |
10870 | Elf32_Addr st_value; | |
10871 | Elf32_Word st_size; | |
10872 | unsigned char st_info; | |
10873 | unsigned char st_other; | |
10874 | Elf32_Half st_shndx; | |
10875 | } Elf32_Sym; | |
10876 | typedef struct elf64_sym { | |
10877 | Elf64_Word st_name; | |
10878 | unsigned char st_info; | |
10879 | unsigned char st_other; | |
10880 | Elf64_Half st_shndx; | |
10881 | Elf64_Addr st_value; | |
10882 | Elf64_Xword st_size; | |
10883 | } Elf64_Sym; | |
10884 | typedef struct elf32_hdr{ | |
10885 | unsigned char e_ident[16]; | |
10886 | Elf32_Half e_type; | |
10887 | Elf32_Half e_machine; | |
10888 | Elf32_Word e_version; | |
10889 | Elf32_Addr e_entry; | |
10890 | Elf32_Off e_phoff; | |
10891 | Elf32_Off e_shoff; | |
10892 | Elf32_Word e_flags; | |
10893 | Elf32_Half e_ehsize; | |
10894 | Elf32_Half e_phentsize; | |
10895 | Elf32_Half e_phnum; | |
10896 | Elf32_Half e_shentsize; | |
10897 | Elf32_Half e_shnum; | |
10898 | Elf32_Half e_shstrndx; | |
10899 | } Elf32_Ehdr; | |
10900 | typedef struct elf64_hdr { | |
10901 | unsigned char e_ident[16]; | |
10902 | Elf64_Half e_type; | |
10903 | Elf64_Half e_machine; | |
10904 | Elf64_Word e_version; | |
10905 | Elf64_Addr e_entry; | |
10906 | Elf64_Off e_phoff; | |
10907 | Elf64_Off e_shoff; | |
10908 | Elf64_Word e_flags; | |
10909 | Elf64_Half e_ehsize; | |
10910 | Elf64_Half e_phentsize; | |
10911 | Elf64_Half e_phnum; | |
10912 | Elf64_Half e_shentsize; | |
10913 | Elf64_Half e_shnum; | |
10914 | Elf64_Half e_shstrndx; | |
10915 | } Elf64_Ehdr; | |
10916 | typedef struct elf32_phdr{ | |
10917 | Elf32_Word p_type; | |
10918 | Elf32_Off p_offset; | |
10919 | Elf32_Addr p_vaddr; | |
10920 | Elf32_Addr p_paddr; | |
10921 | Elf32_Word p_filesz; | |
10922 | Elf32_Word p_memsz; | |
10923 | Elf32_Word p_flags; | |
10924 | Elf32_Word p_align; | |
10925 | } Elf32_Phdr; | |
10926 | typedef struct elf64_phdr { | |
10927 | Elf64_Word p_type; | |
10928 | Elf64_Word p_flags; | |
10929 | Elf64_Off p_offset; | |
10930 | Elf64_Addr p_vaddr; | |
10931 | Elf64_Addr p_paddr; | |
10932 | Elf64_Xword p_filesz; | |
10933 | Elf64_Xword p_memsz; | |
10934 | Elf64_Xword p_align; | |
10935 | } Elf64_Phdr; | |
10936 | typedef struct elf32_shdr { | |
10937 | Elf32_Word sh_name; | |
10938 | Elf32_Word sh_type; | |
10939 | Elf32_Word sh_flags; | |
10940 | Elf32_Addr sh_addr; | |
10941 | Elf32_Off sh_offset; | |
10942 | Elf32_Word sh_size; | |
10943 | Elf32_Word sh_link; | |
10944 | Elf32_Word sh_info; | |
10945 | Elf32_Word sh_addralign; | |
10946 | Elf32_Word sh_entsize; | |
10947 | } Elf32_Shdr; | |
10948 | typedef struct elf64_shdr { | |
10949 | Elf64_Word sh_name; | |
10950 | Elf64_Word sh_type; | |
10951 | Elf64_Xword sh_flags; | |
10952 | Elf64_Addr sh_addr; | |
10953 | Elf64_Off sh_offset; | |
10954 | Elf64_Xword sh_size; | |
10955 | Elf64_Word sh_link; | |
10956 | Elf64_Word sh_info; | |
10957 | Elf64_Xword sh_addralign; | |
10958 | Elf64_Xword sh_entsize; | |
10959 | } Elf64_Shdr; | |
10960 | typedef struct elf32_note { | |
10961 | Elf32_Word n_namesz; | |
10962 | Elf32_Word n_descsz; | |
10963 | Elf32_Word n_type; | |
10964 | } Elf32_Nhdr; | |
10965 | typedef struct elf64_note { | |
10966 | Elf64_Word n_namesz; | |
10967 | Elf64_Word n_descsz; | |
10968 | Elf64_Word n_type; | |
10969 | } Elf64_Nhdr; | |
10970 | extern Elf32_Dyn _DYNAMIC []; | |
10971 | static inline __attribute__((always_inline)) int elf_coredump_extra_notes_size(void) { return 0; } | |
10972 | static inline __attribute__((always_inline)) int elf_coredump_extra_notes_write(struct file *file, | |
10973 | loff_t *foffset) { return 0; } | |
10974 | struct sock; | |
10975 | struct kobject; | |
10976 | enum kobj_ns_type { | |
10977 | KOBJ_NS_TYPE_NONE = 0, | |
10978 | KOBJ_NS_TYPE_NET, | |
10979 | KOBJ_NS_TYPES | |
10980 | }; | |
10981 | struct kobj_ns_type_operations { | |
10982 | enum kobj_ns_type type; | |
10983 | void *(*grab_current_ns)(void); | |
10984 | const void *(*netlink_ns)(struct sock *sk); | |
10985 | const void *(*initial_ns)(void); | |
10986 | void (*drop_ns)(void *); | |
10987 | }; | |
10988 | int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); | |
10989 | int kobj_ns_type_registered(enum kobj_ns_type type); | |
10990 | const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); | |
10991 | const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); | |
10992 | void *kobj_ns_grab_current(enum kobj_ns_type type); | |
10993 | const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk); | |
10994 | const void *kobj_ns_initial(enum kobj_ns_type type); | |
10995 | void kobj_ns_drop(enum kobj_ns_type type, void *ns); | |
10996 | struct kobject; | |
10997 | struct module; | |
10998 | enum kobj_ns_type; | |
10999 | struct attribute { | |
11000 | const char *name; | |
11001 | mode_t mode; | |
11002 | struct lock_class_key *key; | |
11003 | struct lock_class_key skey; | |
11004 | }; | |
11005 | struct attribute_group { | |
11006 | const char *name; | |
11007 | mode_t (*is_visible)(struct kobject *, | |
11008 | struct attribute *, int); | |
11009 | struct attribute **attrs; | |
11010 | }; | |
11011 | struct file; | |
11012 | struct vm_area_struct; | |
11013 | struct bin_attribute { | |
11014 | struct attribute attr; | |
11015 | size_t size; | |
11016 | void *private; | |
11017 | ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, | |
11018 | char *, loff_t, size_t); | |
11019 | ssize_t (*write)(struct file *,struct kobject *, struct bin_attribute *, | |
11020 | char *, loff_t, size_t); | |
11021 | int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr, | |
11022 | struct vm_area_struct *vma); | |
11023 | }; | |
11024 | struct sysfs_ops { | |
11025 | ssize_t (*show)(struct kobject *, struct attribute *,char *); | |
11026 | ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t); | |
11027 | }; | |
11028 | struct sysfs_dirent; | |
11029 | int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *), | |
11030 | void *data, struct module *owner); | |
11031 | int __attribute__((warn_unused_result)) sysfs_create_dir(struct kobject *kobj); | |
11032 | void sysfs_remove_dir(struct kobject *kobj); | |
11033 | int __attribute__((warn_unused_result)) sysfs_rename_dir(struct kobject *kobj, const char *new_name); | |
11034 | int __attribute__((warn_unused_result)) sysfs_move_dir(struct kobject *kobj, | |
11035 | struct kobject *new_parent_kobj); | |
11036 | int __attribute__((warn_unused_result)) sysfs_create_file(struct kobject *kobj, | |
11037 | const struct attribute *attr); | |
11038 | int __attribute__((warn_unused_result)) sysfs_create_files(struct kobject *kobj, | |
11039 | const struct attribute **attr); | |
11040 | int __attribute__((warn_unused_result)) sysfs_chmod_file(struct kobject *kobj, | |
11041 | const struct attribute *attr, mode_t mode); | |
11042 | void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); | |
11043 | void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); | |
11044 | int __attribute__((warn_unused_result)) sysfs_create_bin_file(struct kobject *kobj, | |
11045 | const struct bin_attribute *attr); | |
11046 | void sysfs_remove_bin_file(struct kobject *kobj, | |
11047 | const struct bin_attribute *attr); | |
11048 | int __attribute__((warn_unused_result)) sysfs_create_link(struct kobject *kobj, struct kobject *target, | |
11049 | const char *name); | |
11050 | int __attribute__((warn_unused_result)) sysfs_create_link_nowarn(struct kobject *kobj, | |
11051 | struct kobject *target, | |
11052 | const char *name); | |
11053 | void sysfs_remove_link(struct kobject *kobj, const char *name); | |
11054 | int sysfs_rename_link(struct kobject *kobj, struct kobject *target, | |
11055 | const char *old_name, const char *new_name); | |
11056 | void sysfs_delete_link(struct kobject *dir, struct kobject *targ, | |
11057 | const char *name); | |
11058 | int __attribute__((warn_unused_result)) sysfs_create_group(struct kobject *kobj, | |
11059 | const struct attribute_group *grp); | |
11060 | int sysfs_update_group(struct kobject *kobj, | |
11061 | const struct attribute_group *grp); | |
11062 | void sysfs_remove_group(struct kobject *kobj, | |
11063 | const struct attribute_group *grp); | |
11064 | int sysfs_add_file_to_group(struct kobject *kobj, | |
11065 | const struct attribute *attr, const char *group); | |
11066 | void sysfs_remove_file_from_group(struct kobject *kobj, | |
11067 | const struct attribute *attr, const char *group); | |
11068 | int sysfs_merge_group(struct kobject *kobj, | |
11069 | const struct attribute_group *grp); | |
11070 | void sysfs_unmerge_group(struct kobject *kobj, | |
11071 | const struct attribute_group *grp); | |
11072 | void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); | |
11073 | void sysfs_notify_dirent(struct sysfs_dirent *sd); | |
11074 | struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd, | |
11075 | const void *ns, | |
11076 | const unsigned char *name); | |
11077 | struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd); | |
11078 | void sysfs_put(struct sysfs_dirent *sd); | |
11079 | int __attribute__((warn_unused_result)) sysfs_init(void); | |
11080 | struct kref { | |
11081 | atomic_t refcount; | |
11082 | }; | |
11083 | void kref_init(struct kref *kref); | |
11084 | void kref_get(struct kref *kref); | |
11085 | int kref_put(struct kref *kref, void (*release) (struct kref *kref)); | |
11086 | int kref_sub(struct kref *kref, unsigned int count, | |
11087 | void (*release) (struct kref *kref)); | |
11088 | extern char uevent_helper[]; | |
11089 | extern u64 uevent_seqnum; | |
11090 | enum kobject_action { | |
11091 | KOBJ_ADD, | |
11092 | KOBJ_REMOVE, | |
11093 | KOBJ_CHANGE, | |
11094 | KOBJ_MOVE, | |
11095 | KOBJ_ONLINE, | |
11096 | KOBJ_OFFLINE, | |
11097 | KOBJ_MAX | |
11098 | }; | |
11099 | struct kobject { | |
11100 | const char *name; | |
11101 | struct list_head entry; | |
11102 | struct kobject *parent; | |
11103 | struct kset *kset; | |
11104 | struct kobj_type *ktype; | |
11105 | struct sysfs_dirent *sd; | |
11106 | struct kref kref; | |
11107 | unsigned int state_initialized:1; | |
11108 | unsigned int state_in_sysfs:1; | |
11109 | unsigned int state_add_uevent_sent:1; | |
11110 | unsigned int state_remove_uevent_sent:1; | |
11111 | unsigned int uevent_suppress:1; | |
11112 | }; | |
11113 | extern int kobject_set_name(struct kobject *kobj, const char *name, ...) | |
11114 | __attribute__((format(printf, 2, 3))); | |
11115 | extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, | |
11116 | va_list vargs); | |
11117 | static inline __attribute__((always_inline)) const char *kobject_name(const struct kobject *kobj) | |
11118 | { | |
11119 | return kobj->name; | |
11120 | } | |
11121 | extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype); | |
11122 | extern int __attribute__((warn_unused_result)) kobject_add(struct kobject *kobj, | |
11123 | struct kobject *parent, | |
11124 | const char *fmt, ...) | |
11125 | __attribute__((format(printf, 3, 4))); | |
11126 | extern int __attribute__((warn_unused_result)) kobject_init_and_add(struct kobject *kobj, | |
11127 | struct kobj_type *ktype, | |
11128 | struct kobject *parent, | |
11129 | const char *fmt, ...) | |
11130 | __attribute__((format(printf, 4, 5))); | |
11131 | extern void kobject_del(struct kobject *kobj); | |
11132 | extern struct kobject * __attribute__((warn_unused_result)) kobject_create(void); | |
11133 | extern struct kobject * __attribute__((warn_unused_result)) kobject_create_and_add(const char *name, | |
11134 | struct kobject *parent); | |
11135 | extern int __attribute__((warn_unused_result)) kobject_rename(struct kobject *, const char *new_name); | |
11136 | extern int __attribute__((warn_unused_result)) kobject_move(struct kobject *, struct kobject *); | |
11137 | extern struct kobject *kobject_get(struct kobject *kobj); | |
11138 | extern void kobject_put(struct kobject *kobj); | |
11139 | extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); | |
11140 | struct kobj_type { | |
11141 | void (*release)(struct kobject *kobj); | |
11142 | const struct sysfs_ops *sysfs_ops; | |
11143 | struct attribute **default_attrs; | |
11144 | const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); | |
11145 | const void *(*namespace)(struct kobject *kobj); | |
11146 | }; | |
11147 | struct kobj_uevent_env { | |
11148 | char *envp[32]; | |
11149 | int envp_idx; | |
11150 | char buf[2048]; | |
11151 | int buflen; | |
11152 | }; | |
11153 | struct kset_uevent_ops { | |
11154 | int (* const filter)(struct kset *kset, struct kobject *kobj); | |
11155 | const char *(* const name)(struct kset *kset, struct kobject *kobj); | |
11156 | int (* const uevent)(struct kset *kset, struct kobject *kobj, | |
11157 | struct kobj_uevent_env *env); | |
11158 | }; | |
11159 | struct kobj_attribute { | |
11160 | struct attribute attr; | |
11161 | ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr, | |
11162 | char *buf); | |
11163 | ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, | |
11164 | const char *buf, size_t count); | |
11165 | }; | |
11166 | extern const struct sysfs_ops kobj_sysfs_ops; | |
11167 | struct sock; | |
11168 | struct kset { | |
11169 | struct list_head list; | |
11170 | spinlock_t list_lock; | |
11171 | struct kobject kobj; | |
11172 | const struct kset_uevent_ops *uevent_ops; | |
11173 | }; | |
11174 | extern void kset_init(struct kset *kset); | |
11175 | extern int __attribute__((warn_unused_result)) kset_register(struct kset *kset); | |
11176 | extern void kset_unregister(struct kset *kset); | |
11177 | extern struct kset * __attribute__((warn_unused_result)) kset_create_and_add(const char *name, | |
11178 | const struct kset_uevent_ops *u, | |
11179 | struct kobject *parent_kobj); | |
11180 | static inline __attribute__((always_inline)) struct kset *to_kset(struct kobject *kobj) | |
11181 | { | |
11182 | return kobj ? ({ const typeof( ((struct kset *)0)->kobj ) *__mptr = (kobj); (struct kset *)( (char *)__mptr - __builtin_offsetof(struct kset,kobj) );}) : ((void *)0); | |
11183 | } | |
11184 | static inline __attribute__((always_inline)) struct kset *kset_get(struct kset *k) | |
11185 | { | |
11186 | return k ? to_kset(kobject_get(&k->kobj)) : ((void *)0); | |
11187 | } | |
11188 | static inline __attribute__((always_inline)) void kset_put(struct kset *k) | |
11189 | { | |
11190 | kobject_put(&k->kobj); | |
11191 | } | |
11192 | static inline __attribute__((always_inline)) struct kobj_type *get_ktype(struct kobject *kobj) | |
11193 | { | |
11194 | return kobj->ktype; | |
11195 | } | |
11196 | extern struct kobject *kset_find_obj(struct kset *, const char *); | |
11197 | extern struct kobject *kset_find_obj_hinted(struct kset *, const char *, | |
11198 | struct kobject *); | |
11199 | extern struct kobject *kernel_kobj; | |
11200 | extern struct kobject *mm_kobj; | |
11201 | extern struct kobject *hypervisor_kobj; | |
11202 | extern struct kobject *power_kobj; | |
11203 | extern struct kobject *firmware_kobj; | |
11204 | int kobject_uevent(struct kobject *kobj, enum kobject_action action); | |
11205 | int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |
11206 | char *envp[]); | |
11207 | int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) | |
11208 | __attribute__((format (printf, 2, 3))); | |
11209 | int kobject_action_type(const char *buf, size_t count, | |
11210 | enum kobject_action *type); | |
11211 | struct kernel_param; | |
11212 | struct kernel_param_ops { | |
11213 | int (*set)(const char *val, const struct kernel_param *kp); | |
11214 | int (*get)(char *buffer, const struct kernel_param *kp); | |
11215 | void (*free)(void *arg); | |
11216 | }; | |
11217 | struct kernel_param { | |
11218 | const char *name; | |
11219 | const struct kernel_param_ops *ops; | |
11220 | u16 perm; | |
11221 | u16 flags; | |
11222 | union { | |
11223 | void *arg; | |
11224 | const struct kparam_string *str; | |
11225 | const struct kparam_array *arr; | |
11226 | }; | |
11227 | }; | |
11228 | struct kparam_string { | |
11229 | unsigned int maxlen; | |
11230 | char *string; | |
11231 | }; | |
11232 | struct kparam_array | |
11233 | { | |
11234 | unsigned int max; | |
11235 | unsigned int elemsize; | |
11236 | unsigned int *num; | |
11237 | const struct kernel_param_ops *ops; | |
11238 | void *elem; | |
11239 | }; | |
11240 | static inline __attribute__((always_inline)) int | |
11241 | __check_old_set_param(int (*oldset)(const char *, struct kernel_param *)) | |
11242 | { | |
11243 | return 0; | |
11244 | } | |
11245 | extern void __kernel_param_lock(void); | |
11246 | extern void __kernel_param_unlock(void); | |
11247 | extern int parse_args(const char *name, | |
11248 | char *args, | |
11249 | const struct kernel_param *params, | |
11250 | unsigned num, | |
11251 | int (*unknown)(char *param, char *val)); | |
11252 | extern void destroy_params(const struct kernel_param *params, unsigned num); | |
11253 | extern struct kernel_param_ops param_ops_byte; | |
11254 | extern int param_set_byte(const char *val, const struct kernel_param *kp); | |
11255 | extern int param_get_byte(char *buffer, const struct kernel_param *kp); | |
11256 | extern struct kernel_param_ops param_ops_short; | |
11257 | extern int param_set_short(const char *val, const struct kernel_param *kp); | |
11258 | extern int param_get_short(char *buffer, const struct kernel_param *kp); | |
11259 | extern struct kernel_param_ops param_ops_ushort; | |
11260 | extern int param_set_ushort(const char *val, const struct kernel_param *kp); | |
11261 | extern int param_get_ushort(char *buffer, const struct kernel_param *kp); | |
11262 | extern struct kernel_param_ops param_ops_int; | |
11263 | extern int param_set_int(const char *val, const struct kernel_param *kp); | |
11264 | extern int param_get_int(char *buffer, const struct kernel_param *kp); | |
11265 | extern struct kernel_param_ops param_ops_uint; | |
11266 | extern int param_set_uint(const char *val, const struct kernel_param *kp); | |
11267 | extern int param_get_uint(char *buffer, const struct kernel_param *kp); | |
11268 | extern struct kernel_param_ops param_ops_long; | |
11269 | extern int param_set_long(const char *val, const struct kernel_param *kp); | |
11270 | extern int param_get_long(char *buffer, const struct kernel_param *kp); | |
11271 | extern struct kernel_param_ops param_ops_ulong; | |
11272 | extern int param_set_ulong(const char *val, const struct kernel_param *kp); | |
11273 | extern int param_get_ulong(char *buffer, const struct kernel_param *kp); | |
11274 | extern struct kernel_param_ops param_ops_charp; | |
11275 | extern int param_set_charp(const char *val, const struct kernel_param *kp); | |
11276 | extern int param_get_charp(char *buffer, const struct kernel_param *kp); | |
11277 | extern struct kernel_param_ops param_ops_bool; | |
11278 | extern int param_set_bool(const char *val, const struct kernel_param *kp); | |
11279 | extern int param_get_bool(char *buffer, const struct kernel_param *kp); | |
11280 | extern struct kernel_param_ops param_ops_invbool; | |
11281 | extern int param_set_invbool(const char *val, const struct kernel_param *kp); | |
11282 | extern int param_get_invbool(char *buffer, const struct kernel_param *kp); | |
11283 | extern struct kernel_param_ops param_array_ops; | |
11284 | extern struct kernel_param_ops param_ops_string; | |
11285 | extern int param_set_copystring(const char *val, const struct kernel_param *); | |
11286 | extern int param_get_string(char *buffer, const struct kernel_param *kp); | |
11287 | struct module; | |
11288 | extern int module_param_sysfs_setup(struct module *mod, | |
11289 | const struct kernel_param *kparam, | |
11290 | unsigned int num_params); | |
11291 | extern void module_param_sysfs_remove(struct module *mod); | |
11292 | struct jump_label_key { | |
11293 | atomic_t enabled; | |
11294 | struct jump_entry *entries; | |
11295 | struct jump_label_mod *next; | |
11296 | }; | |
11297 | static inline __attribute__((always_inline)) __attribute__((always_inline)) bool arch_static_branch(struct jump_label_key *key) | |
11298 | { | |
11299 | asm goto("1:" | |
11300 | ".byte 0xe9 \n\t .long 0\n\t" | |
11301 | ".pushsection __jump_table, \"aw\" \n\t" | |
11302 | " " ".balign 4" " " "\n\t" | |
11303 | " " ".long" " " "1b, %l[l_yes], %c0 \n\t" | |
11304 | ".popsection \n\t" | |
11305 | : : "i" (key) : : l_yes); | |
11306 | return false; | |
11307 | l_yes: | |
11308 | return true; | |
11309 | } | |
11310 | typedef u32 jump_label_t; | |
11311 | struct jump_entry { | |
11312 | jump_label_t code; | |
11313 | jump_label_t target; | |
11314 | jump_label_t key; | |
11315 | }; | |
11316 | enum jump_label_type { | |
11317 | JUMP_LABEL_DISABLE = 0, | |
11318 | JUMP_LABEL_ENABLE, | |
11319 | }; | |
11320 | struct module; | |
11321 | static inline __attribute__((always_inline)) __attribute__((always_inline)) bool static_branch(struct jump_label_key *key) | |
11322 | { | |
11323 | return arch_static_branch(key); | |
11324 | } | |
11325 | extern struct jump_entry __start___jump_table[]; | |
11326 | extern struct jump_entry __stop___jump_table[]; | |
11327 | extern void jump_label_lock(void); | |
11328 | extern void jump_label_unlock(void); | |
11329 | extern void arch_jump_label_transform(struct jump_entry *entry, | |
11330 | enum jump_label_type type); | |
11331 | extern void arch_jump_label_text_poke_early(jump_label_t addr); | |
11332 | extern int jump_label_text_reserved(void *start, void *end); | |
11333 | extern void jump_label_inc(struct jump_label_key *key); | |
11334 | extern void jump_label_dec(struct jump_label_key *key); | |
11335 | extern bool jump_label_enabled(struct jump_label_key *key); | |
11336 | extern void jump_label_apply_nops(struct module *mod); | |
11337 | struct module; | |
11338 | struct tracepoint; | |
11339 | struct tracepoint_func { | |
11340 | void *func; | |
11341 | void *data; | |
11342 | }; | |
11343 | struct tracepoint { | |
11344 | const char *name; | |
11345 | struct jump_label_key key; | |
11346 | void (*regfunc)(void); | |
11347 | void (*unregfunc)(void); | |
11348 | struct tracepoint_func *funcs; | |
11349 | }; | |
11350 | extern int tracepoint_probe_register(const char *name, void *probe, void *data); | |
11351 | extern int | |
11352 | tracepoint_probe_unregister(const char *name, void *probe, void *data); | |
11353 | extern int tracepoint_probe_register_noupdate(const char *name, void *probe, | |
11354 | void *data); | |
11355 | extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe, | |
11356 | void *data); | |
11357 | extern void tracepoint_probe_update_all(void); | |
11358 | struct tracepoint_iter { | |
11359 | struct module *module; | |
11360 | struct tracepoint * const *tracepoint; | |
11361 | }; | |
11362 | extern void tracepoint_iter_start(struct tracepoint_iter *iter); | |
11363 | extern void tracepoint_iter_next(struct tracepoint_iter *iter); | |
11364 | extern void tracepoint_iter_stop(struct tracepoint_iter *iter); | |
11365 | extern void tracepoint_iter_reset(struct tracepoint_iter *iter); | |
11366 | extern int tracepoint_get_iter_range(struct tracepoint * const **tracepoint, | |
11367 | struct tracepoint * const *begin, struct tracepoint * const *end); | |
11368 | static inline __attribute__((always_inline)) void tracepoint_synchronize_unregister(void) | |
11369 | { | |
11370 | synchronize_sched(); | |
11371 | } | |
11372 | extern | |
11373 | void tracepoint_update_probe_range(struct tracepoint * const *begin, | |
11374 | struct tracepoint * const *end); | |
11375 | struct mod_arch_specific | |
11376 | { | |
11377 | }; | |
11378 | struct module; | |
11379 | extern struct tracepoint | |
11380 | __tracepoint_module_load | |
11381 | ; static inline __attribute__((always_inline)) void | |
11382 | trace_module_load | |
11383 | (struct module *mod) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_load.key)))) ? !!((static_branch(&__tracepoint_module_load.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11384 | "include/trace/events/module.h" | |
11385 | , .line = | |
11386 | 45 | |
11387 | , }; ______r = !!((static_branch(&__tracepoint_module_load.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11388 | "include/trace/events/module.h" | |
11389 | , .line = | |
11390 | 45 | |
11391 | , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_load)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_load)->funcs))* )(*(volatile typeof(((&__tracepoint_module_load)->funcs)) *)&(((&__tracepoint_module_load)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_load)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11392 | "include/trace/events/module.h" | |
11393 | , .line = | |
11394 | 45 | |
11395 | , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, struct module *mod))(it_func))(__data, mod); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int | |
11396 | register_trace_module_load | |
11397 | (void (*probe)(void *__data, struct module *mod), void *data) { return tracepoint_probe_register("module_load", (void *)probe, data); } static inline __attribute__((always_inline)) int | |
11398 | unregister_trace_module_load | |
11399 | (void (*probe)(void *__data, struct module *mod), void *data) { return tracepoint_probe_unregister("module_load", (void *)probe, data); } static inline __attribute__((always_inline)) void | |
11400 | check_trace_callback_type_module_load | |
11401 | (void (*cb)(void *__data, struct module *mod)) { } | |
11402 | ; | |
11403 | extern struct tracepoint | |
11404 | __tracepoint_module_free | |
11405 | ; static inline __attribute__((always_inline)) void | |
11406 | trace_module_free | |
11407 | (struct module *mod) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_free.key)))) ? !!((static_branch(&__tracepoint_module_free.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11408 | "include/trace/events/module.h" | |
11409 | , .line = | |
11410 | 62 | |
11411 | , }; ______r = !!((static_branch(&__tracepoint_module_free.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11412 | "include/trace/events/module.h" | |
11413 | , .line = | |
11414 | 62 | |
11415 | , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_free)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_free)->funcs))* )(*(volatile typeof(((&__tracepoint_module_free)->funcs)) *)&(((&__tracepoint_module_free)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_free)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11416 | "include/trace/events/module.h" | |
11417 | , .line = | |
11418 | 62 | |
11419 | , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, struct module *mod))(it_func))(__data, mod); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int | |
11420 | register_trace_module_free | |
11421 | (void (*probe)(void *__data, struct module *mod), void *data) { return tracepoint_probe_register("module_free", (void *)probe, data); } static inline __attribute__((always_inline)) int | |
11422 | unregister_trace_module_free | |
11423 | (void (*probe)(void *__data, struct module *mod), void *data) { return tracepoint_probe_unregister("module_free", (void *)probe, data); } static inline __attribute__((always_inline)) void | |
11424 | check_trace_callback_type_module_free | |
11425 | (void (*cb)(void *__data, struct module *mod)) { } | |
11426 | ; | |
11427 | ; | |
11428 | extern struct tracepoint | |
11429 | __tracepoint_module_get | |
11430 | ; static inline __attribute__((always_inline)) void | |
11431 | trace_module_get | |
11432 | (struct module *mod, unsigned long ip) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_get.key)))) ? !!((static_branch(&__tracepoint_module_get.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11433 | "include/trace/events/module.h" | |
11434 | , .line = | |
11435 | 94 | |
11436 | , }; ______r = !!((static_branch(&__tracepoint_module_get.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11437 | "include/trace/events/module.h" | |
11438 | , .line = | |
11439 | 94 | |
11440 | , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_get)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_get)->funcs))* )(*(volatile typeof(((&__tracepoint_module_get)->funcs)) *)&(((&__tracepoint_module_get)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_get)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11441 | "include/trace/events/module.h" | |
11442 | , .line = | |
11443 | 94 | |
11444 | , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, struct module *mod, unsigned long ip))(it_func))(__data, mod, ip); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int | |
11445 | register_trace_module_get | |
11446 | (void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return tracepoint_probe_register("module_get", (void *)probe, data); } static inline __attribute__((always_inline)) int | |
11447 | unregister_trace_module_get | |
11448 | (void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return tracepoint_probe_unregister("module_get", (void *)probe, data); } static inline __attribute__((always_inline)) void | |
11449 | check_trace_callback_type_module_get | |
11450 | (void (*cb)(void *__data, struct module *mod, unsigned long ip)) { } | |
11451 | ; | |
11452 | extern struct tracepoint | |
11453 | __tracepoint_module_put | |
11454 | ; static inline __attribute__((always_inline)) void | |
11455 | trace_module_put | |
11456 | (struct module *mod, unsigned long ip) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_put.key)))) ? !!((static_branch(&__tracepoint_module_put.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11457 | "include/trace/events/module.h" | |
11458 | , .line = | |
11459 | 101 | |
11460 | , }; ______r = !!((static_branch(&__tracepoint_module_put.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11461 | "include/trace/events/module.h" | |
11462 | , .line = | |
11463 | 101 | |
11464 | , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_put)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_put)->funcs))* )(*(volatile typeof(((&__tracepoint_module_put)->funcs)) *)&(((&__tracepoint_module_put)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_put)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11465 | "include/trace/events/module.h" | |
11466 | , .line = | |
11467 | 101 | |
11468 | , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, struct module *mod, unsigned long ip))(it_func))(__data, mod, ip); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int | |
11469 | register_trace_module_put | |
11470 | (void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return tracepoint_probe_register("module_put", (void *)probe, data); } static inline __attribute__((always_inline)) int | |
11471 | unregister_trace_module_put | |
11472 | (void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return tracepoint_probe_unregister("module_put", (void *)probe, data); } static inline __attribute__((always_inline)) void | |
11473 | check_trace_callback_type_module_put | |
11474 | (void (*cb)(void *__data, struct module *mod, unsigned long ip)) { } | |
11475 | ; | |
11476 | extern struct tracepoint | |
11477 | __tracepoint_module_request | |
11478 | ; static inline __attribute__((always_inline)) void | |
11479 | trace_module_request | |
11480 | (char *name, bool wait, unsigned long ip) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_request.key)))) ? !!((static_branch(&__tracepoint_module_request.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11481 | "include/trace/events/module.h" | |
11482 | , .line = | |
11483 | 124 | |
11484 | , }; ______r = !!((static_branch(&__tracepoint_module_request.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11485 | "include/trace/events/module.h" | |
11486 | , .line = | |
11487 | 124 | |
11488 | , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_request)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_request)->funcs))* )(*(volatile typeof(((&__tracepoint_module_request)->funcs)) *)&(((&__tracepoint_module_request)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_request)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
11489 | "include/trace/events/module.h" | |
11490 | , .line = | |
11491 | 124 | |
11492 | , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, char *name, bool wait, unsigned long ip))(it_func))(__data, name, wait, ip); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int | |
11493 | register_trace_module_request | |
11494 | (void (*probe)(void *__data, char *name, bool wait, unsigned long ip), void *data) { return tracepoint_probe_register("module_request", (void *)probe, data); } static inline __attribute__((always_inline)) int | |
11495 | unregister_trace_module_request | |
11496 | (void (*probe)(void *__data, char *name, bool wait, unsigned long ip), void *data) { return tracepoint_probe_unregister("module_request", (void *)probe, data); } static inline __attribute__((always_inline)) void | |
11497 | check_trace_callback_type_module_request | |
11498 | (void (*cb)(void *__data, char *name, bool wait, unsigned long ip)) { } | |
11499 | ; | |
11500 | struct kernel_symbol | |
11501 | { | |
11502 | unsigned long value; | |
11503 | const char *name; | |
11504 | }; | |
11505 | struct modversion_info | |
11506 | { | |
11507 | unsigned long crc; | |
11508 | char name[(64 - sizeof(unsigned long))]; | |
11509 | }; | |
11510 | struct module; | |
11511 | struct module_attribute { | |
11512 | struct attribute attr; | |
11513 | ssize_t (*show)(struct module_attribute *, struct module *, char *); | |
11514 | ssize_t (*store)(struct module_attribute *, struct module *, | |
11515 | const char *, size_t count); | |
11516 | void (*setup)(struct module *, const char *); | |
11517 | int (*test)(struct module *); | |
11518 | void (*free)(struct module *); | |
11519 | }; | |
11520 | struct module_version_attribute { | |
11521 | struct module_attribute mattr; | |
11522 | const char *module_name; | |
11523 | const char *version; | |
11524 | } __attribute__ ((__aligned__(sizeof(void *)))); | |
11525 | extern ssize_t __modver_version_show(struct module_attribute *, | |
11526 | struct module *, char *); | |
11527 | struct module_kobject | |
11528 | { | |
11529 | struct kobject kobj; | |
11530 | struct module *mod; | |
11531 | struct kobject *drivers_dir; | |
11532 | struct module_param_attrs *mp; | |
11533 | }; | |
11534 | extern int init_module(void); | |
11535 | extern void cleanup_module(void); | |
11536 | struct exception_table_entry; | |
11537 | const struct exception_table_entry * | |
11538 | search_extable(const struct exception_table_entry *first, | |
11539 | const struct exception_table_entry *last, | |
11540 | unsigned long value); | |
11541 | void sort_extable(struct exception_table_entry *start, | |
11542 | struct exception_table_entry *finish); | |
11543 | void sort_main_extable(void); | |
11544 | void trim_init_extable(struct module *m); | |
11545 | extern struct module __this_module; | |
11546 | const struct exception_table_entry *search_exception_tables(unsigned long add); | |
11547 | struct notifier_block; | |
11548 | extern int modules_disabled; | |
11549 | void *__symbol_get(const char *symbol); | |
11550 | void *__symbol_get_gpl(const char *symbol); | |
11551 | struct module_use { | |
11552 | struct list_head source_list; | |
11553 | struct list_head target_list; | |
11554 | struct module *source, *target; | |
11555 | }; | |
11556 | enum module_state | |
11557 | { | |
11558 | MODULE_STATE_LIVE, | |
11559 | MODULE_STATE_COMING, | |
11560 | MODULE_STATE_GOING, | |
11561 | }; | |
11562 | struct module | |
11563 | { | |
11564 | enum module_state state; | |
11565 | struct list_head list; | |
11566 | char name[(64 - sizeof(unsigned long))]; | |
11567 | struct module_kobject mkobj; | |
11568 | struct module_attribute *modinfo_attrs; | |
11569 | const char *version; | |
11570 | const char *srcversion; | |
11571 | struct kobject *holders_dir; | |
11572 | const struct kernel_symbol *syms; | |
11573 | const unsigned long *crcs; | |
11574 | unsigned int num_syms; | |
11575 | struct kernel_param *kp; | |
11576 | unsigned int num_kp; | |
11577 | unsigned int num_gpl_syms; | |
11578 | const struct kernel_symbol *gpl_syms; | |
11579 | const unsigned long *gpl_crcs; | |
11580 | const struct kernel_symbol *unused_syms; | |
11581 | const unsigned long *unused_crcs; | |
11582 | unsigned int num_unused_syms; | |
11583 | unsigned int num_unused_gpl_syms; | |
11584 | const struct kernel_symbol *unused_gpl_syms; | |
11585 | const unsigned long *unused_gpl_crcs; | |
11586 | const struct kernel_symbol *gpl_future_syms; | |
11587 | const unsigned long *gpl_future_crcs; | |
11588 | unsigned int num_gpl_future_syms; | |
11589 | unsigned int num_exentries; | |
11590 | struct exception_table_entry *extable; | |
11591 | int (*init)(void); | |
11592 | void *module_init; | |
11593 | void *module_core; | |
11594 | unsigned int init_size, core_size; | |
11595 | unsigned int init_text_size, core_text_size; | |
11596 | unsigned int init_ro_size, core_ro_size; | |
11597 | struct mod_arch_specific arch; | |
11598 | unsigned int taints; | |
11599 | unsigned num_bugs; | |
11600 | struct list_head bug_list; | |
11601 | struct bug_entry *bug_table; | |
11602 | Elf32_Sym *symtab, *core_symtab; | |
11603 | unsigned int num_symtab, core_num_syms; | |
11604 | char *strtab, *core_strtab; | |
11605 | struct module_sect_attrs *sect_attrs; | |
11606 | struct module_notes_attrs *notes_attrs; | |
11607 | char *args; | |
11608 | void *percpu; | |
11609 | unsigned int percpu_size; | |
11610 | unsigned int num_tracepoints; | |
11611 | struct tracepoint * const *tracepoints_ptrs; | |
11612 | struct jump_entry *jump_entries; | |
11613 | unsigned int num_jump_entries; | |
11614 | unsigned int num_trace_bprintk_fmt; | |
11615 | const char **trace_bprintk_fmt_start; | |
11616 | struct ftrace_event_call **trace_events; | |
11617 | unsigned int num_trace_events; | |
11618 | unsigned int num_ftrace_callsites; | |
11619 | unsigned long *ftrace_callsites; | |
11620 | struct list_head source_list; | |
11621 | struct list_head target_list; | |
11622 | struct task_struct *waiter; | |
11623 | void (*exit)(void); | |
11624 | struct module_ref { | |
11625 | unsigned int incs; | |
11626 | unsigned int decs; | |
11627 | } *refptr; | |
11628 | }; | |
11629 | extern struct mutex module_mutex; | |
11630 | static inline __attribute__((always_inline)) int module_is_live(struct module *mod) | |
11631 | { | |
11632 | return mod->state != MODULE_STATE_GOING; | |
11633 | } | |
11634 | struct module *__module_text_address(unsigned long addr); | |
11635 | struct module *__module_address(unsigned long addr); | |
11636 | bool is_module_address(unsigned long addr); | |
11637 | bool is_module_percpu_address(unsigned long addr); | |
11638 | bool is_module_text_address(unsigned long addr); | |
11639 | static inline __attribute__((always_inline)) int within_module_core(unsigned long addr, struct module *mod) | |
11640 | { | |
11641 | return (unsigned long)mod->module_core <= addr && | |
11642 | addr < (unsigned long)mod->module_core + mod->core_size; | |
11643 | } | |
11644 | static inline __attribute__((always_inline)) int within_module_init(unsigned long addr, struct module *mod) | |
11645 | { | |
11646 | return (unsigned long)mod->module_init <= addr && | |
11647 | addr < (unsigned long)mod->module_init + mod->init_size; | |
11648 | } | |
11649 | struct module *find_module(const char *name); | |
11650 | struct symsearch { | |
11651 | const struct kernel_symbol *start, *stop; | |
11652 | const unsigned long *crcs; | |
11653 | enum { | |
11654 | NOT_GPL_ONLY, | |
11655 | GPL_ONLY, | |
11656 | WILL_BE_GPL_ONLY, | |
11657 | } licence; | |
11658 | bool unused; | |
11659 | }; | |
11660 | const struct kernel_symbol *find_symbol(const char *name, | |
11661 | struct module **owner, | |
11662 | const unsigned long **crc, | |
11663 | bool gplok, | |
11664 | bool warn); | |
11665 | bool each_symbol_section(bool (*fn)(const struct symsearch *arr, | |
11666 | struct module *owner, | |
11667 | void *data), void *data); | |
11668 | int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, | |
11669 | char *name, char *module_name, int *exported); | |
11670 | unsigned long module_kallsyms_lookup_name(const char *name); | |
11671 | int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, | |
11672 | struct module *, unsigned long), | |
11673 | void *data); | |
11674 | extern void __module_put_and_exit(struct module *mod, long code) | |
11675 | __attribute__((noreturn)); | |
11676 | unsigned int module_refcount(struct module *mod); | |
11677 | void __symbol_put(const char *symbol); | |
11678 | void symbol_put_addr(void *addr); | |
11679 | static inline __attribute__((always_inline)) void __module_get(struct module *module) | |
11680 | { | |
11681 | if (__builtin_constant_p(((module))) ? !!((module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 510, }; ______r = !!((module)); ______f.miss_hit[______r]++; ______r; })) { | |
11682 | do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); | |
11683 | do { do { const void *__vpp_verify = (typeof(&(((module->refptr->incs)))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((module->refptr->incs)))) { case 1: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? 
!!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((module->refptr->incs))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((module->refptr->incs)))))); (typeof(*(&((((module->refptr->incs)))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); | |
11684 | trace_module_get(module, ({ __label__ __here; __here: (unsigned long)&&__here; })); | |
11685 | do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 514, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 514, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 514, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 514, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); | |
11686 | } | |
11687 | } | |
11688 | static inline __attribute__((always_inline)) int try_module_get(struct module *module) | |
11689 | { | |
11690 | int ret = 1; | |
11691 | if (__builtin_constant_p(((module))) ? !!((module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 522, }; ______r = !!((module)); ______f.miss_hit[______r]++; ______r; })) { | |
11692 | do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); | |
11693 | if (__builtin_constant_p((((__builtin_constant_p(module_is_live(module)) ? !!(module_is_live(module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 525, }; ______r = __builtin_expect(!!(module_is_live(module)), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(module_is_live(module)) ? !!(module_is_live(module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 525, }; ______r = __builtin_expect(!!(module_is_live(module)), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 525, }; ______r = !!(((__builtin_constant_p(module_is_live(module)) ? !!(module_is_live(module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 525, }; ______r = __builtin_expect(!!(module_is_live(module)), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; })) { | |
11694 | do { do { const void *__vpp_verify = (typeof(&(((module->refptr->incs)))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((module->refptr->incs)))) { case 1: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? 
!!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((module->refptr->incs))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((module->refptr->incs)))))); (typeof(*(&((((module->refptr->incs)))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); | |
11695 | trace_module_get(module, ({ __label__ __here; __here: (unsigned long)&&__here; })); | |
11696 | } else | |
11697 | ret = 0; | |
11698 | do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 531, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 531, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 531, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 531, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); | |
11699 | } | |
11700 | return ret; | |
11701 | } | |
11702 | extern void module_put(struct module *module); | |
11703 | int ref_module(struct module *a, struct module *b); | |
11704 | const char *module_address_lookup(unsigned long addr, | |
11705 | unsigned long *symbolsize, | |
11706 | unsigned long *offset, | |
11707 | char **modname, | |
11708 | char *namebuf); | |
11709 | int lookup_module_symbol_name(unsigned long addr, char *symname); | |
11710 | int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); | |
11711 | const struct exception_table_entry *search_module_extables(unsigned long addr); | |
11712 | int register_module_notifier(struct notifier_block * nb); | |
11713 | int unregister_module_notifier(struct notifier_block * nb); | |
11714 | extern void print_modules(void); | |
11715 | extern void module_update_tracepoints(void); | |
11716 | extern int module_get_iter_tracepoints(struct tracepoint_iter *iter); | |
11717 | extern struct kset *module_kset; | |
11718 | extern struct kobj_type module_ktype; | |
11719 | extern int module_sysfs_initialized; | |
11720 | static inline __attribute__((always_inline)) void set_all_modules_text_rw(void) { } | |
11721 | static inline __attribute__((always_inline)) void set_all_modules_text_ro(void) { } | |
11722 | void module_bug_finalize(const Elf32_Ehdr *, const Elf32_Shdr *, | |
11723 | struct module *); | |
11724 | void module_bug_cleanup(struct module *); | |
11725 | void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init(void); | |
11726 | int slab_is_available(void); | |
11727 | struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, | |
11728 | unsigned long, | |
11729 | void (*)(void *)); | |
11730 | void kmem_cache_destroy(struct kmem_cache *); | |
11731 | int kmem_cache_shrink(struct kmem_cache *); | |
11732 | void kmem_cache_free(struct kmem_cache *, void *); | |
11733 | unsigned int kmem_cache_size(struct kmem_cache *); | |
11734 | void * __attribute__((warn_unused_result)) __krealloc(const void *, size_t, gfp_t); | |
11735 | void * __attribute__((warn_unused_result)) krealloc(const void *, size_t, gfp_t); | |
11736 | void kfree(const void *); | |
11737 | void kzfree(const void *); | |
11738 | size_t ksize(const void *); | |
11739 | static inline __attribute__((always_inline)) void kmemleak_init(void) | |
11740 | { | |
11741 | } | |
11742 | static inline __attribute__((always_inline)) void kmemleak_alloc(const void *ptr, size_t size, int min_count, | |
11743 | gfp_t gfp) | |
11744 | { | |
11745 | } | |
11746 | static inline __attribute__((always_inline)) void kmemleak_alloc_recursive(const void *ptr, size_t size, | |
11747 | int min_count, unsigned long flags, | |
11748 | gfp_t gfp) | |
11749 | { | |
11750 | } | |
11751 | static inline __attribute__((always_inline)) void kmemleak_free(const void *ptr) | |
11752 | { | |
11753 | } | |
11754 | static inline __attribute__((always_inline)) void kmemleak_free_part(const void *ptr, size_t size) | |
11755 | { | |
11756 | } | |
11757 | static inline __attribute__((always_inline)) void kmemleak_free_recursive(const void *ptr, unsigned long flags) | |
11758 | { | |
11759 | } | |
11760 | static inline __attribute__((always_inline)) void kmemleak_not_leak(const void *ptr) | |
11761 | { | |
11762 | } | |
11763 | static inline __attribute__((always_inline)) void kmemleak_ignore(const void *ptr) | |
11764 | { | |
11765 | } | |
11766 | static inline __attribute__((always_inline)) void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) | |
11767 | { | |
11768 | } | |
11769 | static inline __attribute__((always_inline)) void kmemleak_erase(void **ptr) | |
11770 | { | |
11771 | } | |
11772 | static inline __attribute__((always_inline)) void kmemleak_no_scan(const void *ptr) | |
11773 | { | |
11774 | } | |
11775 | enum stat_item { | |
11776 | ALLOC_FASTPATH, | |
11777 | ALLOC_SLOWPATH, | |
11778 | FREE_FASTPATH, | |
11779 | FREE_SLOWPATH, | |
11780 | FREE_FROZEN, | |
11781 | FREE_ADD_PARTIAL, | |
11782 | FREE_REMOVE_PARTIAL, | |
11783 | ALLOC_FROM_PARTIAL, | |
11784 | ALLOC_SLAB, | |
11785 | ALLOC_REFILL, | |
11786 | FREE_SLAB, | |
11787 | CPUSLAB_FLUSH, | |
11788 | DEACTIVATE_FULL, | |
11789 | DEACTIVATE_EMPTY, | |
11790 | DEACTIVATE_TO_HEAD, | |
11791 | DEACTIVATE_TO_TAIL, | |
11792 | DEACTIVATE_REMOTE_FREES, | |
11793 | ORDER_FALLBACK, | |
11794 | CMPXCHG_DOUBLE_CPU_FAIL, | |
11795 | NR_SLUB_STAT_ITEMS }; | |
11796 | struct kmem_cache_cpu { | |
11797 | void **freelist; | |
11798 | unsigned long tid; | |
11799 | struct page *page; | |
11800 | int node; | |
11801 | }; | |
11802 | struct kmem_cache_node { | |
11803 | spinlock_t list_lock; | |
11804 | unsigned long nr_partial; | |
11805 | struct list_head partial; | |
11806 | atomic_long_t nr_slabs; | |
11807 | atomic_long_t total_objects; | |
11808 | struct list_head full; | |
11809 | }; | |
11810 | struct kmem_cache_order_objects { | |
11811 | unsigned long x; | |
11812 | }; | |
11813 | struct kmem_cache { | |
11814 | struct kmem_cache_cpu *cpu_slab; | |
11815 | unsigned long flags; | |
11816 | unsigned long min_partial; | |
11817 | int size; | |
11818 | int objsize; | |
11819 | int offset; | |
11820 | struct kmem_cache_order_objects oo; | |
11821 | struct kmem_cache_order_objects max; | |
11822 | struct kmem_cache_order_objects min; | |
11823 | gfp_t allocflags; | |
11824 | int refcount; | |
11825 | void (*ctor)(void *); | |
11826 | int inuse; | |
11827 | int align; | |
11828 | int reserved; | |
11829 | const char *name; | |
11830 | struct list_head list; | |
11831 | struct kobject kobj; | |
11832 | struct kmem_cache_node *node[(1 << 0)]; | |
11833 | }; | |
11834 | extern struct kmem_cache *kmalloc_caches[(12 + 2)]; | |
11835 | static inline __attribute__((always_inline)) __attribute__((always_inline)) int kmalloc_index(size_t size) | |
11836 | { | |
11837 | if (__builtin_constant_p(((!size))) ? !!((!size)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 158, }; ______r = !!((!size)); ______f.miss_hit[______r]++; ______r; })) | |
11838 | return 0; | |
11839 | if (__builtin_constant_p(((size <= 8))) ? !!((size <= 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 161, }; ______r = !!((size <= 8)); ______f.miss_hit[______r]++; ______r; })) | |
11840 | return ( __builtin_constant_p(8) ? ( (8) < 1 ? ____ilog2_NaN() : (8) & (1ULL << 63) ? 63 : (8) & (1ULL << 62) ? 62 : (8) & (1ULL << 61) ? 61 : (8) & (1ULL << 60) ? 60 : (8) & (1ULL << 59) ? 59 : (8) & (1ULL << 58) ? 58 : (8) & (1ULL << 57) ? 57 : (8) & (1ULL << 56) ? 56 : (8) & (1ULL << 55) ? 55 : (8) & (1ULL << 54) ? 54 : (8) & (1ULL << 53) ? 53 : (8) & (1ULL << 52) ? 52 : (8) & (1ULL << 51) ? 51 : (8) & (1ULL << 50) ? 50 : (8) & (1ULL << 49) ? 49 : (8) & (1ULL << 48) ? 48 : (8) & (1ULL << 47) ? 47 : (8) & (1ULL << 46) ? 46 : (8) & (1ULL << 45) ? 45 : (8) & (1ULL << 44) ? 44 : (8) & (1ULL << 43) ? 43 : (8) & (1ULL << 42) ? 42 : (8) & (1ULL << 41) ? 41 : (8) & (1ULL << 40) ? 40 : (8) & (1ULL << 39) ? 39 : (8) & (1ULL << 38) ? 38 : (8) & (1ULL << 37) ? 37 : (8) & (1ULL << 36) ? 36 : (8) & (1ULL << 35) ? 35 : (8) & (1ULL << 34) ? 34 : (8) & (1ULL << 33) ? 33 : (8) & (1ULL << 32) ? 32 : (8) & (1ULL << 31) ? 31 : (8) & (1ULL << 30) ? 30 : (8) & (1ULL << 29) ? 29 : (8) & (1ULL << 28) ? 28 : (8) & (1ULL << 27) ? 27 : (8) & (1ULL << 26) ? 26 : (8) & (1ULL << 25) ? 25 : (8) & (1ULL << 24) ? 24 : (8) & (1ULL << 23) ? 23 : (8) & (1ULL << 22) ? 22 : (8) & (1ULL << 21) ? 21 : (8) & (1ULL << 20) ? 20 : (8) & (1ULL << 19) ? 19 : (8) & (1ULL << 18) ? 18 : (8) & (1ULL << 17) ? 17 : (8) & (1ULL << 16) ? 16 : (8) & (1ULL << 15) ? 15 : (8) & (1ULL << 14) ? 14 : (8) & (1ULL << 13) ? 13 : (8) & (1ULL << 12) ? 12 : (8) & (1ULL << 11) ? 11 : (8) & (1ULL << 10) ? 10 : (8) & (1ULL << 9) ? 9 : (8) & (1ULL << 8) ? 8 : (8) & (1ULL << 7) ? 7 : (8) & (1ULL << 6) ? 6 : (8) & (1ULL << 5) ? 5 : (8) & (1ULL << 4) ? 4 : (8) & (1ULL << 3) ? 3 : (8) & (1ULL << 2) ? 2 : (8) & (1ULL << 1) ? 1 : (8) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(8) <= 4) ? __ilog2_u32(8) : __ilog2_u64(8) ); | |
11841 | if (__builtin_constant_p(((8 <= 32 && size > 64 && size <= 96))) ? !!((8 <= 32 && size > 64 && size <= 96)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 164, }; ______r = !!((8 <= 32 && size > 64 && size <= 96)); ______f.miss_hit[______r]++; ______r; })) | |
11842 | return 1; | |
11843 | if (__builtin_constant_p(((8 <= 64 && size > 128 && size <= 192))) ? !!((8 <= 64 && size > 128 && size <= 192)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 166, }; ______r = !!((8 <= 64 && size > 128 && size <= 192)); ______f.miss_hit[______r]++; ______r; })) | |
11844 | return 2; | |
11845 | if (__builtin_constant_p(((size <= 8))) ? !!((size <= 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 168, }; ______r = !!((size <= 8)); ______f.miss_hit[______r]++; ______r; })) return 3; | |
11846 | if (__builtin_constant_p(((size <= 16))) ? !!((size <= 16)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 169, }; ______r = !!((size <= 16)); ______f.miss_hit[______r]++; ______r; })) return 4; | |
11847 | if (__builtin_constant_p(((size <= 32))) ? !!((size <= 32)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 170, }; ______r = !!((size <= 32)); ______f.miss_hit[______r]++; ______r; })) return 5; | |
11848 | if (__builtin_constant_p(((size <= 64))) ? !!((size <= 64)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 171, }; ______r = !!((size <= 64)); ______f.miss_hit[______r]++; ______r; })) return 6; | |
11849 | if (__builtin_constant_p(((size <= 128))) ? !!((size <= 128)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 172, }; ______r = !!((size <= 128)); ______f.miss_hit[______r]++; ______r; })) return 7; | |
11850 | if (__builtin_constant_p(((size <= 256))) ? !!((size <= 256)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 173, }; ______r = !!((size <= 256)); ______f.miss_hit[______r]++; ______r; })) return 8; | |
11851 | if (__builtin_constant_p(((size <= 512))) ? !!((size <= 512)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 174, }; ______r = !!((size <= 512)); ______f.miss_hit[______r]++; ______r; })) return 9; | |
11852 | if (__builtin_constant_p(((size <= 1024))) ? !!((size <= 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 175, }; ______r = !!((size <= 1024)); ______f.miss_hit[______r]++; ______r; })) return 10; | |
11853 | if (__builtin_constant_p(((size <= 2 * 1024))) ? !!((size <= 2 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 176, }; ______r = !!((size <= 2 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 11; | |
11854 | if (__builtin_constant_p(((size <= 4 * 1024))) ? !!((size <= 4 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 177, }; ______r = !!((size <= 4 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 12; | |
11855 | if (__builtin_constant_p(((size <= 8 * 1024))) ? !!((size <= 8 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 183, }; ______r = !!((size <= 8 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 13; | |
11856 | if (__builtin_constant_p(((size <= 16 * 1024))) ? !!((size <= 16 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 184, }; ______r = !!((size <= 16 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 14; | |
11857 | if (__builtin_constant_p(((size <= 32 * 1024))) ? !!((size <= 32 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 185, }; ______r = !!((size <= 32 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 15; | |
11858 | if (__builtin_constant_p(((size <= 64 * 1024))) ? !!((size <= 64 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 186, }; ______r = !!((size <= 64 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 16; | |
11859 | if (__builtin_constant_p(((size <= 128 * 1024))) ? !!((size <= 128 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 187, }; ______r = !!((size <= 128 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 17; | |
11860 | if (__builtin_constant_p(((size <= 256 * 1024))) ? !!((size <= 256 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 188, }; ______r = !!((size <= 256 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 18; | |
11861 | if (__builtin_constant_p(((size <= 512 * 1024))) ? !!((size <= 512 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 189, }; ______r = !!((size <= 512 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 19; | |
11862 | if (__builtin_constant_p(((size <= 1024 * 1024))) ? !!((size <= 1024 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 190, }; ______r = !!((size <= 1024 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 20; | |
11863 | if (__builtin_constant_p(((size <= 2 * 1024 * 1024))) ? !!((size <= 2 * 1024 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 191, }; ______r = !!((size <= 2 * 1024 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 21; | |
11864 | do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/slub_def.h"), "i" (192), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); | |
11865 | return -1; | |
11866 | } | |
11867 | static inline __attribute__((always_inline)) __attribute__((always_inline)) struct kmem_cache *kmalloc_slab(size_t size) | |
11868 | { | |
11869 | int index = kmalloc_index(size); | |
11870 | if (__builtin_constant_p(((index == 0))) ? !!((index == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 214, }; ______r = !!((index == 0)); ______f.miss_hit[______r]++; ______r; })) | |
11871 | return ((void *)0); | |
11872 | return kmalloc_caches[index]; | |
11873 | } | |
11874 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | |
11875 | void *__kmalloc(size_t size, gfp_t flags); | |
11876 | static inline __attribute__((always_inline)) __attribute__((always_inline)) void * | |
11877 | kmalloc_order(size_t size, gfp_t flags, unsigned int order) | |
11878 | { | |
11879 | void *ret = (void *) __get_free_pages(flags | (( gfp_t)0x4000u), order); | |
11880 | kmemleak_alloc(ret, size, 1, flags); | |
11881 | return ret; | |
11882 | } | |
11883 | extern void * | |
11884 | kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size); | |
11885 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order); | |
11886 | static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc_large(size_t size, gfp_t flags) | |
11887 | { | |
11888 | unsigned int order = get_order(size); | |
11889 | return kmalloc_order_trace(size, flags, order); | |
11890 | } | |
11891 | static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc(size_t size, gfp_t flags) | |
11892 | { | |
11893 | if (__builtin_constant_p(((__builtin_constant_p(size)))) ? !!((__builtin_constant_p(size))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 257, }; ______r = !!((__builtin_constant_p(size))); ______f.miss_hit[______r]++; ______r; })) { | |
11894 | if (__builtin_constant_p(((size > (2 * ((1UL) << 12))))) ? !!((size > (2 * ((1UL) << 12)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 258, }; ______r = !!((size > (2 * ((1UL) << 12)))); ______f.miss_hit[______r]++; ______r; })) | |
11895 | return kmalloc_large(size, flags); | |
11896 | if (__builtin_constant_p(((!(flags & (( gfp_t)0x01u))))) ? !!((!(flags & (( gfp_t)0x01u)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 261, }; ______r = !!((!(flags & (( gfp_t)0x01u)))); ______f.miss_hit[______r]++; ______r; })) { | |
11897 | struct kmem_cache *s = kmalloc_slab(size); | |
11898 | if (__builtin_constant_p(((!s))) ? !!((!s)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 264, }; ______r = !!((!s)); ______f.miss_hit[______r]++; ______r; })) | |
11899 | return ((void *)16); | |
11900 | return kmem_cache_alloc_trace(s, flags, size); | |
11901 | } | |
11902 | } | |
11903 | return __kmalloc(size, flags); | |
11904 | } | |
11905 | static inline __attribute__((always_inline)) void *kcalloc(size_t n, size_t size, gfp_t flags) | |
11906 | { | |
11907 | if (__builtin_constant_p(((size != 0 && n > (~0UL) / size))) ? !!((size != 0 && n > (~0UL) / size)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slab.h", .line = 225, }; ______r = !!((size != 0 && n > (~0UL) / size)); ______f.miss_hit[______r]++; ______r; })) | |
11908 | return ((void *)0); | |
11909 | return __kmalloc(n * size, flags | (( gfp_t)0x8000u)); | |
11910 | } | |
11911 | static inline __attribute__((always_inline)) void *kmalloc_node(size_t size, gfp_t flags, int node) | |
11912 | { | |
11913 | return kmalloc(size, flags); | |
11914 | } | |
11915 | static inline __attribute__((always_inline)) void *__kmalloc_node(size_t size, gfp_t flags, int node) | |
11916 | { | |
11917 | return __kmalloc(size, flags); | |
11918 | } | |
11919 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | |
11920 | static inline __attribute__((always_inline)) void *kmem_cache_alloc_node(struct kmem_cache *cachep, | |
11921 | gfp_t flags, int node) | |
11922 | { | |
11923 | return kmem_cache_alloc(cachep, flags); | |
11924 | } | |
11925 | extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); | |
11926 | static inline __attribute__((always_inline)) void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) | |
11927 | { | |
11928 | return kmem_cache_alloc(k, flags | (( gfp_t)0x8000u)); | |
11929 | } | |
11930 | static inline __attribute__((always_inline)) void *kzalloc(size_t size, gfp_t flags) | |
11931 | { | |
11932 | return kmalloc(size, flags | (( gfp_t)0x8000u)); | |
11933 | } | |
11934 | static inline __attribute__((always_inline)) void *kzalloc_node(size_t size, gfp_t flags, int node) | |
11935 | { | |
11936 | return kmalloc_node(size, flags | (( gfp_t)0x8000u), node); | |
11937 | } | |
11938 | void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init_late(void); | |
11939 | static inline __attribute__((always_inline)) void pagefault_disable(void) | |
11940 | { | |
11941 | add_preempt_count(1); | |
11942 | __asm__ __volatile__("": : :"memory"); | |
11943 | } | |
11944 | static inline __attribute__((always_inline)) void pagefault_enable(void) | |
11945 | { | |
11946 | __asm__ __volatile__("": : :"memory"); | |
11947 | sub_preempt_count(1); | |
11948 | __asm__ __volatile__("": : :"memory"); | |
11949 | do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/uaccess.h", .line = 38, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/uaccess.h", .line = 38, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/uaccess.h", .line = 38, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/uaccess.h", .line = 38, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); | |
11950 | } | |
11951 | extern long probe_kernel_read(void *dst, const void *src, size_t size); | |
11952 | extern long __probe_kernel_read(void *dst, const void *src, size_t size); | |
11953 | extern long __attribute__((no_instrument_function)) probe_kernel_write(void *dst, const void *src, size_t size); | |
11954 | extern long __attribute__((no_instrument_function)) __probe_kernel_write(void *dst, const void *src, size_t size); | |
11955 | struct scatterlist; | |
11956 | struct crypto_ablkcipher; | |
11957 | struct crypto_async_request; | |
11958 | struct crypto_aead; | |
11959 | struct crypto_blkcipher; | |
11960 | struct crypto_hash; | |
11961 | struct crypto_rng; | |
11962 | struct crypto_tfm; | |
11963 | struct crypto_type; | |
11964 | struct aead_givcrypt_request; | |
11965 | struct skcipher_givcrypt_request; | |
11966 | typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); | |
11967 | struct crypto_async_request { | |
11968 | struct list_head list; | |
11969 | crypto_completion_t complete; | |
11970 | void *data; | |
11971 | struct crypto_tfm *tfm; | |
11972 | u32 flags; | |
11973 | }; | |
11974 | struct ablkcipher_request { | |
11975 | struct crypto_async_request base; | |
11976 | unsigned int nbytes; | |
11977 | void *info; | |
11978 | struct scatterlist *src; | |
11979 | struct scatterlist *dst; | |
11980 | void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long)))); | |
11981 | }; | |
11982 | struct aead_request { | |
11983 | struct crypto_async_request base; | |
11984 | unsigned int assoclen; | |
11985 | unsigned int cryptlen; | |
11986 | u8 *iv; | |
11987 | struct scatterlist *assoc; | |
11988 | struct scatterlist *src; | |
11989 | struct scatterlist *dst; | |
11990 | void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long)))); | |
11991 | }; | |
11992 | struct blkcipher_desc { | |
11993 | struct crypto_blkcipher *tfm; | |
11994 | void *info; | |
11995 | u32 flags; | |
11996 | }; | |
11997 | struct cipher_desc { | |
11998 | struct crypto_tfm *tfm; | |
11999 | void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); | |
12000 | unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, | |
12001 | const u8 *src, unsigned int nbytes); | |
12002 | void *info; | |
12003 | }; | |
12004 | struct hash_desc { | |
12005 | struct crypto_hash *tfm; | |
12006 | u32 flags; | |
12007 | }; | |
12008 | struct ablkcipher_alg { | |
12009 | int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, | |
12010 | unsigned int keylen); | |
12011 | int (*encrypt)(struct ablkcipher_request *req); | |
12012 | int (*decrypt)(struct ablkcipher_request *req); | |
12013 | int (*givencrypt)(struct skcipher_givcrypt_request *req); | |
12014 | int (*givdecrypt)(struct skcipher_givcrypt_request *req); | |
12015 | const char *geniv; | |
12016 | unsigned int min_keysize; | |
12017 | unsigned int max_keysize; | |
12018 | unsigned int ivsize; | |
12019 | }; | |
12020 | struct aead_alg { | |
12021 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, | |
12022 | unsigned int keylen); | |
12023 | int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize); | |
12024 | int (*encrypt)(struct aead_request *req); | |
12025 | int (*decrypt)(struct aead_request *req); | |
12026 | int (*givencrypt)(struct aead_givcrypt_request *req); | |
12027 | int (*givdecrypt)(struct aead_givcrypt_request *req); | |
12028 | const char *geniv; | |
12029 | unsigned int ivsize; | |
12030 | unsigned int maxauthsize; | |
12031 | }; | |
12032 | struct blkcipher_alg { | |
12033 | int (*setkey)(struct crypto_tfm *tfm, const u8 *key, | |
12034 | unsigned int keylen); | |
12035 | int (*encrypt)(struct blkcipher_desc *desc, | |
12036 | struct scatterlist *dst, struct scatterlist *src, | |
12037 | unsigned int nbytes); | |
12038 | int (*decrypt)(struct blkcipher_desc *desc, | |
12039 | struct scatterlist *dst, struct scatterlist *src, | |
12040 | unsigned int nbytes); | |
12041 | const char *geniv; | |
12042 | unsigned int min_keysize; | |
12043 | unsigned int max_keysize; | |
12044 | unsigned int ivsize; | |
12045 | }; | |
12046 | struct cipher_alg { | |
12047 | unsigned int cia_min_keysize; | |
12048 | unsigned int cia_max_keysize; | |
12049 | int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, | |
12050 | unsigned int keylen); | |
12051 | void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); | |
12052 | void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); | |
12053 | }; | |
12054 | struct compress_alg { | |
12055 | int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, | |
12056 | unsigned int slen, u8 *dst, unsigned int *dlen); | |
12057 | int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src, | |
12058 | unsigned int slen, u8 *dst, unsigned int *dlen); | |
12059 | }; | |
12060 | struct rng_alg { | |
12061 | int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, | |
12062 | unsigned int dlen); | |
12063 | int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); | |
12064 | unsigned int seedsize; | |
12065 | }; | |
12066 | struct crypto_alg { | |
12067 | struct list_head cra_list; | |
12068 | struct list_head cra_users; | |
12069 | u32 cra_flags; | |
12070 | unsigned int cra_blocksize; | |
12071 | unsigned int cra_ctxsize; | |
12072 | unsigned int cra_alignmask; | |
12073 | int cra_priority; | |
12074 | atomic_t cra_refcnt; | |
12075 | char cra_name[64]; | |
12076 | char cra_driver_name[64]; | |
12077 | const struct crypto_type *cra_type; | |
12078 | union { | |
12079 | struct ablkcipher_alg ablkcipher; | |
12080 | struct aead_alg aead; | |
12081 | struct blkcipher_alg blkcipher; | |
12082 | struct cipher_alg cipher; | |
12083 | struct compress_alg compress; | |
12084 | struct rng_alg rng; | |
12085 | } cra_u; | |
12086 | int (*cra_init)(struct crypto_tfm *tfm); | |
12087 | void (*cra_exit)(struct crypto_tfm *tfm); | |
12088 | void (*cra_destroy)(struct crypto_alg *alg); | |
12089 | struct module *cra_module; | |
12090 | }; | |
12091 | int crypto_register_alg(struct crypto_alg *alg); | |
12092 | int crypto_unregister_alg(struct crypto_alg *alg); | |
12093 | int crypto_has_alg(const char *name, u32 type, u32 mask); | |
12094 | struct ablkcipher_tfm { | |
12095 | int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, | |
12096 | unsigned int keylen); | |
12097 | int (*encrypt)(struct ablkcipher_request *req); | |
12098 | int (*decrypt)(struct ablkcipher_request *req); | |
12099 | int (*givencrypt)(struct skcipher_givcrypt_request *req); | |
12100 | int (*givdecrypt)(struct skcipher_givcrypt_request *req); | |
12101 | struct crypto_ablkcipher *base; | |
12102 | unsigned int ivsize; | |
12103 | unsigned int reqsize; | |
12104 | }; | |
12105 | struct aead_tfm { | |
12106 | int (*setkey)(struct crypto_aead *tfm, const u8 *key, | |
12107 | unsigned int keylen); | |
12108 | int (*encrypt)(struct aead_request *req); | |
12109 | int (*decrypt)(struct aead_request *req); | |
12110 | int (*givencrypt)(struct aead_givcrypt_request *req); | |
12111 | int (*givdecrypt)(struct aead_givcrypt_request *req); | |
12112 | struct crypto_aead *base; | |
12113 | unsigned int ivsize; | |
12114 | unsigned int authsize; | |
12115 | unsigned int reqsize; | |
12116 | }; | |
12117 | struct blkcipher_tfm { | |
12118 | void *iv; | |
12119 | int (*setkey)(struct crypto_tfm *tfm, const u8 *key, | |
12120 | unsigned int keylen); | |
12121 | int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, | |
12122 | struct scatterlist *src, unsigned int nbytes); | |
12123 | int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, | |
12124 | struct scatterlist *src, unsigned int nbytes); | |
12125 | }; | |
12126 | struct cipher_tfm { | |
12127 | int (*cit_setkey)(struct crypto_tfm *tfm, | |
12128 | const u8 *key, unsigned int keylen); | |
12129 | void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); | |
12130 | void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); | |
12131 | }; | |
12132 | struct hash_tfm { | |
12133 | int (*init)(struct hash_desc *desc); | |
12134 | int (*update)(struct hash_desc *desc, | |
12135 | struct scatterlist *sg, unsigned int nsg); | |
12136 | int (*final)(struct hash_desc *desc, u8 *out); | |
12137 | int (*digest)(struct hash_desc *desc, struct scatterlist *sg, | |
12138 | unsigned int nsg, u8 *out); | |
12139 | int (*setkey)(struct crypto_hash *tfm, const u8 *key, | |
12140 | unsigned int keylen); | |
12141 | unsigned int digestsize; | |
12142 | }; | |
12143 | struct compress_tfm { | |
12144 | int (*cot_compress)(struct crypto_tfm *tfm, | |
12145 | const u8 *src, unsigned int slen, | |
12146 | u8 *dst, unsigned int *dlen); | |
12147 | int (*cot_decompress)(struct crypto_tfm *tfm, | |
12148 | const u8 *src, unsigned int slen, | |
12149 | u8 *dst, unsigned int *dlen); | |
12150 | }; | |
12151 | struct rng_tfm { | |
12152 | int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata, | |
12153 | unsigned int dlen); | |
12154 | int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen); | |
12155 | }; | |
12156 | struct crypto_tfm { | |
12157 | u32 crt_flags; | |
12158 | union { | |
12159 | struct ablkcipher_tfm ablkcipher; | |
12160 | struct aead_tfm aead; | |
12161 | struct blkcipher_tfm blkcipher; | |
12162 | struct cipher_tfm cipher; | |
12163 | struct hash_tfm hash; | |
12164 | struct compress_tfm compress; | |
12165 | struct rng_tfm rng; | |
12166 | } crt_u; | |
12167 | void (*exit)(struct crypto_tfm *tfm); | |
12168 | struct crypto_alg *__crt_alg; | |
12169 | void *__crt_ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long)))); | |
12170 | }; | |
12171 | struct crypto_ablkcipher { | |
12172 | struct crypto_tfm base; | |
12173 | }; | |
12174 | struct crypto_aead { | |
12175 | struct crypto_tfm base; | |
12176 | }; | |
12177 | struct crypto_blkcipher { | |
12178 | struct crypto_tfm base; | |
12179 | }; | |
12180 | struct crypto_cipher { | |
12181 | struct crypto_tfm base; | |
12182 | }; | |
12183 | struct crypto_comp { | |
12184 | struct crypto_tfm base; | |
12185 | }; | |
12186 | struct crypto_hash { | |
12187 | struct crypto_tfm base; | |
12188 | }; | |
12189 | struct crypto_rng { | |
12190 | struct crypto_tfm base; | |
12191 | }; | |
12192 | enum { | |
12193 | CRYPTOA_UNSPEC, | |
12194 | CRYPTOA_ALG, | |
12195 | CRYPTOA_TYPE, | |
12196 | CRYPTOA_U32, | |
12197 | __CRYPTOA_MAX, | |
12198 | }; | |
12199 | struct crypto_attr_alg { | |
12200 | char name[64]; | |
12201 | }; | |
12202 | struct crypto_attr_type { | |
12203 | u32 type; | |
12204 | u32 mask; | |
12205 | }; | |
12206 | struct crypto_attr_u32 { | |
12207 | u32 num; | |
12208 | }; | |
12209 | struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); | |
12210 | void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm); | |
12211 | static inline __attribute__((always_inline)) void crypto_free_tfm(struct crypto_tfm *tfm) | |
12212 | { | |
12213 | return crypto_destroy_tfm(tfm, tfm); | |
12214 | } | |
12215 | int alg_test(const char *driver, const char *alg, u32 type, u32 mask); | |
12216 | static inline __attribute__((always_inline)) const char *crypto_tfm_alg_name(struct crypto_tfm *tfm) | |
12217 | { | |
12218 | return tfm->__crt_alg->cra_name; | |
12219 | } | |
12220 | static inline __attribute__((always_inline)) const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm) | |
12221 | { | |
12222 | return tfm->__crt_alg->cra_driver_name; | |
12223 | } | |
12224 | static inline __attribute__((always_inline)) int crypto_tfm_alg_priority(struct crypto_tfm *tfm) | |
12225 | { | |
12226 | return tfm->__crt_alg->cra_priority; | |
12227 | } | |
12228 | static inline __attribute__((always_inline)) const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm) | |
12229 | { | |
12230 | return ({ struct module *__mod = (tfm->__crt_alg->cra_module); __mod ? __mod->name : "kernel"; }); | |
12231 | } | |
12232 | static inline __attribute__((always_inline)) u32 crypto_tfm_alg_type(struct crypto_tfm *tfm) | |
12233 | { | |
12234 | return tfm->__crt_alg->cra_flags & 0x0000000f; | |
12235 | } | |
12236 | static inline __attribute__((always_inline)) unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm) | |
12237 | { | |
12238 | return tfm->__crt_alg->cra_blocksize; | |
12239 | } | |
12240 | static inline __attribute__((always_inline)) unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm) | |
12241 | { | |
12242 | return tfm->__crt_alg->cra_alignmask; | |
12243 | } | |
12244 | static inline __attribute__((always_inline)) u32 crypto_tfm_get_flags(struct crypto_tfm *tfm) | |
12245 | { | |
12246 | return tfm->crt_flags; | |
12247 | } | |
12248 | static inline __attribute__((always_inline)) void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags) | |
12249 | { | |
12250 | tfm->crt_flags |= flags; | |
12251 | } | |
12252 | static inline __attribute__((always_inline)) void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags) | |
12253 | { | |
12254 | tfm->crt_flags &= ~flags; | |
12255 | } | |
12256 | static inline __attribute__((always_inline)) void *crypto_tfm_ctx(struct crypto_tfm *tfm) | |
12257 | { | |
12258 | return tfm->__crt_ctx; | |
12259 | } | |
12260 | static inline __attribute__((always_inline)) unsigned int crypto_tfm_ctx_alignment(void) | |
12261 | { | |
12262 | struct crypto_tfm *tfm; | |
12263 | return __alignof__(tfm->__crt_ctx); | |
12264 | } | |
12265 | static inline __attribute__((always_inline)) struct crypto_ablkcipher *__crypto_ablkcipher_cast( | |
12266 | struct crypto_tfm *tfm) | |
12267 | { | |
12268 | return (struct crypto_ablkcipher *)tfm; | |
12269 | } | |
12270 | static inline __attribute__((always_inline)) u32 crypto_skcipher_type(u32 type) | |
12271 | { | |
12272 | type &= ~(0x0000000f | 0x00000200); | |
12273 | type |= 0x00000004; | |
12274 | return type; | |
12275 | } | |
12276 | static inline __attribute__((always_inline)) u32 crypto_skcipher_mask(u32 mask) | |
12277 | { | |
12278 | mask &= ~(0x0000000f | 0x00000200); | |
12279 | mask |= 0x0000000c; | |
12280 | return mask; | |
12281 | } | |
12282 | struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, | |
12283 | u32 type, u32 mask); | |
12284 | static inline __attribute__((always_inline)) struct crypto_tfm *crypto_ablkcipher_tfm( | |
12285 | struct crypto_ablkcipher *tfm) | |
12286 | { | |
12287 | return &tfm->base; | |
12288 | } | |
12289 | static inline __attribute__((always_inline)) void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) | |
12290 | { | |
12291 | crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); | |
12292 | } | |
12293 | static inline __attribute__((always_inline)) int crypto_has_ablkcipher(const char *alg_name, u32 type, | |
12294 | u32 mask) | |
12295 | { | |
12296 | return crypto_has_alg(alg_name, crypto_skcipher_type(type), | |
12297 | crypto_skcipher_mask(mask)); | |
12298 | } | |
12299 | static inline __attribute__((always_inline)) struct ablkcipher_tfm *crypto_ablkcipher_crt( | |
12300 | struct crypto_ablkcipher *tfm) | |
12301 | { | |
12302 | return &crypto_ablkcipher_tfm(tfm)->crt_u.ablkcipher; | |
12303 | } | |
12304 | static inline __attribute__((always_inline)) unsigned int crypto_ablkcipher_ivsize( | |
12305 | struct crypto_ablkcipher *tfm) | |
12306 | { | |
12307 | return crypto_ablkcipher_crt(tfm)->ivsize; | |
12308 | } | |
12309 | static inline __attribute__((always_inline)) unsigned int crypto_ablkcipher_blocksize( | |
12310 | struct crypto_ablkcipher *tfm) | |
12311 | { | |
12312 | return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm)); | |
12313 | } | |
12314 | static inline __attribute__((always_inline)) unsigned int crypto_ablkcipher_alignmask( | |
12315 | struct crypto_ablkcipher *tfm) | |
12316 | { | |
12317 | return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm)); | |
12318 | } | |
12319 | static inline __attribute__((always_inline)) u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm) | |
12320 | { | |
12321 | return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm)); | |
12322 | } | |
12323 | static inline __attribute__((always_inline)) void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm, | |
12324 | u32 flags) | |
12325 | { | |
12326 | crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags); | |
12327 | } | |
12328 | static inline __attribute__((always_inline)) void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, | |
12329 | u32 flags) | |
12330 | { | |
12331 | crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); | |
12332 | } | |
12333 | static inline __attribute__((always_inline)) int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, | |
12334 | const u8 *key, unsigned int keylen) | |
12335 | { | |
12336 | struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm); | |
12337 | return crt->setkey(crt->base, key, keylen); | |
12338 | } | |
12339 | static inline __attribute__((always_inline)) struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( | |
12340 | struct ablkcipher_request *req) | |
12341 | { | |
12342 | return __crypto_ablkcipher_cast(req->base.tfm); | |
12343 | } | |
12344 | static inline __attribute__((always_inline)) int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) | |
12345 | { | |
12346 | struct ablkcipher_tfm *crt = | |
12347 | crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); | |
12348 | return crt->encrypt(req); | |
12349 | } | |
12350 | static inline __attribute__((always_inline)) int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) | |
12351 | { | |
12352 | struct ablkcipher_tfm *crt = | |
12353 | crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); | |
12354 | return crt->decrypt(req); | |
12355 | } | |
12356 | static inline __attribute__((always_inline)) unsigned int crypto_ablkcipher_reqsize( | |
12357 | struct crypto_ablkcipher *tfm) | |
12358 | { | |
12359 | return crypto_ablkcipher_crt(tfm)->reqsize; | |
12360 | } | |
12361 | static inline __attribute__((always_inline)) void ablkcipher_request_set_tfm( | |
12362 | struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) | |
12363 | { | |
12364 | req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base); | |
12365 | } | |
12366 | static inline __attribute__((always_inline)) struct ablkcipher_request *ablkcipher_request_cast( | |
12367 | struct crypto_async_request *req) | |
12368 | { | |
12369 | return ({ const typeof( ((struct ablkcipher_request *)0)->base ) *__mptr = (req); (struct ablkcipher_request *)( (char *)__mptr - __builtin_offsetof(struct ablkcipher_request,base) );}); | |
12370 | } | |
12371 | static inline __attribute__((always_inline)) struct ablkcipher_request *ablkcipher_request_alloc( | |
12372 | struct crypto_ablkcipher *tfm, gfp_t gfp) | |
12373 | { | |
12374 | struct ablkcipher_request *req; | |
12375 | req = kmalloc(sizeof(struct ablkcipher_request) + | |
12376 | crypto_ablkcipher_reqsize(tfm), gfp); | |
12377 | if (__builtin_constant_p((((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 693, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 693, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 693, }; ______r = !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 693, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
12378 | ablkcipher_request_set_tfm(req, tfm); | |
12379 | return req; | |
12380 | } | |
12381 | static inline __attribute__((always_inline)) void ablkcipher_request_free(struct ablkcipher_request *req) | |
12382 | { | |
12383 | kzfree(req); | |
12384 | } | |
12385 | static inline __attribute__((always_inline)) void ablkcipher_request_set_callback( | |
12386 | struct ablkcipher_request *req, | |
12387 | u32 flags, crypto_completion_t complete, void *data) | |
12388 | { | |
12389 | req->base.complete = complete; | |
12390 | req->base.data = data; | |
12391 | req->base.flags = flags; | |
12392 | } | |
12393 | static inline __attribute__((always_inline)) void ablkcipher_request_set_crypt( | |
12394 | struct ablkcipher_request *req, | |
12395 | struct scatterlist *src, struct scatterlist *dst, | |
12396 | unsigned int nbytes, void *iv) | |
12397 | { | |
12398 | req->src = src; | |
12399 | req->dst = dst; | |
12400 | req->nbytes = nbytes; | |
12401 | req->info = iv; | |
12402 | } | |
/* Unchecked cast from a generic crypto_tfm to an AEAD transform handle.
 * Valid because crypto_aead embeds the crypto_tfm as its first member
 * (see crypto_aead_tfm() below, which performs the reverse). */
static inline __attribute__((always_inline)) struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
{
 return (struct crypto_aead *)tfm;
}
/* Allocate an AEAD transform by algorithm name; defined out of line. */
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
/* Return the generic crypto_tfm embedded in an AEAD transform handle. */
static inline __attribute__((always_inline)) struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
{
 return &tfm->base;
}
/* Release an AEAD transform obtained via crypto_alloc_aead(). */
static inline __attribute__((always_inline)) void crypto_free_aead(struct crypto_aead *tfm)
{
 crypto_free_tfm(crypto_aead_tfm(tfm));
}
/* Return the AEAD runtime ops (aead_tfm) stored in the tfm's crt_u union. */
static inline __attribute__((always_inline)) struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
{
 return &crypto_aead_tfm(tfm)->crt_u.aead;
}
/* IV size in bytes, as recorded in the runtime ops. */
static inline __attribute__((always_inline)) unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
{
 return crypto_aead_crt(tfm)->ivsize;
}
/* Authentication tag size in bytes, as recorded in the runtime ops. */
static inline __attribute__((always_inline)) unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
{
 return crypto_aead_crt(tfm)->authsize;
}
/* Block size of the underlying algorithm. */
static inline __attribute__((always_inline)) unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
{
 return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
}
/* Alignment mask callers must honour for data buffers. */
static inline __attribute__((always_inline)) unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
{
 return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
}
/* Read the transform's flag word. */
static inline __attribute__((always_inline)) u32 crypto_aead_get_flags(struct crypto_aead *tfm)
{
 return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
}
/* OR the given bits into the transform's flag word. */
static inline __attribute__((always_inline)) void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
{
 crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
}
/* Clear the given bits in the transform's flag word. */
static inline __attribute__((always_inline)) void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
{
 crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
}
12448 | static inline __attribute__((always_inline)) int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, | |
12449 | unsigned int keylen) | |
12450 | { | |
12451 | struct aead_tfm *crt = crypto_aead_crt(tfm); | |
12452 | return crt->setkey(crt->base, key, keylen); | |
12453 | } | |
/* Change the authentication tag size; defined out of line. */
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
/* Recover the AEAD transform a request was prepared for. */
static inline __attribute__((always_inline)) struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
{
 return __crypto_aead_cast(req->base.tfm);
}
/* Run the encrypt operation of the request's transform. */
static inline __attribute__((always_inline)) int crypto_aead_encrypt(struct aead_request *req)
{
 return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
}
/* Run the decrypt operation of the request's transform. */
static inline __attribute__((always_inline)) int crypto_aead_decrypt(struct aead_request *req)
{
 return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
}
/* Extra per-request context space this transform needs beyond
 * sizeof(struct aead_request); used by aead_request_alloc(). */
static inline __attribute__((always_inline)) unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
{
 return crypto_aead_crt(tfm)->reqsize;
}
/* Bind a request to a transform.  Goes through crt->base rather than
 * @tfm itself, mirroring the indirection used by crypto_aead_setkey(). */
static inline __attribute__((always_inline)) void aead_request_set_tfm(struct aead_request *req,
          struct crypto_aead *tfm)
{
 req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
}
/* Allocate an AEAD request plus the transform's private context area.
 * On allocation failure the request is NOT bound to the transform and
 * NULL is returned; callers must check the result. */
static inline __attribute__((always_inline)) struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
            gfp_t gfp)
{
 struct aead_request *req;
 req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
 /* Preprocessor-expanded likely(req) with ftrace branch profiling:
  * the condition is simply "req != NULL", recorded in the
  * _ftrace_annotated_branch / _ftrace_branch sections. */
 if (__builtin_constant_p((((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 824, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 824, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 824, }; ______r = !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 824, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
  aead_request_set_tfm(req, tfm);
 return req;
}
/* Free an AEAD request; kzfree zeroes the memory first so sensitive
 * request state is not left behind in the heap. */
static inline __attribute__((always_inline)) void aead_request_free(struct aead_request *req)
{
 kzfree(req);
}
12489 | static inline __attribute__((always_inline)) void aead_request_set_callback(struct aead_request *req, | |
12490 | u32 flags, | |
12491 | crypto_completion_t complete, | |
12492 | void *data) | |
12493 | { | |
12494 | req->base.complete = complete; | |
12495 | req->base.data = data; | |
12496 | req->base.flags = flags; | |
12497 | } | |
12498 | static inline __attribute__((always_inline)) void aead_request_set_crypt(struct aead_request *req, | |
12499 | struct scatterlist *src, | |
12500 | struct scatterlist *dst, | |
12501 | unsigned int cryptlen, u8 *iv) | |
12502 | { | |
12503 | req->src = src; | |
12504 | req->dst = dst; | |
12505 | req->cryptlen = cryptlen; | |
12506 | req->iv = iv; | |
12507 | } | |
12508 | static inline __attribute__((always_inline)) void aead_request_set_assoc(struct aead_request *req, | |
12509 | struct scatterlist *assoc, | |
12510 | unsigned int assoclen) | |
12511 | { | |
12512 | req->assoc = assoc; | |
12513 | req->assoclen = assoclen; | |
12514 | } | |
/* Unchecked cast from a generic crypto_tfm to a blkcipher handle. */
static inline __attribute__((always_inline)) struct crypto_blkcipher *__crypto_blkcipher_cast(
 struct crypto_tfm *tfm)
{
 return (struct crypto_blkcipher *)tfm;
}
/* Checked cast: the expanded BUG_ON() below traps (ud2, bug_table entry)
 * if the tfm's algorithm type is not 0x00000004 (the blkcipher type). */
static inline __attribute__((always_inline)) struct crypto_blkcipher *crypto_blkcipher_cast(
 struct crypto_tfm *tfm)
{
 do { if (__builtin_constant_p((((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000004) ? !!(crypto_tfm_alg_type(tfm) != 0x00000004) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 873, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000004), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000004) ? !!(crypto_tfm_alg_type(tfm) != 0x00000004) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 873, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000004), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 873, }; ______r = !!(((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000004) ? !!(crypto_tfm_alg_type(tfm) != 0x00000004) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 873, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000004), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/crypto.h"), "i" (873), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 return __crypto_blkcipher_cast(tfm);
}
12526 | static inline __attribute__((always_inline)) struct crypto_blkcipher *crypto_alloc_blkcipher( | |
12527 | const char *alg_name, u32 type, u32 mask) | |
12528 | { | |
12529 | type &= ~0x0000000f; | |
12530 | type |= 0x00000004; | |
12531 | mask |= 0x0000000f; | |
12532 | return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); | |
12533 | } | |
/* Return the generic crypto_tfm embedded in a blkcipher handle. */
static inline __attribute__((always_inline)) struct crypto_tfm *crypto_blkcipher_tfm(
 struct crypto_blkcipher *tfm)
{
 return &tfm->base;
}
/* Release a blkcipher transform obtained via crypto_alloc_blkcipher(). */
static inline __attribute__((always_inline)) void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
{
 crypto_free_tfm(crypto_blkcipher_tfm(tfm));
}
12543 | static inline __attribute__((always_inline)) int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) | |
12544 | { | |
12545 | type &= ~0x0000000f; | |
12546 | type |= 0x00000004; | |
12547 | mask |= 0x0000000f; | |
12548 | return crypto_has_alg(alg_name, type, mask); | |
12549 | } | |
/* Algorithm name of a blkcipher transform. */
static inline __attribute__((always_inline)) const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
{
 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
}
/* Runtime ops (blkcipher_tfm) stored in the tfm's crt_u union. */
static inline __attribute__((always_inline)) struct blkcipher_tfm *crypto_blkcipher_crt(
 struct crypto_blkcipher *tfm)
{
 return &crypto_blkcipher_tfm(tfm)->crt_u.blkcipher;
}
/* Static algorithm descriptor (blkcipher_alg) from the registered alg. */
static inline __attribute__((always_inline)) struct blkcipher_alg *crypto_blkcipher_alg(
 struct crypto_blkcipher *tfm)
{
 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_u.blkcipher;
}
/* IV size in bytes, taken from the algorithm descriptor. */
static inline __attribute__((always_inline)) unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
{
 return crypto_blkcipher_alg(tfm)->ivsize;
}
/* Cipher block size in bytes. */
static inline __attribute__((always_inline)) unsigned int crypto_blkcipher_blocksize(
 struct crypto_blkcipher *tfm)
{
 return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
}
/* Alignment mask callers must honour for data buffers. */
static inline __attribute__((always_inline)) unsigned int crypto_blkcipher_alignmask(
 struct crypto_blkcipher *tfm)
{
 return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
}
/* Read the transform's flag word. */
static inline __attribute__((always_inline)) u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
{
 return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
}
/* OR the given bits into the transform's flag word. */
static inline __attribute__((always_inline)) void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
            u32 flags)
{
 crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
}
/* Clear the given bits in the transform's flag word. */
static inline __attribute__((always_inline)) void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
       u32 flags)
{
 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
}
12592 | static inline __attribute__((always_inline)) int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, | |
12593 | const u8 *key, unsigned int keylen) | |
12594 | { | |
12595 | return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm), | |
12596 | key, keylen); | |
12597 | } | |
12598 | static inline __attribute__((always_inline)) int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, | |
12599 | struct scatterlist *dst, | |
12600 | struct scatterlist *src, | |
12601 | unsigned int nbytes) | |
12602 | { | |
12603 | desc->info = crypto_blkcipher_crt(desc->tfm)->iv; | |
12604 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); | |
12605 | } | |
12606 | static inline __attribute__((always_inline)) int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, | |
12607 | struct scatterlist *dst, | |
12608 | struct scatterlist *src, | |
12609 | unsigned int nbytes) | |
12610 | { | |
12611 | return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); | |
12612 | } | |
12613 | static inline __attribute__((always_inline)) int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, | |
12614 | struct scatterlist *dst, | |
12615 | struct scatterlist *src, | |
12616 | unsigned int nbytes) | |
12617 | { | |
12618 | desc->info = crypto_blkcipher_crt(desc->tfm)->iv; | |
12619 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); | |
12620 | } | |
12621 | static inline __attribute__((always_inline)) int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, | |
12622 | struct scatterlist *dst, | |
12623 | struct scatterlist *src, | |
12624 | unsigned int nbytes) | |
12625 | { | |
12626 | return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); | |
12627 | } | |
/* Copy @len bytes from @src into the transform's internal IV buffer.
 * NOTE(review): no bounds check against the algorithm's ivsize —
 * callers are expected to pass len <= crypto_blkcipher_ivsize(tfm). */
static inline __attribute__((always_inline)) void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
         const u8 *src, unsigned int len)
{
 __builtin_memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}
/* Copy @len bytes of the transform's internal IV out into @dst. */
static inline __attribute__((always_inline)) void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
         u8 *dst, unsigned int len)
{
 __builtin_memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
}
/* Unchecked cast from a generic crypto_tfm to a single-block cipher handle. */
static inline __attribute__((always_inline)) struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
 return (struct crypto_cipher *)tfm;
}
/* Checked cast: the expanded BUG_ON() below traps (ud2, bug_table entry)
 * if the tfm's algorithm type is not 0x00000001 (the cipher type). */
static inline __attribute__((always_inline)) struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
 do { if (__builtin_constant_p((((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000001) ? !!(crypto_tfm_alg_type(tfm) != 0x00000001) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1018, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000001), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000001) ? !!(crypto_tfm_alg_type(tfm) != 0x00000001) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1018, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000001), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1018, }; ______r = !!(((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000001) ? !!(crypto_tfm_alg_type(tfm) != 0x00000001) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1018, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000001), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/crypto.h"), "i" (1018), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 return __crypto_cipher_cast(tfm);
}
12647 | static inline __attribute__((always_inline)) struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, | |
12648 | u32 type, u32 mask) | |
12649 | { | |
12650 | type &= ~0x0000000f; | |
12651 | type |= 0x00000001; | |
12652 | mask |= 0x0000000f; | |
12653 | return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); | |
12654 | } | |
/* Return the generic crypto_tfm embedded in a cipher handle. */
static inline __attribute__((always_inline)) struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
 return &tfm->base;
}
/* Release a cipher transform obtained via crypto_alloc_cipher(). */
static inline __attribute__((always_inline)) void crypto_free_cipher(struct crypto_cipher *tfm)
{
 crypto_free_tfm(crypto_cipher_tfm(tfm));
}
12663 | static inline __attribute__((always_inline)) int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) | |
12664 | { | |
12665 | type &= ~0x0000000f; | |
12666 | type |= 0x00000001; | |
12667 | mask |= 0x0000000f; | |
12668 | return crypto_has_alg(alg_name, type, mask); | |
12669 | } | |
/* Runtime ops (cipher_tfm) stored in the tfm's crt_u union. */
static inline __attribute__((always_inline)) struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
 return &crypto_cipher_tfm(tfm)->crt_u.cipher;
}
/* Cipher block size in bytes. */
static inline __attribute__((always_inline)) unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
 return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}
/* Alignment mask callers must honour for data buffers. */
static inline __attribute__((always_inline)) unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
 return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}
/* Read the transform's flag word. */
static inline __attribute__((always_inline)) u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
 return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}
/* OR the given bits into the transform's flag word. */
static inline __attribute__((always_inline)) void crypto_cipher_set_flags(struct crypto_cipher *tfm,
         u32 flags)
{
 crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}
/* Clear the given bits in the transform's flag word. */
static inline __attribute__((always_inline)) void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
           u32 flags)
{
 crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}
12696 | static inline __attribute__((always_inline)) int crypto_cipher_setkey(struct crypto_cipher *tfm, | |
12697 | const u8 *key, unsigned int keylen) | |
12698 | { | |
12699 | return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm), | |
12700 | key, keylen); | |
12701 | } | |
12702 | static inline __attribute__((always_inline)) void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, | |
12703 | u8 *dst, const u8 *src) | |
12704 | { | |
12705 | crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm), | |
12706 | dst, src); | |
12707 | } | |
12708 | static inline __attribute__((always_inline)) void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, | |
12709 | u8 *dst, const u8 *src) | |
12710 | { | |
12711 | crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm), | |
12712 | dst, src); | |
12713 | } | |
/* Unchecked cast from a generic crypto_tfm to a hash handle. */
static inline __attribute__((always_inline)) struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
{
 return (struct crypto_hash *)tfm;
}
/* Checked cast: the expanded BUG_ON() below traps if
 * (alg_type ^ 0x8) & 0xe is non-zero, i.e. the type is neither of the
 * two hash variants matched by mask 0x0000000e. */
static inline __attribute__((always_inline)) struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
{
 do { if (__builtin_constant_p((((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1112, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1112, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/linux/crypto.h"
 , .line =
 1112
 , }; ______r = !!(((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1112, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" (
 "include/linux/crypto.h"
 ), "i" (
 1112
 ), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0)
 ;
 return __crypto_hash_cast(tfm);
}
12732 | static inline __attribute__((always_inline)) struct crypto_hash *crypto_alloc_hash(const char *alg_name, | |
12733 | u32 type, u32 mask) | |
12734 | { | |
12735 | type &= ~0x0000000f; | |
12736 | mask &= ~0x0000000f; | |
12737 | type |= 0x00000008; | |
12738 | mask |= 0x0000000e; | |
12739 | return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask)); | |
12740 | } | |
/* Return the generic crypto_tfm embedded in a hash handle. */
static inline __attribute__((always_inline)) struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
{
 return &tfm->base;
}
/* Release a hash transform obtained via crypto_alloc_hash(). */
static inline __attribute__((always_inline)) void crypto_free_hash(struct crypto_hash *tfm)
{
 crypto_free_tfm(crypto_hash_tfm(tfm));
}
12749 | static inline __attribute__((always_inline)) int crypto_has_hash(const char *alg_name, u32 type, u32 mask) | |
12750 | { | |
12751 | type &= ~0x0000000f; | |
12752 | mask &= ~0x0000000f; | |
12753 | type |= 0x00000008; | |
12754 | mask |= 0x0000000e; | |
12755 | return crypto_has_alg(alg_name, type, mask); | |
12756 | } | |
/* Runtime ops (hash_tfm) stored in the tfm's crt_u union. */
static inline __attribute__((always_inline)) struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
{
 return &crypto_hash_tfm(tfm)->crt_u.hash;
}
/* Hash block size in bytes. */
static inline __attribute__((always_inline)) unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
{
 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
}
/* Alignment mask callers must honour for data buffers. */
static inline __attribute__((always_inline)) unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
{
 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
}
/* Digest (output) size in bytes, from the runtime ops. */
static inline __attribute__((always_inline)) unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
{
 return crypto_hash_crt(tfm)->digestsize;
}
/* Read the transform's flag word. */
static inline __attribute__((always_inline)) u32 crypto_hash_get_flags(struct crypto_hash *tfm)
{
 return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
}
/* OR the given bits into the transform's flag word. */
static inline __attribute__((always_inline)) void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
{
 crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
}
/* Clear the given bits in the transform's flag word. */
static inline __attribute__((always_inline)) void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
{
 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
}
/* Begin a new hash computation on @desc. Returns 0 or negative errno. */
static inline __attribute__((always_inline)) int crypto_hash_init(struct hash_desc *desc)
{
 return crypto_hash_crt(desc->tfm)->init(desc);
}
/* Feed @nbytes of scatterlist data into the running hash. */
static inline __attribute__((always_inline)) int crypto_hash_update(struct hash_desc *desc,
        struct scatterlist *sg,
        unsigned int nbytes)
{
 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
}
/* Finish the hash and write the digest to @out (digestsize bytes). */
static inline __attribute__((always_inline)) int crypto_hash_final(struct hash_desc *desc, u8 *out)
{
 return crypto_hash_crt(desc->tfm)->final(desc, out);
}
/* One-shot: hash @nbytes of scatterlist data and write the digest. */
static inline __attribute__((always_inline)) int crypto_hash_digest(struct hash_desc *desc,
        struct scatterlist *sg,
        unsigned int nbytes, u8 *out)
{
 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
}
/* Set the key for keyed hashes; the hook takes the hash handle itself. */
static inline __attribute__((always_inline)) int crypto_hash_setkey(struct crypto_hash *hash,
        const u8 *key, unsigned int keylen)
{
 return crypto_hash_crt(hash)->setkey(hash, key, keylen);
}
/* Unchecked cast from a generic crypto_tfm to a compression handle. */
static inline __attribute__((always_inline)) struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
 return (struct crypto_comp *)tfm;
}
/* Checked cast: the expanded BUG_ON() below traps if the low four type
 * bits are not exactly 0x00000002 (the compression type). */
static inline __attribute__((always_inline)) struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
 do { if (__builtin_constant_p((((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1220, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1220, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/linux/crypto.h"
 , .line =
 1220
 , }; ______r = !!(((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1220, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" (
 "include/linux/crypto.h"
 ), "i" (
 1220
 ), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0)
 ;
 return __crypto_comp_cast(tfm);
}
12828 | static inline __attribute__((always_inline)) struct crypto_comp *crypto_alloc_comp(const char *alg_name, | |
12829 | u32 type, u32 mask) | |
12830 | { | |
12831 | type &= ~0x0000000f; | |
12832 | type |= 0x00000002; | |
12833 | mask |= 0x0000000f; | |
12834 | return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask)); | |
12835 | } | |
/* Return the generic crypto_tfm embedded in a compression handle. */
static inline __attribute__((always_inline)) struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
 return &tfm->base;
}
/* Release a compression transform obtained via crypto_alloc_comp(). */
static inline __attribute__((always_inline)) void crypto_free_comp(struct crypto_comp *tfm)
{
 crypto_free_tfm(crypto_comp_tfm(tfm));
}
12844 | static inline __attribute__((always_inline)) int crypto_has_comp(const char *alg_name, u32 type, u32 mask) | |
12845 | { | |
12846 | type &= ~0x0000000f; | |
12847 | type |= 0x00000002; | |
12848 | mask |= 0x0000000f; | |
12849 | return crypto_has_alg(alg_name, type, mask); | |
12850 | } | |
/* Algorithm name of a compression transform. */
static inline __attribute__((always_inline)) const char *crypto_comp_name(struct crypto_comp *tfm)
{
 return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}
/* Runtime ops (compress_tfm) stored in the tfm's crt_u union. */
static inline __attribute__((always_inline)) struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
 return &crypto_comp_tfm(tfm)->crt_u.compress;
}
12859 | static inline __attribute__((always_inline)) int crypto_comp_compress(struct crypto_comp *tfm, | |
12860 | const u8 *src, unsigned int slen, | |
12861 | u8 *dst, unsigned int *dlen) | |
12862 | { | |
12863 | return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm), | |
12864 | src, slen, dst, dlen); | |
12865 | } | |
12866 | static inline __attribute__((always_inline)) int crypto_comp_decompress(struct crypto_comp *tfm, | |
12867 | const u8 *src, unsigned int slen, | |
12868 | u8 *dst, unsigned int *dlen) | |
12869 | { | |
12870 | return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm), | |
12871 | src, slen, dst, dlen); | |
12872 | } | |
/* Forward declarations used by the API structures below. */
struct module;
struct rtattr;
struct seq_file;
/* Frontend descriptor for a class of crypto transforms: how to size,
 * initialise and display tfm objects of that class. */
struct crypto_type {
 unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); /* per-tfm context size */
 unsigned int (*extsize)(struct crypto_alg *alg); /* extra size beyond the base tfm */
 int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
 int (*init_tfm)(struct crypto_tfm *tfm);
 void (*show)(struct seq_file *m, struct crypto_alg *alg); /* procfs display hook */
 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
 unsigned int type;
 unsigned int maskclear; /* bits cleared from the caller's mask */
 unsigned int maskset; /* bits forced on in the caller's mask */
 unsigned int tfmsize;
};
/* An algorithm instance created from a template, with trailing
 * per-instance context aligned for any scalar type. */
struct crypto_instance {
 struct crypto_alg alg;
 struct crypto_template *tmpl; /* template this instance came from */
 struct hlist_node list; /* linkage in the template's instances list */
 void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
};
/* A template ("cbc", "hmac", ...) that manufactures algorithm instances
 * from other algorithms. */
struct crypto_template {
 struct list_head list;
 struct hlist_head instances; /* instances created from this template */
 struct module *module;
 struct crypto_instance *(*alloc)(struct rtattr **tb);
 void (*free)(struct crypto_instance *inst);
 int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
 char name[64];
};
/* A reference ("spawn") from an instance to the algorithm it wraps. */
struct crypto_spawn {
 struct list_head list;
 struct crypto_alg *alg; /* the algorithm being wrapped */
 struct crypto_instance *inst; /* the instance holding this reference */
 const struct crypto_type *frontend;
 u32 mask;
};
/* A bounded FIFO of pending crypto requests with a backlog marker. */
struct crypto_queue {
 struct list_head list;
 struct list_head *backlog; /* first backlogged entry, if any */
 unsigned int qlen; /* current number of queued requests */
 unsigned int max_qlen; /* queue capacity before backlogging */
};
12916 | struct scatter_walk { | |
12917 | struct scatterlist *sg; | |
12918 | unsigned int offset; | |
12919 | }; | |
12920 | struct blkcipher_walk { | |
12921 | union { | |
12922 | struct { | |
12923 | struct page *page; | |
12924 | unsigned long offset; | |
12925 | } phys; | |
12926 | struct { | |
12927 | u8 *page; | |
12928 | u8 *addr; | |
12929 | } virt; | |
12930 | } src, dst; | |
12931 | struct scatter_walk in; | |
12932 | unsigned int nbytes; | |
12933 | struct scatter_walk out; | |
12934 | unsigned int total; | |
12935 | void *page; | |
12936 | u8 *buffer; | |
12937 | u8 *iv; | |
12938 | int flags; | |
12939 | unsigned int blocksize; | |
12940 | }; | |
12941 | struct ablkcipher_walk { | |
12942 | struct { | |
12943 | struct page *page; | |
12944 | unsigned int offset; | |
12945 | } src, dst; | |
12946 | struct scatter_walk in; | |
12947 | unsigned int nbytes; | |
12948 | struct scatter_walk out; | |
12949 | unsigned int total; | |
12950 | struct list_head buffers; | |
12951 | u8 *iv_buffer; | |
12952 | u8 *iv; | |
12953 | int flags; | |
12954 | unsigned int blocksize; | |
12955 | }; | |
12956 | extern const struct crypto_type crypto_ablkcipher_type; | |
12957 | extern const struct crypto_type crypto_aead_type; | |
12958 | extern const struct crypto_type crypto_blkcipher_type; | |
12959 | void crypto_mod_put(struct crypto_alg *alg); | |
12960 | int crypto_register_template(struct crypto_template *tmpl); | |
12961 | void crypto_unregister_template(struct crypto_template *tmpl); | |
12962 | struct crypto_template *crypto_lookup_template(const char *name); | |
12963 | int crypto_register_instance(struct crypto_template *tmpl, | |
12964 | struct crypto_instance *inst); | |
12965 | int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, | |
12966 | struct crypto_instance *inst, u32 mask); | |
12967 | int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg, | |
12968 | struct crypto_instance *inst, | |
12969 | const struct crypto_type *frontend); | |
12970 | void crypto_drop_spawn(struct crypto_spawn *spawn); | |
12971 | struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, | |
12972 | u32 mask); | |
12973 | void *crypto_spawn_tfm2(struct crypto_spawn *spawn); | |
12974 | static inline __attribute__((always_inline)) void crypto_set_spawn(struct crypto_spawn *spawn, | |
12975 | struct crypto_instance *inst) | |
12976 | { | |
12977 | spawn->inst = inst; | |
12978 | } | |
12979 | struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); | |
12980 | int crypto_check_attr_type(struct rtattr **tb, u32 type); | |
12981 | const char *crypto_attr_alg_name(struct rtattr *rta); | |
12982 | struct crypto_alg *crypto_attr_alg2(struct rtattr *rta, | |
12983 | const struct crypto_type *frontend, | |
12984 | u32 type, u32 mask); | |
12985 | static inline __attribute__((always_inline)) struct crypto_alg *crypto_attr_alg(struct rtattr *rta, | |
12986 | u32 type, u32 mask) | |
12987 | { | |
12988 | return crypto_attr_alg2(rta, ((void *)0), type, mask); | |
12989 | } | |
12990 | int crypto_attr_u32(struct rtattr *rta, u32 *num); | |
12991 | void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, | |
12992 | unsigned int head); | |
12993 | struct crypto_instance *crypto_alloc_instance(const char *name, | |
12994 | struct crypto_alg *alg); | |
12995 | void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); | |
12996 | int crypto_enqueue_request(struct crypto_queue *queue, | |
12997 | struct crypto_async_request *request); | |
12998 | void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset); | |
12999 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); | |
13000 | int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); | |
13001 | void crypto_inc(u8 *a, unsigned int size); | |
13002 | void crypto_xor(u8 *dst, const u8 *src, unsigned int size); | |
13003 | int blkcipher_walk_done(struct blkcipher_desc *desc, | |
13004 | struct blkcipher_walk *walk, int err); | |
13005 | int blkcipher_walk_virt(struct blkcipher_desc *desc, | |
13006 | struct blkcipher_walk *walk); | |
13007 | int blkcipher_walk_phys(struct blkcipher_desc *desc, | |
13008 | struct blkcipher_walk *walk); | |
13009 | int blkcipher_walk_virt_block(struct blkcipher_desc *desc, | |
13010 | struct blkcipher_walk *walk, | |
13011 | unsigned int blocksize); | |
13012 | int ablkcipher_walk_done(struct ablkcipher_request *req, | |
13013 | struct ablkcipher_walk *walk, int err); | |
13014 | int ablkcipher_walk_phys(struct ablkcipher_request *req, | |
13015 | struct ablkcipher_walk *walk); | |
13016 | void __ablkcipher_walk_complete(struct ablkcipher_walk *walk); | |
13017 | static inline __attribute__((always_inline)) void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) | |
13018 | { | |
13019 | return ((typeof(crypto_tfm_ctx(tfm)))(((((unsigned long)(crypto_tfm_ctx(tfm)))) + ((typeof(((unsigned long)(crypto_tfm_ctx(tfm)))))(((crypto_tfm_alg_alignmask(tfm) + 1))) - 1)) & ~((typeof(((unsigned long)(crypto_tfm_ctx(tfm)))))(((crypto_tfm_alg_alignmask(tfm) + 1))) - 1))) | |
13020 | ; | |
13021 | } | |
13022 | static inline __attribute__((always_inline)) struct crypto_instance *crypto_tfm_alg_instance( | |
13023 | struct crypto_tfm *tfm) | |
13024 | { | |
13025 | return ({ const typeof( ((struct crypto_instance *)0)->alg ) *__mptr = (tfm->__crt_alg); (struct crypto_instance *)( (char *)__mptr - __builtin_offsetof(struct crypto_instance,alg) );}); | |
13026 | } | |
13027 | static inline __attribute__((always_inline)) void *crypto_instance_ctx(struct crypto_instance *inst) | |
13028 | { | |
13029 | return inst->__ctx; | |
13030 | } | |
13031 | static inline __attribute__((always_inline)) struct ablkcipher_alg *crypto_ablkcipher_alg( | |
13032 | struct crypto_ablkcipher *tfm) | |
13033 | { | |
13034 | return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_u.ablkcipher; | |
13035 | } | |
13036 | static inline __attribute__((always_inline)) void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm) | |
13037 | { | |
13038 | return crypto_tfm_ctx(&tfm->base); | |
13039 | } | |
13040 | static inline __attribute__((always_inline)) void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm) | |
13041 | { | |
13042 | return crypto_tfm_ctx_aligned(&tfm->base); | |
13043 | } | |
13044 | static inline __attribute__((always_inline)) struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) | |
13045 | { | |
13046 | return &crypto_aead_tfm(tfm)->__crt_alg->cra_u.aead; | |
13047 | } | |
13048 | static inline __attribute__((always_inline)) void *crypto_aead_ctx(struct crypto_aead *tfm) | |
13049 | { | |
13050 | return crypto_tfm_ctx(&tfm->base); | |
13051 | } | |
13052 | static inline __attribute__((always_inline)) struct crypto_instance *crypto_aead_alg_instance( | |
13053 | struct crypto_aead *aead) | |
13054 | { | |
13055 | return crypto_tfm_alg_instance(&aead->base); | |
13056 | } | |
13057 | static inline __attribute__((always_inline)) struct crypto_blkcipher *crypto_spawn_blkcipher( | |
13058 | struct crypto_spawn *spawn) | |
13059 | { | |
13060 | u32 type = 0x00000004; | |
13061 | u32 mask = 0x0000000f; | |
13062 | return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask)); | |
13063 | } | |
13064 | static inline __attribute__((always_inline)) void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm) | |
13065 | { | |
13066 | return crypto_tfm_ctx(&tfm->base); | |
13067 | } | |
13068 | static inline __attribute__((always_inline)) void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm) | |
13069 | { | |
13070 | return crypto_tfm_ctx_aligned(&tfm->base); | |
13071 | } | |
13072 | static inline __attribute__((always_inline)) struct crypto_cipher *crypto_spawn_cipher( | |
13073 | struct crypto_spawn *spawn) | |
13074 | { | |
13075 | u32 type = 0x00000001; | |
13076 | u32 mask = 0x0000000f; | |
13077 | return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask)); | |
13078 | } | |
13079 | static inline __attribute__((always_inline)) struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) | |
13080 | { | |
13081 | return &crypto_cipher_tfm(tfm)->__crt_alg->cra_u.cipher; | |
13082 | } | |
13083 | static inline __attribute__((always_inline)) struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn) | |
13084 | { | |
13085 | u32 type = 0x00000008; | |
13086 | u32 mask = 0x0000000e; | |
13087 | return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask)); | |
13088 | } | |
13089 | static inline __attribute__((always_inline)) void *crypto_hash_ctx(struct crypto_hash *tfm) | |
13090 | { | |
13091 | return crypto_tfm_ctx(&tfm->base); | |
13092 | } | |
13093 | static inline __attribute__((always_inline)) void *crypto_hash_ctx_aligned(struct crypto_hash *tfm) | |
13094 | { | |
13095 | return crypto_tfm_ctx_aligned(&tfm->base); | |
13096 | } | |
13097 | static inline __attribute__((always_inline)) void blkcipher_walk_init(struct blkcipher_walk *walk, | |
13098 | struct scatterlist *dst, | |
13099 | struct scatterlist *src, | |
13100 | unsigned int nbytes) | |
13101 | { | |
13102 | walk->in.sg = src; | |
13103 | walk->out.sg = dst; | |
13104 | walk->total = nbytes; | |
13105 | } | |
13106 | static inline __attribute__((always_inline)) void ablkcipher_walk_init(struct ablkcipher_walk *walk, | |
13107 | struct scatterlist *dst, | |
13108 | struct scatterlist *src, | |
13109 | unsigned int nbytes) | |
13110 | { | |
13111 | walk->in.sg = src; | |
13112 | walk->out.sg = dst; | |
13113 | walk->total = nbytes; | |
13114 | INIT_LIST_HEAD(&walk->buffers); | |
13115 | } | |
13116 | static inline __attribute__((always_inline)) void ablkcipher_walk_complete(struct ablkcipher_walk *walk) | |
13117 | { | |
13118 | if (__builtin_constant_p((((__builtin_constant_p(!list_empty(&walk->buffers)) ? !!(!list_empty(&walk->buffers)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/algapi.h", .line = 322, }; ______r = __builtin_expect(!!(!list_empty(&walk->buffers)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!list_empty(&walk->buffers)) ? !!(!list_empty(&walk->buffers)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/algapi.h", .line = 322, }; ______r = __builtin_expect(!!(!list_empty(&walk->buffers)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/algapi.h", .line = 322, }; ______r = !!(((__builtin_constant_p(!list_empty(&walk->buffers)) ? !!(!list_empty(&walk->buffers)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/algapi.h", .line = 322, }; ______r = __builtin_expect(!!(!list_empty(&walk->buffers)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
13119 | __ablkcipher_walk_complete(walk); | |
13120 | } | |
13121 | static inline __attribute__((always_inline)) struct crypto_async_request *crypto_get_backlog( | |
13122 | struct crypto_queue *queue) | |
13123 | { | |
13124 | return queue->backlog == &queue->list ? ((void *)0) : | |
13125 | ({ const typeof( ((struct crypto_async_request *)0)->list ) *__mptr = (queue->backlog); (struct crypto_async_request *)( (char *)__mptr - __builtin_offsetof(struct crypto_async_request,list) );}); | |
13126 | } | |
13127 | static inline __attribute__((always_inline)) int ablkcipher_enqueue_request(struct crypto_queue *queue, | |
13128 | struct ablkcipher_request *request) | |
13129 | { | |
13130 | return crypto_enqueue_request(queue, &request->base); | |
13131 | } | |
13132 | static inline __attribute__((always_inline)) struct ablkcipher_request *ablkcipher_dequeue_request( | |
13133 | struct crypto_queue *queue) | |
13134 | { | |
13135 | return ablkcipher_request_cast(crypto_dequeue_request(queue)); | |
13136 | } | |
13137 | static inline __attribute__((always_inline)) void *ablkcipher_request_ctx(struct ablkcipher_request *req) | |
13138 | { | |
13139 | return req->__ctx; | |
13140 | } | |
13141 | static inline __attribute__((always_inline)) int ablkcipher_tfm_in_queue(struct crypto_queue *queue, | |
13142 | struct crypto_ablkcipher *tfm) | |
13143 | { | |
13144 | return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm)); | |
13145 | } | |
13146 | static inline __attribute__((always_inline)) void *aead_request_ctx(struct aead_request *req) | |
13147 | { | |
13148 | return req->__ctx; | |
13149 | } | |
13150 | static inline __attribute__((always_inline)) void aead_request_complete(struct aead_request *req, int err) | |
13151 | { | |
13152 | req->base.complete(&req->base, err); | |
13153 | } | |
13154 | static inline __attribute__((always_inline)) u32 aead_request_flags(struct aead_request *req) | |
13155 | { | |
13156 | return req->base.flags; | |
13157 | } | |
13158 | static inline __attribute__((always_inline)) struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, | |
13159 | u32 type, u32 mask) | |
13160 | { | |
13161 | return crypto_attr_alg(tb[1], type, mask); | |
13162 | } | |
13163 | static inline __attribute__((always_inline)) int crypto_requires_sync(u32 type, u32 mask) | |
13164 | { | |
13165 | return (type ^ 0x00000080) & mask & 0x00000080; | |
13166 | } | |
13167 | struct crypto_aes_ctx { | |
13168 | u32 key_enc[((15 * 16) / sizeof(u32))]; | |
13169 | u32 key_dec[((15 * 16) / sizeof(u32))]; | |
13170 | u32 key_length; | |
13171 | }; | |
13172 | extern const u32 crypto_ft_tab[4][256]; | |
13173 | extern const u32 crypto_fl_tab[4][256]; | |
13174 | extern const u32 crypto_it_tab[4][256]; | |
13175 | extern const u32 crypto_il_tab[4][256]; | |
13176 | int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |
13177 | unsigned int key_len); | |
13178 | int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key, | |
13179 | unsigned int key_len); | |
13180 | struct crypto_ahash; | |
13181 | struct hash_alg_common { | |
13182 | unsigned int digestsize; | |
13183 | unsigned int statesize; | |
13184 | struct crypto_alg base; | |
13185 | }; | |
13186 | struct ahash_request { | |
13187 | struct crypto_async_request base; | |
13188 | unsigned int nbytes; | |
13189 | struct scatterlist *src; | |
13190 | u8 *result; | |
13191 | void *priv; | |
13192 | void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long)))); | |
13193 | }; | |
13194 | struct ahash_alg { | |
13195 | int (*init)(struct ahash_request *req); | |
13196 | int (*update)(struct ahash_request *req); | |
13197 | int (*final)(struct ahash_request *req); | |
13198 | int (*finup)(struct ahash_request *req); | |
13199 | int (*digest)(struct ahash_request *req); | |
13200 | int (*export)(struct ahash_request *req, void *out); | |
13201 | int (*import)(struct ahash_request *req, const void *in); | |
13202 | int (*setkey)(struct crypto_ahash *tfm, const u8 *key, | |
13203 | unsigned int keylen); | |
13204 | struct hash_alg_common halg; | |
13205 | }; | |
13206 | struct shash_desc { | |
13207 | struct crypto_shash *tfm; | |
13208 | u32 flags; | |
13209 | void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long)))); | |
13210 | }; | |
13211 | struct shash_alg { | |
13212 | int (*init)(struct shash_desc *desc); | |
13213 | int (*update)(struct shash_desc *desc, const u8 *data, | |
13214 | unsigned int len); | |
13215 | int (*final)(struct shash_desc *desc, u8 *out); | |
13216 | int (*finup)(struct shash_desc *desc, const u8 *data, | |
13217 | unsigned int len, u8 *out); | |
13218 | int (*digest)(struct shash_desc *desc, const u8 *data, | |
13219 | unsigned int len, u8 *out); | |
13220 | int (*export)(struct shash_desc *desc, void *out); | |
13221 | int (*import)(struct shash_desc *desc, const void *in); | |
13222 | int (*setkey)(struct crypto_shash *tfm, const u8 *key, | |
13223 | unsigned int keylen); | |
13224 | unsigned int descsize; | |
13225 | unsigned int digestsize | |
13226 | __attribute__ ((aligned(__alignof__(struct hash_alg_common)))); | |
13227 | unsigned int statesize; | |
13228 | struct crypto_alg base; | |
13229 | }; | |
13230 | struct crypto_ahash { | |
13231 | int (*init)(struct ahash_request *req); | |
13232 | int (*update)(struct ahash_request *req); | |
13233 | int (*final)(struct ahash_request *req); | |
13234 | int (*finup)(struct ahash_request *req); | |
13235 | int (*digest)(struct ahash_request *req); | |
13236 | int (*export)(struct ahash_request *req, void *out); | |
13237 | int (*import)(struct ahash_request *req, const void *in); | |
13238 | int (*setkey)(struct crypto_ahash *tfm, const u8 *key, | |
13239 | unsigned int keylen); | |
13240 | unsigned int reqsize; | |
13241 | struct crypto_tfm base; | |
13242 | }; | |
13243 | struct crypto_shash { | |
13244 | unsigned int descsize; | |
13245 | struct crypto_tfm base; | |
13246 | }; | |
13247 | static inline __attribute__((always_inline)) struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) | |
13248 | { | |
13249 | return ({ const typeof( ((struct crypto_ahash *)0)->base ) *__mptr = (tfm); (struct crypto_ahash *)( (char *)__mptr - __builtin_offsetof(struct crypto_ahash,base) );}); | |
13250 | } | |
13251 | struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, | |
13252 | u32 mask); | |
13253 | static inline __attribute__((always_inline)) struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) | |
13254 | { | |
13255 | return &tfm->base; | |
13256 | } | |
13257 | static inline __attribute__((always_inline)) void crypto_free_ahash(struct crypto_ahash *tfm) | |
13258 | { | |
13259 | crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); | |
13260 | } | |
13261 | static inline __attribute__((always_inline)) unsigned int crypto_ahash_alignmask( | |
13262 | struct crypto_ahash *tfm) | |
13263 | { | |
13264 | return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); | |
13265 | } | |
13266 | static inline __attribute__((always_inline)) struct hash_alg_common *__crypto_hash_alg_common( | |
13267 | struct crypto_alg *alg) | |
13268 | { | |
13269 | return ({ const typeof( ((struct hash_alg_common *)0)->base ) *__mptr = (alg); (struct hash_alg_common *)( (char *)__mptr - __builtin_offsetof(struct hash_alg_common,base) );}); | |
13270 | } | |
13271 | static inline __attribute__((always_inline)) struct hash_alg_common *crypto_hash_alg_common( | |
13272 | struct crypto_ahash *tfm) | |
13273 | { | |
13274 | return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg); | |
13275 | } | |
13276 | static inline __attribute__((always_inline)) unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) | |
13277 | { | |
13278 | return crypto_hash_alg_common(tfm)->digestsize; | |
13279 | } | |
13280 | static inline __attribute__((always_inline)) unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm) | |
13281 | { | |
13282 | return crypto_hash_alg_common(tfm)->statesize; | |
13283 | } | |
13284 | static inline __attribute__((always_inline)) u32 crypto_ahash_get_flags(struct crypto_ahash *tfm) | |
13285 | { | |
13286 | return crypto_tfm_get_flags(crypto_ahash_tfm(tfm)); | |
13287 | } | |
13288 | static inline __attribute__((always_inline)) void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags) | |
13289 | { | |
13290 | crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags); | |
13291 | } | |
13292 | static inline __attribute__((always_inline)) void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags) | |
13293 | { | |
13294 | crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags); | |
13295 | } | |
13296 | static inline __attribute__((always_inline)) struct crypto_ahash *crypto_ahash_reqtfm( | |
13297 | struct ahash_request *req) | |
13298 | { | |
13299 | return __crypto_ahash_cast(req->base.tfm); | |
13300 | } | |
13301 | static inline __attribute__((always_inline)) unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) | |
13302 | { | |
13303 | return tfm->reqsize; | |
13304 | } | |
13305 | static inline __attribute__((always_inline)) void *ahash_request_ctx(struct ahash_request *req) | |
13306 | { | |
13307 | return req->__ctx; | |
13308 | } | |
13309 | int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | |
13310 | unsigned int keylen); | |
13311 | int crypto_ahash_finup(struct ahash_request *req); | |
13312 | int crypto_ahash_final(struct ahash_request *req); | |
13313 | int crypto_ahash_digest(struct ahash_request *req); | |
13314 | static inline __attribute__((always_inline)) int crypto_ahash_export(struct ahash_request *req, void *out) | |
13315 | { | |
13316 | return crypto_ahash_reqtfm(req)->export(req, out); | |
13317 | } | |
13318 | static inline __attribute__((always_inline)) int crypto_ahash_import(struct ahash_request *req, const void *in) | |
13319 | { | |
13320 | return crypto_ahash_reqtfm(req)->import(req, in); | |
13321 | } | |
13322 | static inline __attribute__((always_inline)) int crypto_ahash_init(struct ahash_request *req) | |
13323 | { | |
13324 | return crypto_ahash_reqtfm(req)->init(req); | |
13325 | } | |
13326 | static inline __attribute__((always_inline)) int crypto_ahash_update(struct ahash_request *req) | |
13327 | { | |
13328 | return crypto_ahash_reqtfm(req)->update(req); | |
13329 | } | |
13330 | static inline __attribute__((always_inline)) void ahash_request_set_tfm(struct ahash_request *req, | |
13331 | struct crypto_ahash *tfm) | |
13332 | { | |
13333 | req->base.tfm = crypto_ahash_tfm(tfm); | |
13334 | } | |
13335 | static inline __attribute__((always_inline)) struct ahash_request *ahash_request_alloc( | |
13336 | struct crypto_ahash *tfm, gfp_t gfp) | |
13337 | { | |
13338 | struct ahash_request *req; | |
13339 | req = kmalloc(sizeof(struct ahash_request) + | |
13340 | crypto_ahash_reqsize(tfm), gfp); | |
13341 | if (__builtin_constant_p((((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/hash.h", .line = 222, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/hash.h", .line = 222, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/hash.h", .line = 222, }; ______r = !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/hash.h", .line = 222, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
13342 | ahash_request_set_tfm(req, tfm); | |
13343 | return req; | |
13344 | } | |
13345 | static inline __attribute__((always_inline)) void ahash_request_free(struct ahash_request *req) | |
13346 | { | |
13347 | kzfree(req); | |
13348 | } | |
13349 | static inline __attribute__((always_inline)) struct ahash_request *ahash_request_cast( | |
13350 | struct crypto_async_request *req) | |
13351 | { | |
13352 | return ({ const typeof( ((struct ahash_request *)0)->base ) *__mptr = (req); (struct ahash_request *)( (char *)__mptr - __builtin_offsetof(struct ahash_request,base) );}); | |
13353 | } | |
13354 | static inline __attribute__((always_inline)) void ahash_request_set_callback(struct ahash_request *req, | |
13355 | u32 flags, | |
13356 | crypto_completion_t complete, | |
13357 | void *data) | |
13358 | { | |
13359 | req->base.complete = complete; | |
13360 | req->base.data = data; | |
13361 | req->base.flags = flags; | |
13362 | } | |
13363 | static inline __attribute__((always_inline)) void ahash_request_set_crypt(struct ahash_request *req, | |
13364 | struct scatterlist *src, u8 *result, | |
13365 | unsigned int nbytes) | |
13366 | { | |
13367 | req->src = src; | |
13368 | req->nbytes = nbytes; | |
13369 | req->result = result; | |
13370 | } | |
13371 | struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, | |
13372 | u32 mask); | |
13373 | static inline __attribute__((always_inline)) struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) | |
13374 | { | |
13375 | return &tfm->base; | |
13376 | } | |
13377 | static inline __attribute__((always_inline)) void crypto_free_shash(struct crypto_shash *tfm) | |
13378 | { | |
13379 | crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm)); | |
13380 | } | |
13381 | static inline __attribute__((always_inline)) unsigned int crypto_shash_alignmask( | |
13382 | struct crypto_shash *tfm) | |
13383 | { | |
13384 | return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); | |
13385 | } | |
13386 | static inline __attribute__((always_inline)) unsigned int crypto_shash_blocksize(struct crypto_shash *tfm) | |
13387 | { | |
13388 | return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm)); | |
13389 | } | |
13390 | static inline __attribute__((always_inline)) struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg) | |
13391 | { | |
13392 | return ({ const typeof( ((struct shash_alg *)0)->base ) *__mptr = (alg); (struct shash_alg *)( (char *)__mptr - __builtin_offsetof(struct shash_alg,base) );}); | |
13393 | } | |
13394 | static inline __attribute__((always_inline)) struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm) | |
13395 | { | |
13396 | return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg); | |
13397 | } | |
13398 | static inline __attribute__((always_inline)) unsigned int crypto_shash_digestsize(struct crypto_shash *tfm) | |
13399 | { | |
13400 | return crypto_shash_alg(tfm)->digestsize; | |
13401 | } | |
13402 | static inline __attribute__((always_inline)) unsigned int crypto_shash_statesize(struct crypto_shash *tfm) | |
13403 | { | |
13404 | return crypto_shash_alg(tfm)->statesize; | |
13405 | } | |
13406 | static inline __attribute__((always_inline)) u32 crypto_shash_get_flags(struct crypto_shash *tfm) | |
13407 | { | |
13408 | return crypto_tfm_get_flags(crypto_shash_tfm(tfm)); | |
13409 | } | |
13410 | static inline __attribute__((always_inline)) void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags) | |
13411 | { | |
13412 | crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags); | |
13413 | } | |
13414 | static inline __attribute__((always_inline)) void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags) | |
13415 | { | |
13416 | crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags); | |
13417 | } | |
13418 | static inline __attribute__((always_inline)) unsigned int crypto_shash_descsize(struct crypto_shash *tfm) | |
13419 | { | |
13420 | return tfm->descsize; | |
13421 | } | |
13422 | static inline __attribute__((always_inline)) void *shash_desc_ctx(struct shash_desc *desc) | |
13423 | { | |
13424 | return desc->__ctx; | |
13425 | } | |
13426 | int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, | |
13427 | unsigned int keylen); | |
13428 | int crypto_shash_digest(struct shash_desc *desc, const u8 *data, | |
13429 | unsigned int len, u8 *out); | |
13430 | static inline __attribute__((always_inline)) int crypto_shash_export(struct shash_desc *desc, void *out) | |
13431 | { | |
13432 | return crypto_shash_alg(desc->tfm)->export(desc, out); | |
13433 | } | |
13434 | static inline __attribute__((always_inline)) int crypto_shash_import(struct shash_desc *desc, const void *in) | |
13435 | { | |
13436 | return crypto_shash_alg(desc->tfm)->import(desc, in); | |
13437 | } | |
13438 | static inline __attribute__((always_inline)) int crypto_shash_init(struct shash_desc *desc) | |
13439 | { | |
13440 | return crypto_shash_alg(desc->tfm)->init(desc); | |
13441 | } | |
13442 | int crypto_shash_update(struct shash_desc *desc, const u8 *data, | |
13443 | unsigned int len); | |
13444 | int crypto_shash_final(struct shash_desc *desc, u8 *out); | |
13445 | int crypto_shash_finup(struct shash_desc *desc, const u8 *data, | |
13446 | unsigned int len, u8 *out); | |
13447 | struct cryptd_ablkcipher { | |
13448 | struct crypto_ablkcipher base; | |
13449 | }; | |
13450 | static inline __attribute__((always_inline)) struct cryptd_ablkcipher *__cryptd_ablkcipher_cast( | |
13451 | struct crypto_ablkcipher *tfm) | |
13452 | { | |
13453 | return (struct cryptd_ablkcipher *)tfm; | |
13454 | } | |
13455 | struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, | |
13456 | u32 type, u32 mask); | |
13457 | struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); | |
13458 | void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); | |
13459 | struct cryptd_ahash { | |
13460 | struct crypto_ahash base; | |
13461 | }; | |
13462 | static inline __attribute__((always_inline)) struct cryptd_ahash *__cryptd_ahash_cast( | |
13463 | struct crypto_ahash *tfm) | |
13464 | { | |
13465 | return (struct cryptd_ahash *)tfm; | |
13466 | } | |
13467 | struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, | |
13468 | u32 type, u32 mask); | |
13469 | struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm); | |
13470 | struct shash_desc *cryptd_shash_desc(struct ahash_request *req); | |
13471 | void cryptd_free_ahash(struct cryptd_ahash *tfm); | |
13472 | struct cryptd_aead { | |
13473 | struct crypto_aead base; | |
13474 | }; | |
13475 | static inline __attribute__((always_inline)) struct cryptd_aead *__cryptd_aead_cast( | |
13476 | struct crypto_aead *tfm) | |
13477 | { | |
13478 | return (struct cryptd_aead *)tfm; | |
13479 | } | |
/* cryptd AEAD API: allocate, inspect and free a cryptd AEAD instance. */
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
	u32 type, u32 mask);
struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
void cryptd_free_aead(struct cryptd_aead *tfm);
/* Scheduling priority as passed via sched_setscheduler()-style APIs. */
struct sched_param {
	int sched_priority;
};
struct task_struct;
/* Userspace capability header: ABI version plus target pid (capget/capset). */
typedef struct __user_cap_header_struct {
	__u32 version;
	int pid;
} *cap_user_header_t;
/* Userspace capability payload: one 32-bit word per set. */
typedef struct __user_cap_data_struct {
	__u32 effective;
	__u32 permitted;
	__u32 inheritable;
} *cap_user_data_t;
/* On-disk file capability xattr layout: magic/flags word plus two
 * 32-bit permitted/inheritable pairs (little-endian). */
struct vfs_cap_data {
	__le32 magic_etc;
	struct {
		__le32 permitted;
		__le32 inheritable;
	} data[2];
};
extern int file_caps_enabled;
/* In-kernel capability set: two 32-bit words (capabilities 0..63). */
typedef struct kernel_cap_struct {
	__u32 cap[2];
} kernel_cap_t;
/* Decoded form of vfs_cap_data for kernel use. */
struct cpu_vfs_cap_data {
	__u32 magic_etc;
	kernel_cap_t permitted;
	kernel_cap_t inheritable;
};
struct dentry;
struct user_namespace;
struct user_namespace *current_user_ns(void);
/* Canonical constant capability sets defined in kernel/capability.c. */
extern const kernel_cap_t __cap_empty_set;
extern const kernel_cap_t __cap_full_set;
extern const kernel_cap_t __cap_init_eff_set;
13519 | static inline __attribute__((always_inline)) kernel_cap_t cap_combine(const kernel_cap_t a, | |
13520 | const kernel_cap_t b) | |
13521 | { | |
13522 | kernel_cap_t dest; | |
13523 | do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] | b.cap[__capi]; } } while (0); | |
13524 | return dest; | |
13525 | } | |
13526 | static inline __attribute__((always_inline)) kernel_cap_t cap_intersect(const kernel_cap_t a, | |
13527 | const kernel_cap_t b) | |
13528 | { | |
13529 | kernel_cap_t dest; | |
13530 | do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] & b.cap[__capi]; } } while (0); | |
13531 | return dest; | |
13532 | } | |
13533 | static inline __attribute__((always_inline)) kernel_cap_t cap_drop(const kernel_cap_t a, | |
13534 | const kernel_cap_t drop) | |
13535 | { | |
13536 | kernel_cap_t dest; | |
13537 | do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] &~ drop.cap[__capi]; } } while (0); | |
13538 | return dest; | |
13539 | } | |
13540 | static inline __attribute__((always_inline)) kernel_cap_t cap_invert(const kernel_cap_t c) | |
13541 | { | |
13542 | kernel_cap_t dest; | |
13543 | do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = ~ c.cap[__capi]; } } while (0); | |
13544 | return dest; | |
13545 | } | |
/*
 * Return 1 if no capability bit is set in @a, 0 otherwise.
 * The condition is wrapped in expanded ftrace branch-profiling
 * instrumentation (originally include/linux/capability.h:486); the
 * static ftrace_branch_data record counts how often each outcome is
 * taken.  The underlying test is simply (a.cap[__capi] != 0).
 */
static inline __attribute__((always_inline)) int cap_isclear(const kernel_cap_t a)
{
	unsigned __capi;
	for (__capi = 0; __capi < 2; ++__capi) {
		if (__builtin_constant_p(((a.cap[__capi] != 0))) ? !!((a.cap[__capi] != 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/capability.h", .line = 486, }; ______r = !!((a.cap[__capi] != 0)); ______f.miss_hit[______r]++; ______r; }))
			return 0;
	}
	return 1;
}
13555 | static inline __attribute__((always_inline)) int cap_issubset(const kernel_cap_t a, const kernel_cap_t set) | |
13556 | { | |
13557 | kernel_cap_t dest; | |
13558 | dest = cap_drop(a, set); | |
13559 | return cap_isclear(dest); | |
13560 | } | |
/*
 * Return non-zero if @cap belongs to the filesystem capability mask:
 * word 0 bits 0-4, 9 and 27, plus word 1 bit 0 (capability 32).
 * The (cap & 31) / (cap >> 5) arithmetic selects bit and word.
 */
static inline __attribute__((always_inline)) int cap_is_fs_cap(int cap)
{
	const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
	return !!((1 << ((cap) & 31)) & __cap_fs_set.cap[((cap) >> 5)]);
}
/* Return @a with all filesystem capabilities (same mask as
 * cap_is_fs_cap()) removed. */
static inline __attribute__((always_inline)) kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
{
	const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
	return cap_drop(a, __cap_fs_set);
}
/* Return @a with those filesystem capabilities raised that are also
 * present in @permitted (never grants beyond @permitted). */
static inline __attribute__((always_inline)) kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
	const kernel_cap_t permitted)
{
	const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
	return cap_combine(a,
		cap_intersect(permitted, __cap_fs_set));
}
13578 | static inline __attribute__((always_inline)) kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a) | |
13579 | { | |
13580 | const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } }); | |
13581 | return cap_drop(a, __cap_fs_set); | |
13582 | } | |
/* Return @a with those NFSD capabilities (word 0 bits 0-4, 24, 27 and
 * capability 32) raised that are also present in @permitted. */
static inline __attribute__((always_inline)) kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
	const kernel_cap_t permitted)
{
	const kernel_cap_t __cap_nfsd_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } });
	return cap_combine(a,
		cap_intersect(permitted, __cap_nfsd_set));
}
/* Capability-query API implemented in kernel/capability.c and the LSM core. */
extern bool has_capability(struct task_struct *t, int cap);
extern bool has_ns_capability(struct task_struct *t,
	struct user_namespace *ns, int cap);
extern bool has_capability_noaudit(struct task_struct *t, int cap);
extern bool capable(int cap);
extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool task_ns_capable(struct task_struct *t, int cap);
extern bool nsown_capable(int cap);
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
/*
 * Red-black tree node.  rb_parent_color packs the parent pointer with
 * the node color in the low bits; the aligned(sizeof(long)) attribute
 * guarantees those low bits of a node address are free for that use.
 */
struct rb_node
{
	unsigned long rb_parent_color;
	struct rb_node *rb_right;
	struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
/* Root of a red-black tree: just the topmost node pointer (NULL if empty). */
struct rb_root
{
	struct rb_node *rb_node;
};
13609 | static inline __attribute__((always_inline)) void rb_set_parent(struct rb_node *rb, struct rb_node *p) | |
13610 | { | |
13611 | rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p; | |
13612 | } | |
13613 | static inline __attribute__((always_inline)) void rb_set_color(struct rb_node *rb, int color) | |
13614 | { | |
13615 | rb->rb_parent_color = (rb->rb_parent_color & ~1) | color; | |
13616 | } | |
13617 | static inline __attribute__((always_inline)) void rb_init_node(struct rb_node *rb) | |
13618 | { | |
13619 | rb->rb_parent_color = 0; | |
13620 | rb->rb_right = ((void *)0); | |
13621 | rb->rb_left = ((void *)0); | |
13622 | (rb_set_parent(rb, rb)); | |
13623 | } | |
/* Core red-black tree operations (lib/rbtree.c). */
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
/* Augmented-tree support: callback invoked per node to refresh cached data. */
typedef void (*rb_augment_f)(struct rb_node *node, void *data);
extern void rb_augment_insert(struct rb_node *node,
	rb_augment_f func, void *data);
extern struct rb_node *rb_augment_erase_begin(struct rb_node *node);
extern void rb_augment_erase_end(struct rb_node *node,
	rb_augment_f func, void *data);
/* In-order traversal helpers. */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
/* Replace @victim with @new without rebalancing. */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
	struct rb_root *root);
13638 | static inline __attribute__((always_inline)) void rb_link_node(struct rb_node * node, struct rb_node * parent, | |
13639 | struct rb_node ** rb_link) | |
13640 | { | |
13641 | node->rb_parent_color = (unsigned long )parent; | |
13642 | node->rb_left = node->rb_right = ((void *)0); | |
13643 | *rb_link = node; | |
13644 | } | |
/* Priority-tree node without start/last indices (indices live elsewhere,
 * e.g. in the enclosing vm_area_struct when root->raw is set). */
struct raw_prio_tree_node {
	struct prio_tree_node *left;
	struct prio_tree_node *right;
	struct prio_tree_node *parent;
};
/* Full priority-tree node carrying its own [start, last] index range. */
struct prio_tree_node {
	struct prio_tree_node *left;
	struct prio_tree_node *right;
	struct prio_tree_node *parent;
	unsigned long start;
	unsigned long last;
};
/* Tree root: topmost node, number of significant index bits, and a flag
 * selecting raw vs. full nodes. */
struct prio_tree_root {
	struct prio_tree_node *prio_tree_node;
	unsigned short index_bits;
	unsigned short raw;
};
/* Iterator state for walking all nodes overlapping [r_index, h_index]. */
struct prio_tree_iter {
	struct prio_tree_node *cur;
	unsigned long mask;
	unsigned long value;
	int size_level;
	struct prio_tree_root *root;
	unsigned long r_index;
	unsigned long h_index;
};
13671 | static inline __attribute__((always_inline)) void prio_tree_iter_init(struct prio_tree_iter *iter, | |
13672 | struct prio_tree_root *root, unsigned long r_index, unsigned long h_index) | |
13673 | { | |
13674 | iter->root = root; | |
13675 | iter->r_index = r_index; | |
13676 | iter->h_index = h_index; | |
13677 | iter->cur = ((void *)0); | |
13678 | } | |
13679 | static inline __attribute__((always_inline)) int prio_tree_empty(const struct prio_tree_root *root) | |
13680 | { | |
13681 | return root->prio_tree_node == ((void *)0); | |
13682 | } | |
13683 | static inline __attribute__((always_inline)) int prio_tree_root(const struct prio_tree_node *node) | |
13684 | { | |
13685 | return node->parent == node; | |
13686 | } | |
13687 | static inline __attribute__((always_inline)) int prio_tree_left_empty(const struct prio_tree_node *node) | |
13688 | { | |
13689 | return node->left == node; | |
13690 | } | |
13691 | static inline __attribute__((always_inline)) int prio_tree_right_empty(const struct prio_tree_node *node) | |
13692 | { | |
13693 | return node->right == node; | |
13694 | } | |
/* Priority-tree mutation/search entry points (lib/prio_tree.c). */
struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root,
	struct prio_tree_node *old, struct prio_tree_node *node);
struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
	struct prio_tree_node *node);
void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node);
struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter);
/* Per-page debug flag bits. */
enum page_debug_flags {
	PAGE_DEBUG_FLAG_POISON,
};
struct address_space;
/*
 * One physical page frame.  The unions overlay mutually exclusive uses
 * of the same storage: mapcount vs. slab object bookkeeping, page-cache
 * identity vs. slab cache / compound-page head, and index vs. freelist.
 */
struct page {
	unsigned long flags;		/* atomic page state bits */
	atomic_t _count;		/* reference count */
	union {
		atomic_t _mapcount;	/* count of page-table mappings */
		struct {		/* SLUB per-slab object accounting */
			u16 inuse;
			u16 objects;
		};
	};
	union {
		struct {
			unsigned long private;
			struct address_space *mapping;	/* owning address space */
		};
		struct kmem_cache *slab;	/* slab cache this page belongs to */
		struct page *first_page;	/* tail pages: head of compound page */
	};
	union {
		unsigned long index;	/* offset within mapping */
		void *freelist;		/* slab free-object list */
	};
	struct list_head lru;		/* LRU / pageout list linkage */
};
/* VMA flag word type. */
typedef unsigned long vm_flags_t;
/* A mapped region (nommu): address range, backing file and usage count. */
struct vm_region {
	struct rb_node vm_rb;		/* linkage in the region tree */
	vm_flags_t vm_flags;
	unsigned long vm_start;		/* start address of region */
	unsigned long vm_end;		/* end address of region */
	unsigned long vm_top;		/* end of allocated storage */
	unsigned long vm_pgoff;		/* offset into the backing file, in pages */
	struct file *vm_file;		/* backing file, or NULL */
	int vm_usage;			/* share count */
	bool vm_icache_flushed : 1;
};
/*
 * One virtual memory area of a process: a contiguous [vm_start, vm_end)
 * range with uniform protection and backing.  Linked both as a sorted
 * list (vm_next/vm_prev) and in the mm's rbtree (vm_rb); the `shared`
 * union links file-backed VMAs into the mapping's priority tree.
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* owning address space */
	unsigned long vm_start;
	unsigned long vm_end;
	struct vm_area_struct *vm_next, *vm_prev;
	pgprot_t vm_page_prot;		/* page protection for this range */
	unsigned long vm_flags;
	struct rb_node vm_rb;
	union {
		struct {
			struct list_head list;
			void *parent;
			struct vm_area_struct *head;
		} vm_set;
		struct raw_prio_tree_node prio_tree_node;
	} shared;
	struct list_head anon_vma_chain;
	struct anon_vma *anon_vma;	/* anonymous reverse-map anchor */
	const struct vm_operations_struct *vm_ops;
	unsigned long vm_pgoff;		/* offset in backing file, in pages */
	struct file * vm_file;		/* backing file, or NULL if anonymous */
	void * vm_private_data;
};
/* One thread participating in a coredump (singly linked). */
struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};
/* Coredump coordination: remaining thread count, the dumping thread,
 * and a completion signalled when startup is done. */
struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};
/* Indices into mm_rss_stat.count[]. */
enum {
	MM_FILEPAGES,
	MM_ANONPAGES,
	MM_SWAPENTS,
	NR_MM_COUNTERS
};
/* Per-mm resident-set counters, one per counter class above. */
struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
/*
 * Per-address-space state shared by all threads of a process: the VMA
 * list and rbtree, page tables, reference counts, the mmap semaphore,
 * RSS accounting and segment boundaries.
 */
struct mm_struct {
	struct vm_area_struct * mmap;		/* sorted list of VMAs */
	struct rb_root mm_rb;			/* VMAs indexed by address */
	struct vm_area_struct * mmap_cache;	/* cached VMA lookup -- presumably last find_vma() hit; verify */
	unsigned long (*get_unmapped_area) (struct file *filp,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
	unsigned long mmap_base;		/* base of the mmap area */
	unsigned long task_size;
	unsigned long cached_hole_size;
	unsigned long free_area_cache;
	pgd_t * pgd;				/* top-level page table */
	atomic_t mm_users;			/* users with a live address space */
	atomic_t mm_count;			/* references to the struct itself */
	int map_count;				/* number of VMAs */
	spinlock_t page_table_lock;
	struct rw_semaphore mmap_sem;		/* protects the VMA structures */
	struct list_head mmlist;
	unsigned long hiwater_rss;		/* RSS high-water mark */
	unsigned long hiwater_vm;
	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;
	unsigned long saved_auxv[(2*(2 + 19 + 1))];	/* ELF auxv copy for /proc */
	struct mm_rss_stat rss_stat;
	struct linux_binfmt *binfmt;
	cpumask_var_t cpu_vm_mask_var;
	mm_context_t context;			/* arch-specific MMU context */
	unsigned int faultstamp;
	unsigned int token_priority;
	unsigned int last_interval;
	atomic_t oom_disable_count;
	unsigned long flags;
	struct core_state *core_state;		/* non-NULL while coredumping */
	spinlock_t ioctx_lock;
	struct hlist_head ioctx_list;		/* AIO contexts */
	struct file *exe_file;
	unsigned long num_exe_file_vmas;
	struct mmu_notifier_mm *mmu_notifier_mm;
	pgtable_t pmd_huge_pte;
};
/* No-op in this configuration: cpu_vm_mask_var needs no separate
 * allocation/initialization here. */
static inline __attribute__((always_inline)) void mm_init_cpumask(struct mm_struct *mm)
{
}
/* Accessor for the mm's CPU mask (cpu_vm_mask_var). */
static inline __attribute__((always_inline)) cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}
/* CPU-time accounting types. */
typedef unsigned long cputime_t;
typedef u64 cputime64_t;
/* Legacy System V IPC permission record (16-bit uid/gid ABI). */
struct ipc_perm
{
	__kernel_key_t key;
	__kernel_uid_t uid;
	__kernel_gid_t gid;
	__kernel_uid_t cuid;	/* presumably creator uid/gid -- verify */
	__kernel_gid_t cgid;
	__kernel_mode_t mode;
	unsigned short seq;
};
/* 64-bit-clean IPC permission record (32-bit uid/gid ABI) with explicit
 * padding and reserved fields for future extension. */
struct ipc64_perm {
	__kernel_key_t key;
	__kernel_uid32_t uid;
	__kernel_gid32_t gid;
	__kernel_uid32_t cuid;
	__kernel_gid32_t cgid;
	__kernel_mode_t mode;
	unsigned char __pad1[4 - sizeof(__kernel_mode_t)];	/* pad mode to 4 bytes */
	unsigned short seq;
	unsigned short __pad2;
	unsigned long __unused1;
	unsigned long __unused2;
};
/* Pairs a msgbuf pointer with a message type for the old msgrcv ABI. */
struct ipc_kludge {
	struct msgbuf *msgp;
	long msgtyp;
};
/* In-kernel IPC object identity and permissions, with its own lock. */
struct kern_ipc_perm
{
	spinlock_t lock;
	int deleted;		/* object has been removed */
	int id;
	key_t key;
	uid_t uid;
	gid_t gid;
	uid_t cuid;
	gid_t cgid;
	mode_t mode;
	unsigned long seq;
	void *security;		/* opaque security-module state */
};
/* Legacy semaphore-set descriptor exposed by the old semctl ABI. */
struct semid_ds {
	struct ipc_perm sem_perm;
	__kernel_time_t sem_otime;	/* last semop time */
	__kernel_time_t sem_ctime;	/* last change time */
	struct sem *sem_base;
	struct sem_queue *sem_pending;
	struct sem_queue **sem_pending_last;
	struct sem_undo *undo;
	unsigned short sem_nsems;	/* number of semaphores in the set */
};
/* 64-bit-clean semaphore-set descriptor with reserved padding fields. */
struct semid64_ds {
	struct ipc64_perm sem_perm;
	__kernel_time_t sem_otime;
	unsigned long __unused1;
	__kernel_time_t sem_ctime;
	unsigned long __unused2;
	unsigned long sem_nsems;
	unsigned long __unused3;
	unsigned long __unused4;
};
/* One semaphore operation as passed to semop(). */
struct sembuf {
	unsigned short sem_num;	/* index within the set */
	short sem_op;		/* operation delta */
	short sem_flg;		/* operation flags */
};
/* Fourth argument of semctl(): interpretation depends on the command. */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
	void *__pad;
};
/* System-wide semaphore limits/statistics returned by semctl(IPC_INFO). */
struct seminfo {
	int semmap;
	int semmni;
	int semmns;
	int semmnu;
	int semmsl;
	int semopm;
	int semume;
	int semusz;
	int semvmx;
	int semaem;
};
struct task_struct;
/* One in-kernel semaphore: value, last operating pid and its own
 * pending-operation list (for single-semaphore fast paths). */
struct sem {
	int semval;
	int sempid;
	struct list_head sem_pending;
};
/* A semaphore set; sem_perm is cacheline-aligned (1 << 6 bytes). */
struct sem_array {
	struct kern_ipc_perm __attribute__((__aligned__((1 << (6)))))
		sem_perm;
	time_t sem_otime;		/* last semop time */
	time_t sem_ctime;		/* last change time */
	struct sem *sem_base;		/* array of sem_nsems semaphores */
	struct list_head sem_pending;	/* operations spanning several semaphores */
	struct list_head list_id;	/* undo structures for this set */
	int sem_nsems;
	int complex_count;		/* pending complex operations */
};
/* A sleeping semop() invocation waiting for its operations to complete. */
struct sem_queue {
	struct list_head simple_list;
	struct list_head list;
	struct task_struct *sleeper;
	struct sem_undo *undo;
	int pid;
	int status;
	struct sembuf *sops;	/* the operations being attempted */
	int nsops;
	int alter;		/* does any operation alter the set? */
};
/* Per-process, per-semaphore-set undo record (SEM_UNDO adjustments). */
struct sem_undo {
	struct list_head list_proc;
	struct rcu_head rcu;
	struct sem_undo_list *ulp;
	struct list_head list_id;
	int semid;
	short * semadj;		/* one adjustment per semaphore in the set */
};
/* Head of a process's undo records, shared on CLONE_SYSVSEM. */
struct sem_undo_list {
	atomic_t refcnt;
	spinlock_t lock;
	struct list_head list_proc;
};
/* Per-task SysV semaphore state. */
struct sysv_sem {
	struct sem_undo_list *undo_list;
};
/* Semaphore-undo lifecycle hooks used by fork()/exit(). */
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);
struct siginfo;
/* Old single-word signal mask. */
typedef unsigned long old_sigset_t;
/* Full signal mask: 64 signals in (64 / 32) = 2 words here. */
typedef struct {
	unsigned long sig[(64 / 32)];
} sigset_t;
/* Signal handler and sigreturn-trampoline function pointer types. */
typedef void __signalfn_t(int);
typedef __signalfn_t *__sighandler_t;
typedef void __restorefn_t(void);
typedef __restorefn_t *__sigrestore_t;
extern void do_notify_resume(struct pt_regs *, void *, __u32);
/* Legacy sigaction layout (old_sigset_t mask, different field order). */
struct old_sigaction {
	__sighandler_t sa_handler;
	old_sigset_t sa_mask;
	unsigned long sa_flags;
	__sigrestore_t sa_restorer;
};
/* Current sigaction layout with full sigset_t mask last. */
struct sigaction {
	__sighandler_t sa_handler;
	unsigned long sa_flags;
	__sigrestore_t sa_restorer;
	sigset_t sa_mask;
};
/* Kernel-side wrapper around sigaction. */
struct k_sigaction {
	struct sigaction sa;
};
/* Alternate signal stack descriptor (sigaltstack). */
typedef struct sigaltstack {
	void *ss_sp;
	int ss_flags;
	size_t ss_size;
} stack_t;
/* Set bit (_sig - 1) in the signal set using the x86 `bts` instruction;
 * used when _sig is not a compile-time constant. */
static inline __attribute__((always_inline)) void __gen_sigaddset(sigset_t *set, int _sig)
{
	asm("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
}
14000 | static inline __attribute__((always_inline)) void __const_sigaddset(sigset_t *set, int _sig) | |
14001 | { | |
14002 | unsigned long sig = _sig - 1; | |
14003 | set->sig[sig / 32] |= 1 << (sig % 32); | |
14004 | } | |
/* Clear bit (_sig - 1) in the signal set using the x86 `btr` instruction;
 * used when _sig is not a compile-time constant. */
static inline __attribute__((always_inline)) void __gen_sigdelset(sigset_t *set, int _sig)
{
	asm("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
}
14009 | static inline __attribute__((always_inline)) void __const_sigdelset(sigset_t *set, int _sig) | |
14010 | { | |
14011 | unsigned long sig = _sig - 1; | |
14012 | set->sig[sig / 32] &= ~(1 << (sig % 32)); | |
14013 | } | |
14014 | static inline __attribute__((always_inline)) int __const_sigismember(sigset_t *set, int _sig) | |
14015 | { | |
14016 | unsigned long sig = _sig - 1; | |
14017 | return 1 & (set->sig[sig / 32] >> (sig % 32)); | |
14018 | } | |
/* Test bit (_sig - 1) via x86 `bt`; `sbb ret,ret` turns the carry flag
 * into 0 or -1 (any non-zero result means "member"). */
static inline __attribute__((always_inline)) int __gen_sigismember(sigset_t *set, int _sig)
{
	int ret;
	asm("btl %2,%1\n\tsbbl %0,%0"
		: "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
	return ret;
}
/* Index of the lowest set bit in @word, via x86 `bsf`.
 * NOTE(review): `bsf` leaves the destination undefined when word == 0 --
 * callers presumably never pass 0; verify. */
static inline __attribute__((always_inline)) int sigfindinword(unsigned long word)
{
	asm("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
	return word;
}
struct pt_regs;
/* Value delivered with a signal: either an int or a pointer. */
typedef union sigval {
	int sival_int;
	void *sival_ptr;
} sigval_t;
/*
 * Signal payload.  The union pads the structure to 128 bytes; only the
 * member matching the signal's origin (kill, POSIX timer, realtime
 * queue, SIGCHLD, fault, poll) is meaningful.
 */
typedef struct siginfo {
	int si_signo;
	int si_errno;
	int si_code;		/* origin code; negative means user-generated */
	union {
		int _pad[((128 - (3 * sizeof(int))) / sizeof(int))];
		struct {		/* kill(), sigsend() */
			__kernel_pid_t _pid;
			__kernel_uid32_t _uid;
		} _kill;
		struct {		/* POSIX timers */
			__kernel_timer_t _tid;
			int _overrun;
			char _pad[sizeof( __kernel_uid32_t) - sizeof(int)];
			sigval_t _sigval;
			int _sys_private;
		} _timer;
		struct {		/* realtime signals */
			__kernel_pid_t _pid;
			__kernel_uid32_t _uid;
			sigval_t _sigval;
		} _rt;
		struct {		/* SIGCHLD */
			__kernel_pid_t _pid;
			__kernel_uid32_t _uid;
			int _status;
			__kernel_clock_t _utime;
			__kernel_clock_t _stime;
		} _sigchld;
		struct {		/* SIGSEGV/SIGBUS/SIGILL/SIGFPE */
			void *_addr;
			short _addr_lsb;
		} _sigfault;
		struct {		/* SIGPOLL */
			long _band;
			int _fd;
		} _sigpoll;
	} _sifields;
} siginfo_t;
/* Asynchronous notification request (timers, AIO, mq): how and with
 * what value to notify, padded to 64 bytes. */
typedef struct sigevent {
	sigval_t sigev_value;
	int sigev_signo;
	int sigev_notify;
	union {
		int _pad[((64 - (sizeof(int) * 2 + sizeof(sigval_t))) / sizeof(int))];
		int _tid;
		struct {
			void (*_function)(sigval_t);
			void *_attribute;
		} _sigev_thread;
	} _sigev_un;
} sigevent_t;
struct siginfo;
void do_schedule_next_timer(struct siginfo *info);
/*
 * Copy a siginfo.  User-generated signals (si_code < 0) may carry data
 * anywhere in the union, so the whole structure is copied; otherwise
 * only the three header ints plus the largest kernel-used member
 * (_sigchld) are copied.  The branch is wrapped in expanded ftrace
 * branch-profiling instrumentation (include/asm-generic/siginfo.h:289).
 */
static inline __attribute__((always_inline)) void copy_siginfo(struct siginfo *to, struct siginfo *from)
{
	if (__builtin_constant_p(((from->si_code < 0))) ? !!((from->si_code < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/siginfo.h", .line = 289, }; ______r = !!((from->si_code < 0)); ______f.miss_hit[______r]++; ______r; }))
		__builtin_memcpy(to, from, sizeof(*to));
	else
		__builtin_memcpy(to, from, (3 * sizeof(int)) + sizeof(from->_sifields._sigchld));
}
extern int copy_siginfo_to_user(struct siginfo *to, struct siginfo *from);
struct task_struct;
extern int print_fatal_signals;
/* One queued signal instance with its payload and accounting owner. */
struct sigqueue {
	struct list_head list;
	int flags;
	siginfo_t info;
	struct user_struct *user;	/* charged for the queue slot */
};
/* Set of pending signals: queued sigqueue entries plus a summary mask. */
struct sigpending {
	struct list_head list;
	sigset_t signal;
};
/*
 * Return non-zero if no signal is pending in @set.
 * (64 / 32) is the word count _NSIG_WORDS (2 in this configuration);
 * the switch ORs all words together.  The undefined extern function is
 * a link-time assertion for unsupported word counts.
 */
static inline __attribute__((always_inline)) int sigisemptyset(sigset_t *set)
{
	extern void _NSIG_WORDS_is_unsupported_size(void);
	switch ((64 / 32)) {
	case 4:
		return (set->sig[3] | set->sig[2] |
			set->sig[1] | set->sig[0]) == 0;
	case 2:
		return (set->sig[1] | set->sig[0]) == 0;
	case 1:
		return set->sig[0] == 0;
	default:
		_NSIG_WORDS_is_unsupported_size();
		return 0;
	}
}
/* Word-wise set operations expanded from _SIG_SET_OP-style macros.
 * Each switch deliberately falls through from higher word counts to
 * lower ones so all _NSIG_WORDS words are processed. */
/* r = a | b */
static inline __attribute__((always_inline)) void sigorsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) | (b3)); r->sig[2] = ((a2) | (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) | (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) | (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
/* r = a & b */
static inline __attribute__((always_inline)) void sigandsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & (b3)); r->sig[2] = ((a2) & (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
/* r = a & ~b */
static inline __attribute__((always_inline)) void sigandnsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & ~(b3)); r->sig[2] = ((a2) & ~(b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & ~(b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & ~(b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
/* set = ~set */
static inline __attribute__((always_inline)) void signotset(sigset_t *set) { extern void _NSIG_WORDS_is_unsupported_size(void); switch ((64 / 32)) { case 4: set->sig[3] = (~(set->sig[3])); set->sig[2] = (~(set->sig[2])); case 2: set->sig[1] = (~(set->sig[1])); case 1: set->sig[0] = (~(set->sig[0])); break; default: _NSIG_WORDS_is_unsupported_size(); } }
/* Clear all signals in @set.  case 2 deliberately falls through into
 * case 1 so both words are zeroed when _NSIG_WORDS == 2. */
static inline __attribute__((always_inline)) void sigemptyset(sigset_t *set)
{
	switch ((64 / 32)) {
	default:
		__builtin_memset(set, 0, sizeof(sigset_t));
		break;
	case 2: set->sig[1] = 0;
		/* fallthrough */
	case 1: set->sig[0] = 0;
		break;
	}
}
/* Set all signals in @set (every bit to 1).  case 2 deliberately falls
 * through into case 1. */
static inline __attribute__((always_inline)) void sigfillset(sigset_t *set)
{
	switch ((64 / 32)) {
	default:
		__builtin_memset(set, -1, sizeof(sigset_t));
		break;
	case 2: set->sig[1] = -1;
		/* fallthrough */
	case 1: set->sig[0] = -1;
		break;
	}
}
14152 | static inline __attribute__((always_inline)) void sigaddsetmask(sigset_t *set, unsigned long mask) | |
14153 | { | |
14154 | set->sig[0] |= mask; | |
14155 | } | |
14156 | static inline __attribute__((always_inline)) void sigdelsetmask(sigset_t *set, unsigned long mask) | |
14157 | { | |
14158 | set->sig[0] &= ~mask; | |
14159 | } | |
14160 | static inline __attribute__((always_inline)) int sigtestsetmask(sigset_t *set, unsigned long mask) | |
14161 | { | |
14162 | return (set->sig[0] & mask) != 0; | |
14163 | } | |
/* Initialize the set to @mask in word 0 and zero elsewhere.  case 2
 * deliberately falls through into case 1 (which has nothing left to do). */
static inline __attribute__((always_inline)) void siginitset(sigset_t *set, unsigned long mask)
{
	set->sig[0] = mask;
	switch ((64 / 32)) {
	default:
		__builtin_memset(&set->sig[1], 0, sizeof(long)*((64 / 32)-1));
		break;
	case 2: set->sig[1] = 0;
		/* fallthrough */
	case 1: ;
	}
}
/* Initialize the set to ~@mask in word 0 and all-ones elsewhere.
 * Fallthrough mirrors siginitset(). */
static inline __attribute__((always_inline)) void siginitsetinv(sigset_t *set, unsigned long mask)
{
	set->sig[0] = ~mask;
	switch ((64 / 32)) {
	default:
		__builtin_memset(&set->sig[1], -1, sizeof(long)*((64 / 32)-1));
		break;
	case 2: set->sig[1] = -1;
		/* fallthrough */
	case 1: ;
	}
}
/* Reset a sigpending: empty summary mask and an empty queue list. */
static inline __attribute__((always_inline)) void init_sigpending(struct sigpending *sig)
{
	sigemptyset(&sig->signal);
	INIT_LIST_HEAD(&sig->list);
}
14191 | extern void flush_sigqueue(struct sigpending *queue); | |
/* Return 1 when @sig is within the supported range (<= 64, i.e. _NSIG),
 * 0 otherwise.  0 is accepted (the "test only" signal number). */
static inline __attribute__((always_inline)) int valid_signal(unsigned long sig)
{
	return (sig > 64) ? 0 : 1;
}
14196 | struct timespec; | |
14197 | struct pt_regs; | |
14198 | extern int next_signal(struct sigpending *pending, sigset_t *mask); | |
14199 | extern int do_send_sig_info(int sig, struct siginfo *info, | |
14200 | struct task_struct *p, bool group); | |
14201 | extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); | |
14202 | extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); | |
14203 | extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, | |
14204 | siginfo_t *info); | |
14205 | extern long do_sigpending(void *, unsigned long); | |
14206 | extern int do_sigtimedwait(const sigset_t *, siginfo_t *, | |
14207 | const struct timespec *); | |
14208 | extern int sigprocmask(int, sigset_t *, sigset_t *); | |
14209 | extern void set_current_blocked(const sigset_t *); | |
14210 | extern int show_unhandled_signals; | |
14211 | extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); | |
14212 | extern void exit_signals(struct task_struct *tsk); | |
14213 | extern struct kmem_cache *sighand_cachep; | |
14214 | int unhandled_signal(struct task_struct *tsk, int sig); | |
14215 | void signals_init(void); | |
/* Kinds of pid a task can be attached by: its own pid, its process
 * group, and its session. */
enum pid_type
{
	PIDTYPE_PID,
	PIDTYPE_PGID,
	PIDTYPE_SID,
	PIDTYPE_MAX
};
/* One (number, namespace) pair of a pid; chained in the pid hash. */
struct upid {
	int nr;				/* numeric value in this namespace */
	struct pid_namespace *ns;
	struct hlist_node pid_chain;
};
/* Reference-counted pid object; numbers[] holds one upid per namespace
 * level (flexible tail, declared with one element). */
struct pid
{
	atomic_t count;
	unsigned int level;		/* deepest namespace level */
	struct hlist_head tasks[PIDTYPE_MAX];	/* tasks using this pid, per type */
	struct rcu_head rcu;
	struct upid numbers[1];
};
extern struct pid init_struct_pid;
/* Link from a task to one of its pids. */
struct pid_link
{
	struct hlist_node node;
	struct pid *pid;
};
/*
 * Take an extra reference on @pid (NULL is tolerated and returned as-is).
 * The NULL check is wrapped in expanded ftrace branch-profiling
 * instrumentation (include/linux/pid.h:77).
 */
static inline __attribute__((always_inline)) struct pid *get_pid(struct pid *pid)
{
	if (__builtin_constant_p(((pid))) ? !!((pid)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/pid.h", .line = 77, }; ______r = !!((pid)); ______f.miss_hit[______r]++; ______r; }))
		atomic_inc(&pid->count);
	return pid;
}
14248 | extern void put_pid(struct pid *pid); | |
14249 | extern struct task_struct *pid_task(struct pid *pid, enum pid_type); | |
14250 | extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type); | |
14251 | extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type); | |
14252 | extern void attach_pid(struct task_struct *task, enum pid_type type, | |
14253 | struct pid *pid); | |
14254 | extern void detach_pid(struct task_struct *task, enum pid_type); | |
14255 | extern void change_pid(struct task_struct *task, enum pid_type, | |
14256 | struct pid *pid); | |
14257 | extern void transfer_pid(struct task_struct *old, struct task_struct *new, | |
14258 | enum pid_type); | |
14259 | struct pid_namespace; | |
14260 | extern struct pid_namespace init_pid_ns; | |
14261 | extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns); | |
14262 | extern struct pid *find_vpid(int nr); | |
14263 | extern struct pid *find_get_pid(int nr); | |
14264 | extern struct pid *find_ge_pid(int nr, struct pid_namespace *); | |
14265 | int next_pidmap(struct pid_namespace *pid_ns, unsigned int last); | |
14266 | extern struct pid *alloc_pid(struct pid_namespace *ns); | |
14267 | extern void free_pid(struct pid *pid); | |
14268 | static inline __attribute__((always_inline)) struct pid_namespace *ns_of_pid(struct pid *pid) | |
14269 | { | |
14270 | struct pid_namespace *ns = ((void *)0); | |
14271 | if (__builtin_constant_p(((pid))) ? !!((pid)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/pid.h", .line = 138, }; ______r = !!((pid)); ______f.miss_hit[______r]++; ______r; })) | |
14272 | ns = pid->numbers[pid->level].ns; | |
14273 | return ns; | |
14274 | } | |
14275 | static inline __attribute__((always_inline)) bool is_child_reaper(struct pid *pid) | |
14276 | { | |
14277 | return pid->numbers[pid->level].nr == 1; | |
14278 | } | |
14279 | static inline __attribute__((always_inline)) pid_t pid_nr(struct pid *pid) | |
14280 | { | |
14281 | pid_t nr = 0; | |
14282 | if (__builtin_constant_p(((pid))) ? !!((pid)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/pid.h", .line = 168, }; ______r = !!((pid)); ______f.miss_hit[______r]++; ______r; })) | |
14283 | nr = pid->numbers[0].nr; | |
14284 | return nr; | |
14285 | } | |
14286 | pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns); | |
14287 | pid_t pid_vnr(struct pid *pid); | |
14288 | struct percpu_counter { | |
14289 | spinlock_t lock; | |
14290 | s64 count; | |
14291 | struct list_head list; | |
14292 | s32 *counters; | |
14293 | }; | |
14294 | extern int percpu_counter_batch; | |
14295 | int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, | |
14296 | struct lock_class_key *key); | |
14297 | void percpu_counter_destroy(struct percpu_counter *fbc); | |
14298 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount); | |
14299 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); | |
14300 | s64 __percpu_counter_sum(struct percpu_counter *fbc); | |
14301 | int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs); | |
14302 | static inline __attribute__((always_inline)) void percpu_counter_add(struct percpu_counter *fbc, s64 amount) | |
14303 | { | |
14304 | __percpu_counter_add(fbc, amount, percpu_counter_batch); | |
14305 | } | |
14306 | static inline __attribute__((always_inline)) s64 percpu_counter_sum_positive(struct percpu_counter *fbc) | |
14307 | { | |
14308 | s64 ret = __percpu_counter_sum(fbc); | |
14309 | return ret < 0 ? 0 : ret; | |
14310 | } | |
14311 | static inline __attribute__((always_inline)) s64 percpu_counter_sum(struct percpu_counter *fbc) | |
14312 | { | |
14313 | return __percpu_counter_sum(fbc); | |
14314 | } | |
14315 | static inline __attribute__((always_inline)) s64 percpu_counter_read(struct percpu_counter *fbc) | |
14316 | { | |
14317 | return fbc->count; | |
14318 | } | |
14319 | static inline __attribute__((always_inline)) s64 percpu_counter_read_positive(struct percpu_counter *fbc) | |
14320 | { | |
14321 | s64 ret = fbc->count; | |
14322 | __asm__ __volatile__("": : :"memory"); | |
14323 | if (__builtin_constant_p(((ret >= 0))) ? !!((ret >= 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/percpu_counter.h", .line = 76, }; ______r = !!((ret >= 0)); ______f.miss_hit[______r]++; ______r; })) | |
14324 | return ret; | |
14325 | return 0; | |
14326 | } | |
14327 | static inline __attribute__((always_inline)) int percpu_counter_initialized(struct percpu_counter *fbc) | |
14328 | { | |
14329 | return (fbc->counters != ((void *)0)); | |
14330 | } | |
14331 | static inline __attribute__((always_inline)) void percpu_counter_inc(struct percpu_counter *fbc) | |
14332 | { | |
14333 | percpu_counter_add(fbc, 1); | |
14334 | } | |
14335 | static inline __attribute__((always_inline)) void percpu_counter_dec(struct percpu_counter *fbc) | |
14336 | { | |
14337 | percpu_counter_add(fbc, -1); | |
14338 | } | |
14339 | static inline __attribute__((always_inline)) void percpu_counter_sub(struct percpu_counter *fbc, s64 amount) | |
14340 | { | |
14341 | percpu_counter_add(fbc, -amount); | |
14342 | } | |
14343 | struct prop_global { | |
14344 | int shift; | |
14345 | struct percpu_counter events; | |
14346 | }; | |
14347 | struct prop_descriptor { | |
14348 | int index; | |
14349 | struct prop_global pg[2]; | |
14350 | struct mutex mutex; | |
14351 | }; | |
14352 | int prop_descriptor_init(struct prop_descriptor *pd, int shift); | |
14353 | void prop_change_shift(struct prop_descriptor *pd, int new_shift); | |
14354 | struct prop_local_percpu { | |
14355 | struct percpu_counter events; | |
14356 | int shift; | |
14357 | unsigned long period; | |
14358 | spinlock_t lock; | |
14359 | }; | |
14360 | int prop_local_init_percpu(struct prop_local_percpu *pl); | |
14361 | void prop_local_destroy_percpu(struct prop_local_percpu *pl); | |
14362 | void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl); | |
14363 | void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl, | |
14364 | long *numerator, long *denominator); | |
14365 | static inline __attribute__((always_inline)) | |
14366 | void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl) | |
14367 | { | |
14368 | unsigned long flags; | |
14369 | do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); | |
14370 | __prop_inc_percpu(pd, pl); | |
14371 | do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/proportions.h", .line = 77, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); | |
14372 | } | |
14373 | void __prop_inc_percpu_max(struct prop_descriptor *pd, | |
14374 | struct prop_local_percpu *pl, long frac); | |
14375 | struct prop_local_single { | |
14376 | unsigned long events; | |
14377 | unsigned long period; | |
14378 | int shift; | |
14379 | spinlock_t lock; | |
14380 | }; | |
14381 | int prop_local_init_single(struct prop_local_single *pl); | |
14382 | void prop_local_destroy_single(struct prop_local_single *pl); | |
14383 | void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl); | |
14384 | void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl, | |
14385 | long *numerator, long *denominator); | |
14386 | static inline __attribute__((always_inline)) | |
14387 | void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl) | |
14388 | { | |
14389 | unsigned long flags; | |
14390 | do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); | |
14391 | __prop_inc_single(pd, pl); | |
14392 | do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/proportions.h", .line = 129, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); | |
14393 | } | |
14394 | typedef struct { int mode; } seccomp_t; | |
14395 | extern void __secure_computing(int); | |
14396 | static inline __attribute__((always_inline)) void secure_computing(int this_syscall) | |
14397 | { | |
14398 | if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 8)) ? !!(test_ti_thread_flag(current_thread_info(), 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seccomp.h", .line = 15, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 8)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 8)) ? !!(test_ti_thread_flag(current_thread_info(), 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seccomp.h", .line = 15, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 8)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/seccomp.h", .line = 15, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 8)) ? !!(test_ti_thread_flag(current_thread_info(), 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seccomp.h", .line = 15, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 8)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
14399 | __secure_computing(this_syscall); | |
14400 | } | |
14401 | extern long prctl_get_seccomp(void); | |
14402 | extern long prctl_set_seccomp(unsigned long); | |
14403 | static inline __attribute__((always_inline)) void __list_add_rcu(struct list_head *new, | |
14404 | struct list_head *prev, struct list_head *next) | |
14405 | { | |
14406 | new->next = next; | |
14407 | new->prev = prev; | |
14408 | ({ if (__builtin_constant_p(((!__builtin_constant_p((new)) || (((new)) != ((void *)0))))) ? !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 38, }; ______r = !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct list_head **)(&(prev)->next))))) = (typeof(*(new)) *)((new)); }); | |
14409 | next->prev = new; | |
14410 | } | |
14411 | static inline __attribute__((always_inline)) void list_add_rcu(struct list_head *new, struct list_head *head) | |
14412 | { | |
14413 | __list_add_rcu(new, head, head->next); | |
14414 | } | |
14415 | static inline __attribute__((always_inline)) void list_add_tail_rcu(struct list_head *new, | |
14416 | struct list_head *head) | |
14417 | { | |
14418 | __list_add_rcu(new, head->prev, head); | |
14419 | } | |
14420 | static inline __attribute__((always_inline)) void list_del_rcu(struct list_head *entry) | |
14421 | { | |
14422 | __list_del(entry->prev, entry->next); | |
14423 | entry->prev = ((void *) 0x00200200 + (0x0UL)); | |
14424 | } | |
14425 | static inline __attribute__((always_inline)) void hlist_del_init_rcu(struct hlist_node *n) | |
14426 | { | |
14427 | if (__builtin_constant_p(((!hlist_unhashed(n)))) ? !!((!hlist_unhashed(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 137, }; ______r = !!((!hlist_unhashed(n))); ______f.miss_hit[______r]++; ______r; })) { | |
14428 | __hlist_del(n); | |
14429 | n->pprev = ((void *)0); | |
14430 | } | |
14431 | } | |
14432 | static inline __attribute__((always_inline)) void list_replace_rcu(struct list_head *old, | |
14433 | struct list_head *new) | |
14434 | { | |
14435 | new->next = old->next; | |
14436 | new->prev = old->prev; | |
14437 | ({ if (__builtin_constant_p(((!__builtin_constant_p((new)) || (((new)) != ((void *)0))))) ? !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 156, }; ______r = !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct list_head **)(&(new->prev)->next))))) = (typeof(*(new)) *)((new)); }); | |
14438 | new->next->prev = new; | |
14439 | old->prev = ((void *) 0x00200200 + (0x0UL)); | |
14440 | } | |
14441 | static inline __attribute__((always_inline)) void list_splice_init_rcu(struct list_head *list, | |
14442 | struct list_head *head, | |
14443 | void (*sync)(void)) | |
14444 | { | |
14445 | struct list_head *first = list->next; | |
14446 | struct list_head *last = list->prev; | |
14447 | struct list_head *at = head->next; | |
14448 | if (__builtin_constant_p(((list_empty(head)))) ? !!((list_empty(head))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 186, }; ______r = !!((list_empty(head))); ______f.miss_hit[______r]++; ______r; })) | |
14449 | return; | |
14450 | INIT_LIST_HEAD(list); | |
14451 | sync(); | |
14452 | last->next = at; | |
14453 | ({ if (__builtin_constant_p(((!__builtin_constant_p((first)) || (((first)) != ((void *)0))))) ? !!((!__builtin_constant_p((first)) || (((first)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 211, }; ______r = !!((!__builtin_constant_p((first)) || (((first)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct list_head **)(&(head)->next))))) = (typeof(*(first)) *)((first)); }); | |
14454 | first->prev = head; | |
14455 | at->prev = last; | |
14456 | } | |
14457 | static inline __attribute__((always_inline)) void hlist_del_rcu(struct hlist_node *n) | |
14458 | { | |
14459 | __hlist_del(n); | |
14460 | n->pprev = ((void *) 0x00200200 + (0x0UL)); | |
14461 | } | |
14462 | static inline __attribute__((always_inline)) void hlist_replace_rcu(struct hlist_node *old, | |
14463 | struct hlist_node *new) | |
14464 | { | |
14465 | struct hlist_node *next = old->next; | |
14466 | new->next = next; | |
14467 | new->pprev = old->pprev; | |
14468 | ({ if (__builtin_constant_p(((!__builtin_constant_p((new)) || (((new)) != ((void *)0))))) ? !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 329, }; ______r = !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); ((*(struct hlist_node **)new->pprev)) = (typeof(*(new)) *)((new)); }); | |
14469 | if (__builtin_constant_p(((next))) ? !!((next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 330, }; ______r = !!((next)); ______f.miss_hit[______r]++; ______r; })) | |
14470 | new->next->pprev = &new->next; | |
14471 | old->pprev = ((void *) 0x00200200 + (0x0UL)); | |
14472 | } | |
14473 | static inline __attribute__((always_inline)) void hlist_add_head_rcu(struct hlist_node *n, | |
14474 | struct hlist_head *h) | |
14475 | { | |
14476 | struct hlist_node *first = h->first; | |
14477 | n->next = first; | |
14478 | n->pprev = &h->first; | |
14479 | ({ if (__builtin_constant_p(((!__builtin_constant_p((n)) || (((n)) != ((void *)0))))) ? !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 368, }; ______r = !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct hlist_node **)(&(h)->first))))) = (typeof(*(n)) *)((n)); }); | |
14480 | if (__builtin_constant_p(((first))) ? !!((first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 369, }; ______r = !!((first)); ______f.miss_hit[______r]++; ______r; })) | |
14481 | first->pprev = &n->next; | |
14482 | } | |
14483 | static inline __attribute__((always_inline)) void hlist_add_before_rcu(struct hlist_node *n, | |
14484 | struct hlist_node *next) | |
14485 | { | |
14486 | n->pprev = next->pprev; | |
14487 | n->next = next; | |
14488 | ({ if (__builtin_constant_p(((!__builtin_constant_p((n)) || (((n)) != ((void *)0))))) ? !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 396, }; ______r = !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct hlist_node **)((n)->pprev))))) = (typeof(*(n)) *)((n)); }); | |
14489 | next->pprev = &n->next; | |
14490 | } | |
14491 | static inline __attribute__((always_inline)) void hlist_add_after_rcu(struct hlist_node *prev, | |
14492 | struct hlist_node *n) | |
14493 | { | |
14494 | n->next = prev->next; | |
14495 | n->pprev = &prev->next; | |
14496 | ({ if (__builtin_constant_p(((!__builtin_constant_p((n)) || (((n)) != ((void *)0))))) ? !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 423, }; ______r = !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct hlist_node **)(&(prev)->next))))) = (typeof(*(n)) *)((n)); }); | |
14497 | if (__builtin_constant_p(((n->next))) ? !!((n->next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 424, }; ______r = !!((n->next)); ______f.miss_hit[______r]++; ______r; })) | |
14498 | n->next->pprev = &n->next; | |
14499 | } | |
14500 | struct plist_head { | |
14501 | struct list_head node_list; | |
14502 | raw_spinlock_t *rawlock; | |
14503 | spinlock_t *spinlock; | |
14504 | }; | |
14505 | struct plist_node { | |
14506 | int prio; | |
14507 | struct list_head prio_list; | |
14508 | struct list_head node_list; | |
14509 | }; | |
14510 | static inline __attribute__((always_inline)) void | |
14511 | plist_head_init(struct plist_head *head, spinlock_t *lock) | |
14512 | { | |
14513 | INIT_LIST_HEAD(&head->node_list); | |
14514 | head->spinlock = lock; | |
14515 | head->rawlock = ((void *)0); | |
14516 | } | |
14517 | static inline __attribute__((always_inline)) void | |
14518 | plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock) | |
14519 | { | |
14520 | INIT_LIST_HEAD(&head->node_list); | |
14521 | head->rawlock = lock; | |
14522 | head->spinlock = ((void *)0); | |
14523 | } | |
14524 | static inline __attribute__((always_inline)) void plist_node_init(struct plist_node *node, int prio) | |
14525 | { | |
14526 | node->prio = prio; | |
14527 | INIT_LIST_HEAD(&node->prio_list); | |
14528 | INIT_LIST_HEAD(&node->node_list); | |
14529 | } | |
14530 | extern void plist_add(struct plist_node *node, struct plist_head *head); | |
14531 | extern void plist_del(struct plist_node *node, struct plist_head *head); | |
14532 | static inline __attribute__((always_inline)) int plist_head_empty(const struct plist_head *head) | |
14533 | { | |
14534 | return list_empty(&head->node_list); | |
14535 | } | |
14536 | static inline __attribute__((always_inline)) int plist_node_empty(const struct plist_node *node) | |
14537 | { | |
14538 | return list_empty(&node->node_list); | |
14539 | } | |
14540 | static inline __attribute__((always_inline)) struct plist_node *plist_first(const struct plist_head *head) | |
14541 | { | |
14542 | return ({ const typeof( ((struct plist_node *)0)->node_list ) *__mptr = (head->node_list.next); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct plist_node,node_list) );}) | |
14543 | ; | |
14544 | } | |
14545 | static inline __attribute__((always_inline)) struct plist_node *plist_last(const struct plist_head *head) | |
14546 | { | |
14547 | return ({ const typeof( ((struct plist_node *)0)->node_list ) *__mptr = (head->node_list.prev); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct plist_node,node_list) );}) | |
14548 | ; | |
14549 | } | |
14550 | extern int max_lock_depth; | |
14551 | struct rt_mutex { | |
14552 | raw_spinlock_t wait_lock; | |
14553 | struct plist_head wait_list; | |
14554 | struct task_struct *owner; | |
14555 | int save_state; | |
14556 | const char *name, *file; | |
14557 | int line; | |
14558 | void *magic; | |
14559 | }; | |
14560 | struct rt_mutex_waiter; | |
14561 | struct hrtimer_sleeper; | |
14562 | extern int rt_mutex_debug_check_no_locks_freed(const void *from, | |
14563 | unsigned long len); | |
14564 | extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task); | |
14565 | extern void rt_mutex_debug_task_free(struct task_struct *tsk); | |
14566 | static inline __attribute__((always_inline)) int rt_mutex_is_locked(struct rt_mutex *lock) | |
14567 | { | |
14568 | return lock->owner != ((void *)0); | |
14569 | } | |
14570 | extern void __rt_mutex_init(struct rt_mutex *lock, const char *name); | |
14571 | extern void rt_mutex_destroy(struct rt_mutex *lock); | |
14572 | extern void rt_mutex_lock(struct rt_mutex *lock); | |
14573 | extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, | |
14574 | int detect_deadlock); | |
14575 | extern int rt_mutex_timed_lock(struct rt_mutex *lock, | |
14576 | struct hrtimer_sleeper *timeout, | |
14577 | int detect_deadlock); | |
14578 | extern int rt_mutex_trylock(struct rt_mutex *lock); | |
14579 | extern void rt_mutex_unlock(struct rt_mutex *lock); | |
14580 | struct rusage { | |
14581 | struct timeval ru_utime; | |
14582 | struct timeval ru_stime; | |
14583 | long ru_maxrss; | |
14584 | long ru_ixrss; | |
14585 | long ru_idrss; | |
14586 | long ru_isrss; | |
14587 | long ru_minflt; | |
14588 | long ru_majflt; | |
14589 | long ru_nswap; | |
14590 | long ru_inblock; | |
14591 | long ru_oublock; | |
14592 | long ru_msgsnd; | |
14593 | long ru_msgrcv; | |
14594 | long ru_nsignals; | |
14595 | long ru_nvcsw; | |
14596 | long ru_nivcsw; | |
14597 | }; | |
14598 | struct rlimit { | |
14599 | unsigned long rlim_cur; | |
14600 | unsigned long rlim_max; | |
14601 | }; | |
14602 | struct rlimit64 { | |
14603 | __u64 rlim_cur; | |
14604 | __u64 rlim_max; | |
14605 | }; | |
14606 | struct task_struct; | |
14607 | int getrusage(struct task_struct *p, int who, struct rusage *ru); | |
14608 | int do_prlimit(struct task_struct *tsk, unsigned int resource, | |
14609 | struct rlimit *new_rlim, struct rlimit *old_rlim); | |
14610 | struct timerqueue_node { | |
14611 | struct rb_node node; | |
14612 | ktime_t expires; | |
14613 | }; | |
14614 | struct timerqueue_head { | |
14615 | struct rb_root head; | |
14616 | struct timerqueue_node *next; | |
14617 | }; | |
14618 | extern void timerqueue_add(struct timerqueue_head *head, | |
14619 | struct timerqueue_node *node); | |
14620 | extern void timerqueue_del(struct timerqueue_head *head, | |
14621 | struct timerqueue_node *node); | |
14622 | extern struct timerqueue_node *timerqueue_iterate_next( | |
14623 | struct timerqueue_node *node); | |
14624 | static inline __attribute__((always_inline)) | |
14625 | struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) | |
14626 | { | |
14627 | return head->next; | |
14628 | } | |
14629 | static inline __attribute__((always_inline)) void timerqueue_init(struct timerqueue_node *node) | |
14630 | { | |
14631 | rb_init_node(&node->node); | |
14632 | } | |
14633 | static inline __attribute__((always_inline)) void timerqueue_init_head(struct timerqueue_head *head) | |
14634 | { | |
14635 | head->head = (struct rb_root) { ((void *)0), }; | |
14636 | head->next = ((void *)0); | |
14637 | } | |
14638 | struct hrtimer_clock_base; | |
14639 | struct hrtimer_cpu_base; | |
14640 | enum hrtimer_mode { | |
14641 | HRTIMER_MODE_ABS = 0x0, | |
14642 | HRTIMER_MODE_REL = 0x1, | |
14643 | HRTIMER_MODE_PINNED = 0x02, | |
14644 | HRTIMER_MODE_ABS_PINNED = 0x02, | |
14645 | HRTIMER_MODE_REL_PINNED = 0x03, | |
14646 | }; | |
14647 | enum hrtimer_restart { | |
14648 | HRTIMER_NORESTART, | |
14649 | HRTIMER_RESTART, | |
14650 | }; | |
14651 | struct hrtimer { | |
14652 | struct timerqueue_node node; | |
14653 | ktime_t _softexpires; | |
14654 | enum hrtimer_restart (*function)(struct hrtimer *); | |
14655 | struct hrtimer_clock_base *base; | |
14656 | unsigned long state; | |
14657 | int start_pid; | |
14658 | void *start_site; | |
14659 | char start_comm[16]; | |
14660 | }; | |
14661 | struct hrtimer_sleeper { | |
14662 | struct hrtimer timer; | |
14663 | struct task_struct *task; | |
14664 | }; | |
14665 | struct hrtimer_clock_base { | |
14666 | struct hrtimer_cpu_base *cpu_base; | |
14667 | int index; | |
14668 | clockid_t clockid; | |
14669 | struct timerqueue_head active; | |
14670 | ktime_t resolution; | |
14671 | ktime_t (*get_time)(void); | |
14672 | ktime_t softirq_time; | |
14673 | ktime_t offset; | |
14674 | }; | |
14675 | enum hrtimer_base_type { | |
14676 | HRTIMER_BASE_MONOTONIC, | |
14677 | HRTIMER_BASE_REALTIME, | |
14678 | HRTIMER_BASE_BOOTTIME, | |
14679 | HRTIMER_MAX_CLOCK_BASES, | |
14680 | }; | |
14681 | struct hrtimer_cpu_base { | |
14682 | raw_spinlock_t lock; | |
14683 | unsigned long active_bases; | |
14684 | ktime_t expires_next; | |
14685 | int hres_active; | |
14686 | int hang_detected; | |
14687 | unsigned long nr_events; | |
14688 | unsigned long nr_retries; | |
14689 | unsigned long nr_hangs; | |
14690 | ktime_t max_hang_time; | |
14691 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; | |
14692 | }; | |
14693 | static inline __attribute__((always_inline)) void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) | |
14694 | { | |
14695 | timer->node.expires = time; | |
14696 | timer->_softexpires = time; | |
14697 | } | |
14698 | static inline __attribute__((always_inline)) void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta) | |
14699 | { | |
14700 | timer->_softexpires = time; | |
14701 | timer->node.expires = ktime_add_safe(time, delta); | |
14702 | } | |
14703 | static inline __attribute__((always_inline)) void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta) | |
14704 | { | |
14705 | timer->_softexpires = time; | |
14706 | timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta)); | |
14707 | } | |
14708 | static inline __attribute__((always_inline)) void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) | |
14709 | { | |
14710 | timer->node.expires.tv64 = tv64; | |
14711 | timer->_softexpires.tv64 = tv64; | |
14712 | } | |
14713 | static inline __attribute__((always_inline)) void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) | |
14714 | { | |
14715 | timer->node.expires = ktime_add_safe(timer->node.expires, time); | |
14716 | timer->_softexpires = ktime_add_safe(timer->_softexpires, time); | |
14717 | } | |
14718 | static inline __attribute__((always_inline)) void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns) | |
14719 | { | |
14720 | timer->node.expires = ({ (ktime_t){ .tv64 = (timer->node.expires).tv64 + (ns) }; }); | |
14721 | timer->_softexpires = ({ (ktime_t){ .tv64 = (timer->_softexpires).tv64 + (ns) }; }); | |
14722 | } | |
14723 | static inline __attribute__((always_inline)) ktime_t hrtimer_get_expires(const struct hrtimer *timer) | |
14724 | { | |
14725 | return timer->node.expires; | |
14726 | } | |
14727 | static inline __attribute__((always_inline)) ktime_t hrtimer_get_softexpires(const struct hrtimer *timer) | |
14728 | { | |
14729 | return timer->_softexpires; | |
14730 | } | |
14731 | static inline __attribute__((always_inline)) s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) | |
14732 | { | |
14733 | return timer->node.expires.tv64; | |
14734 | } | |
14735 | static inline __attribute__((always_inline)) s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) | |
14736 | { | |
14737 | return timer->_softexpires.tv64; | |
14738 | } | |
14739 | static inline __attribute__((always_inline)) s64 hrtimer_get_expires_ns(const struct hrtimer *timer) | |
14740 | { | |
14741 | return ((timer->node.expires).tv64); | |
14742 | } | |
14743 | static inline __attribute__((always_inline)) ktime_t hrtimer_expires_remaining(const struct hrtimer *timer) | |
14744 | { | |
14745 | return ({ (ktime_t){ .tv64 = (timer->node.expires).tv64 - (timer->base->get_time()).tv64 }; }); | |
14746 | } | |
14747 | struct clock_event_device; | |
14748 | extern void hrtimer_interrupt(struct clock_event_device *dev); | |
14749 | static inline __attribute__((always_inline)) ktime_t hrtimer_cb_get_time(struct hrtimer *timer) | |
14750 | { | |
14751 | return timer->base->get_time(); | |
14752 | } | |
14753 | static inline __attribute__((always_inline)) int hrtimer_is_hres_active(struct hrtimer *timer) | |
14754 | { | |
14755 | return timer->base->cpu_base->hres_active; | |
14756 | } | |
14757 | extern void hrtimer_peek_ahead_timers(void); | |
14758 | extern void clock_was_set(void); | |
14759 | extern void timerfd_clock_was_set(void); | |
14760 | extern void hrtimers_resume(void); | |
14761 | extern ktime_t ktime_get(void); | |
14762 | extern ktime_t ktime_get_real(void); | |
14763 | extern ktime_t ktime_get_boottime(void); | |
14764 | extern ktime_t ktime_get_monotonic_offset(void); | |
14765 | extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tick_device) tick_cpu_device; | |
14766 | extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, | |
14767 | enum hrtimer_mode mode); | |
14768 | static inline __attribute__((always_inline)) void hrtimer_init_on_stack(struct hrtimer *timer, | |
14769 | clockid_t which_clock, | |
14770 | enum hrtimer_mode mode) | |
14771 | { | |
14772 | hrtimer_init(timer, which_clock, mode); | |
14773 | } | |
/* Counterpart of hrtimer_init_on_stack(): a no-op in this configuration (no debugobjects teardown needed). */
static inline __attribute__((always_inline)) void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
14775 | extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, | |
14776 | const enum hrtimer_mode mode); | |
14777 | extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | |
14778 | unsigned long range_ns, const enum hrtimer_mode mode); | |
14779 | extern int | |
14780 | __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | |
14781 | unsigned long delta_ns, | |
14782 | const enum hrtimer_mode mode, int wakeup); | |
14783 | extern int hrtimer_cancel(struct hrtimer *timer); | |
14784 | extern int hrtimer_try_to_cancel(struct hrtimer *timer); | |
14785 | static inline __attribute__((always_inline)) int hrtimer_start_expires(struct hrtimer *timer, | |
14786 | enum hrtimer_mode mode) | |
14787 | { | |
14788 | unsigned long delta; | |
14789 | ktime_t soft, hard; | |
14790 | soft = hrtimer_get_softexpires(timer); | |
14791 | hard = hrtimer_get_expires(timer); | |
14792 | delta = ((({ (ktime_t){ .tv64 = (hard).tv64 - (soft).tv64 }; })).tv64); | |
14793 | return hrtimer_start_range_ns(timer, soft, delta, mode); | |
14794 | } | |
14795 | static inline __attribute__((always_inline)) int hrtimer_restart(struct hrtimer *timer) | |
14796 | { | |
14797 | return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); | |
14798 | } | |
14799 | extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); | |
14800 | extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); | |
14801 | extern ktime_t hrtimer_get_next_event(void); | |
14802 | static inline __attribute__((always_inline)) int hrtimer_active(const struct hrtimer *timer) | |
14803 | { | |
14804 | return timer->state != 0x00; | |
14805 | } | |
14806 | static inline __attribute__((always_inline)) int hrtimer_is_queued(struct hrtimer *timer) | |
14807 | { | |
14808 | return timer->state & 0x01; | |
14809 | } | |
/* Nonzero while the timer callback is executing (state bit 0x02); note the raw masked value, not 0/1, is returned. */
static inline __attribute__((always_inline)) int hrtimer_callback_running(struct hrtimer *timer)
{
	return timer->state & 0x02;
}
14814 | extern u64 | |
14815 | hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); | |
14816 | static inline __attribute__((always_inline)) u64 hrtimer_forward_now(struct hrtimer *timer, | |
14817 | ktime_t interval) | |
14818 | { | |
14819 | return hrtimer_forward(timer, timer->base->get_time(), interval); | |
14820 | } | |
14821 | extern long hrtimer_nanosleep(struct timespec *rqtp, | |
14822 | struct timespec *rmtp, | |
14823 | const enum hrtimer_mode mode, | |
14824 | const clockid_t clockid); | |
14825 | extern long hrtimer_nanosleep_restart(struct restart_block *restart_block); | |
14826 | extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, | |
14827 | struct task_struct *tsk); | |
14828 | extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, | |
14829 | const enum hrtimer_mode mode); | |
14830 | extern int schedule_hrtimeout_range_clock(ktime_t *expires, | |
14831 | unsigned long delta, const enum hrtimer_mode mode, int clock); | |
14832 | extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); | |
14833 | extern void hrtimer_run_queues(void); | |
14834 | extern void hrtimer_run_pending(void); | |
14835 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) hrtimers_init(void); | |
14836 | extern u64 ktime_divns(const ktime_t kt, s64 div); | |
14837 | extern void sysrq_timer_list_show(void); | |
/* Per-task I/O accounting counters (cumulative since task start). */
struct task_io_accounting {
	u64 rchar;			/* presumably bytes passed through read-like calls — TODO confirm */
	u64 wchar;			/* presumably bytes passed through write-like calls — TODO confirm */
	u64 syscr;			/* read syscall count */
	u64 syscw;			/* write syscall count */
	u64 read_bytes;
	u64 write_bytes;
	u64 cancelled_write_bytes;	/* writes accounted then truncated away */
};
/* One latencytop record: a backtrace plus aggregate statistics. */
struct latency_record {
	unsigned long backtrace[12];	/* call chain identifying the blocking site */
	unsigned int count;		/* number of hits for this backtrace */
	unsigned long time;		/* accumulated latency */
	unsigned long max;		/* worst single latency observed */
};
14853 | struct task_struct; | |
14854 | extern int latencytop_enabled; | |
14855 | void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); | |
/*
 * Record a scheduler-induced latency of @usecs for @task when latencytop
 * is enabled. The condition below is the preprocessed expansion of
 * "if (likely(latencytop_enabled))" under CONFIG_PROFILE_ANNOTATED_BRANCHES:
 * it updates per-call-site ftrace_branch_data hit/miss counters as a side
 * effect, so it must not be simplified.
 */
static inline __attribute__((always_inline)) void
account_scheduler_latency(struct task_struct *task, int usecs, int inter)
{
	if (__builtin_constant_p((((__builtin_constant_p(latencytop_enabled) ? !!(latencytop_enabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/latencytop.h", .line = 33, }; ______r = __builtin_expect(!!(latencytop_enabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(latencytop_enabled) ? !!(latencytop_enabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/latencytop.h", .line = 33, }; ______r = __builtin_expect(!!(latencytop_enabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/latencytop.h", .line = 33, }; ______r = !!(((__builtin_constant_p(latencytop_enabled) ? !!(latencytop_enabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/latencytop.h", .line = 33, }; ______r = __builtin_expect(!!(latencytop_enabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
		__account_scheduler_latency(task, usecs, inter);
}
14862 | void clear_all_latency_tracing(struct task_struct *p); | |
14863 | typedef int32_t key_serial_t; | |
14864 | typedef uint32_t key_perm_t; | |
14865 | struct key; | |
14866 | struct selinux_audit_rule; | |
14867 | struct audit_context; | |
14868 | struct kern_ipc_perm; | |
/* SELinux is compiled out in this configuration; always report disabled. */
static inline __attribute__((always_inline)) bool selinux_is_enabled(void)
{
	return false;
}
14873 | struct user_struct; | |
14874 | struct cred; | |
14875 | struct inode; | |
/*
 * Supplementary group list, refcounted via @usage. Up to 32 gids live in
 * @small_block; larger sets use @nblocks indirect blocks (flexible array).
 */
struct group_info {
	atomic_t usage;		/* reference count */
	int ngroups;		/* number of gids held */
	int nblocks;		/* number of entries in blocks[] */
	gid_t small_block[32];	/* inline storage for the common case */
	gid_t *blocks[0];	/* trailing block pointers when ngroups > 32 */
};
14883 | static inline __attribute__((always_inline)) struct group_info *get_group_info(struct group_info *gi) | |
14884 | { | |
14885 | atomic_inc(&gi->usage); | |
14886 | return gi; | |
14887 | } | |
14888 | extern struct group_info *groups_alloc(int); | |
14889 | extern struct group_info init_groups; | |
14890 | extern void groups_free(struct group_info *); | |
14891 | extern int set_current_groups(struct group_info *); | |
14892 | extern int set_groups(struct cred *, struct group_info *); | |
14893 | extern int groups_search(const struct group_info *, gid_t); | |
14894 | extern int in_group_p(gid_t); | |
14895 | extern int in_egroup_p(gid_t); | |
/*
 * Task credentials: identity, capabilities and security state, shared
 * read-mostly and freed via RCU (@rcu).
 */
struct cred {
	atomic_t usage;			/* reference count */
	uid_t uid;			/* real uid */
	gid_t gid;			/* real gid */
	uid_t suid;			/* saved uid */
	gid_t sgid;			/* saved gid */
	uid_t euid;			/* effective uid */
	gid_t egid;			/* effective gid */
	uid_t fsuid;			/* uid for filesystem access checks */
	gid_t fsgid;			/* gid for filesystem access checks */
	unsigned securebits;
	kernel_cap_t cap_inheritable;	/* caps children may inherit */
	kernel_cap_t cap_permitted;	/* caps we may use */
	kernel_cap_t cap_effective;	/* caps currently in effect */
	kernel_cap_t cap_bset;		/* capability bounding set */
	void *security;			/* LSM private data */
	struct user_struct *user;	/* per-uid accounting */
	struct user_namespace *user_ns;
	struct group_info *group_info;	/* supplementary groups */
	struct rcu_head rcu;		/* deferred destruction */
};
14917 | extern void __put_cred(struct cred *); | |
14918 | extern void exit_creds(struct task_struct *); | |
14919 | extern int copy_creds(struct task_struct *, unsigned long); | |
14920 | extern const struct cred *get_task_cred(struct task_struct *); | |
14921 | extern struct cred *cred_alloc_blank(void); | |
14922 | extern struct cred *prepare_creds(void); | |
14923 | extern struct cred *prepare_exec_creds(void); | |
14924 | extern int commit_creds(struct cred *); | |
14925 | extern void abort_creds(struct cred *); | |
14926 | extern const struct cred *override_creds(const struct cred *); | |
14927 | extern void revert_creds(const struct cred *); | |
14928 | extern struct cred *prepare_kernel_cred(struct task_struct *); | |
14929 | extern int change_create_files_as(struct cred *, struct inode *); | |
14930 | extern int set_security_override(struct cred *, u32); | |
14931 | extern int set_security_override_from_ctx(struct cred *, const char *); | |
14932 | extern int set_create_files_as(struct cred *, struct inode *); | |
14933 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) cred_init(void); | |
/* No-op: credential debugging (DEBUG_CREDENTIALS) is compiled out. */
static inline __attribute__((always_inline)) void validate_creds(const struct cred *cred)
{
}
/* No-op: credential debugging (DEBUG_CREDENTIALS) is compiled out. */
static inline __attribute__((always_inline)) void validate_creds_for_do_exit(struct task_struct *tsk)
{
}
/* No-op: credential debugging (DEBUG_CREDENTIALS) is compiled out. */
static inline __attribute__((always_inline)) void validate_process_creds(void)
{
}
14943 | static inline __attribute__((always_inline)) struct cred *get_new_cred(struct cred *cred) | |
14944 | { | |
14945 | atomic_inc(&cred->usage); | |
14946 | return cred; | |
14947 | } | |
/*
 * Take a reference on a const cred. The const is cast away only to
 * touch the refcount; callers still treat the cred as immutable.
 */
static inline __attribute__((always_inline)) const struct cred *get_cred(const struct cred *cred)
{
	validate_creds(cred);
	return get_new_cred((struct cred *) cred);
}
/*
 * Drop a reference on @_cred and free it via __put_cred() when the count
 * hits zero. The condition is the preprocessed expansion of
 * "if (atomic_dec_and_test(...))" under ftrace branch profiling; the
 * decrement executes exactly once (only one ternary arm is evaluated).
 */
static inline __attribute__((always_inline)) void put_cred(const struct cred *_cred)
{
	struct cred *cred = (struct cred *) _cred;
	validate_creds(cred);
	if (__builtin_constant_p(((atomic_dec_and_test(&(cred)->usage)))) ? !!((atomic_dec_and_test(&(cred)->usage))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/cred.h", .line = 261, }; ______r = !!((atomic_dec_and_test(&(cred)->usage))); ______f.miss_hit[______r]++; ______r; }))
		__put_cred(cred);
}
14961 | extern struct user_namespace init_user_ns; | |
14962 | struct exec_domain; | |
14963 | struct futex_pi_state; | |
14964 | struct robust_list_head; | |
14965 | struct bio_list; | |
14966 | struct fs_struct; | |
14967 | struct perf_event_context; | |
14968 | struct blk_plug; | |
14969 | extern unsigned long avenrun[]; | |
14970 | extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); | |
14971 | extern unsigned long total_forks; | |
14972 | extern int nr_threads; | |
14973 | extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) process_counts; | |
14974 | extern int nr_processes(void); | |
14975 | extern unsigned long nr_running(void); | |
14976 | extern unsigned long nr_uninterruptible(void); | |
14977 | extern unsigned long nr_iowait(void); | |
14978 | extern unsigned long nr_iowait_cpu(int cpu); | |
14979 | extern unsigned long this_cpu_load(void); | |
14980 | extern void calc_global_load(unsigned long ticks); | |
14981 | extern unsigned long get_parent_ip(unsigned long addr); | |
14982 | struct seq_file; | |
14983 | struct cfs_rq; | |
14984 | struct task_group; | |
14985 | extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); | |
14986 | extern void proc_sched_set_task(struct task_struct *p); | |
14987 | extern void | |
14988 | print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); | |
14989 | extern char ___assert_task_state[1 - 2*!!( | |
14990 | sizeof("RSDTtZXxKW")-1 != ( __builtin_constant_p(512) ? ( (512) < 1 ? ____ilog2_NaN() : (512) & (1ULL << 63) ? 63 : (512) & (1ULL << 62) ? 62 : (512) & (1ULL << 61) ? 61 : (512) & (1ULL << 60) ? 60 : (512) & (1ULL << 59) ? 59 : (512) & (1ULL << 58) ? 58 : (512) & (1ULL << 57) ? 57 : (512) & (1ULL << 56) ? 56 : (512) & (1ULL << 55) ? 55 : (512) & (1ULL << 54) ? 54 : (512) & (1ULL << 53) ? 53 : (512) & (1ULL << 52) ? 52 : (512) & (1ULL << 51) ? 51 : (512) & (1ULL << 50) ? 50 : (512) & (1ULL << 49) ? 49 : (512) & (1ULL << 48) ? 48 : (512) & (1ULL << 47) ? 47 : (512) & (1ULL << 46) ? 46 : (512) & (1ULL << 45) ? 45 : (512) & (1ULL << 44) ? 44 : (512) & (1ULL << 43) ? 43 : (512) & (1ULL << 42) ? 42 : (512) & (1ULL << 41) ? 41 : (512) & (1ULL << 40) ? 40 : (512) & (1ULL << 39) ? 39 : (512) & (1ULL << 38) ? 38 : (512) & (1ULL << 37) ? 37 : (512) & (1ULL << 36) ? 36 : (512) & (1ULL << 35) ? 35 : (512) & (1ULL << 34) ? 34 : (512) & (1ULL << 33) ? 33 : (512) & (1ULL << 32) ? 32 : (512) & (1ULL << 31) ? 31 : (512) & (1ULL << 30) ? 30 : (512) & (1ULL << 29) ? 29 : (512) & (1ULL << 28) ? 28 : (512) & (1ULL << 27) ? 27 : (512) & (1ULL << 26) ? 26 : (512) & (1ULL << 25) ? 25 : (512) & (1ULL << 24) ? 24 : (512) & (1ULL << 23) ? 23 : (512) & (1ULL << 22) ? 22 : (512) & (1ULL << 21) ? 21 : (512) & (1ULL << 20) ? 20 : (512) & (1ULL << 19) ? 19 : (512) & (1ULL << 18) ? 18 : (512) & (1ULL << 17) ? 17 : (512) & (1ULL << 16) ? 16 : (512) & (1ULL << 15) ? 15 : (512) & (1ULL << 14) ? 14 : (512) & (1ULL << 13) ? 13 : (512) & (1ULL << 12) ? 12 : (512) & (1ULL << 11) ? 11 : (512) & (1ULL << 10) ? 10 : (512) & (1ULL << 9) ? 9 : (512) & (1ULL << 8) ? 8 : (512) & (1ULL << 7) ? 7 : (512) & (1ULL << 6) ? 6 : (512) & (1ULL << 5) ? 5 : (512) & (1ULL << 4) ? 4 : (512) & (1ULL << 3) ? 3 : (512) & (1ULL << 2) ? 2 : (512) & (1ULL << 1) ? 1 : (512) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(512) <= 4) ? __ilog2_u32(512) : __ilog2_u64(512) )+1)]; | |
14991 | extern rwlock_t tasklist_lock; | |
14992 | extern spinlock_t mmlist_lock; | |
14993 | struct task_struct; | |
14994 | extern void sched_init(void); | |
14995 | extern void sched_init_smp(void); | |
14996 | extern __attribute__((regparm(0))) void schedule_tail(struct task_struct *prev); | |
14997 | extern void init_idle(struct task_struct *idle, int cpu); | |
14998 | extern void init_idle_bootup_task(struct task_struct *idle); | |
14999 | extern int runqueue_is_locked(int cpu); | |
15000 | extern cpumask_var_t nohz_cpu_mask; | |
/* No-op stub: NO_HZ idle load-balancer selection is compiled out here. */
static inline __attribute__((always_inline)) void select_nohz_load_balancer(int stop_tick) { }
15002 | extern void show_state_filter(unsigned long state_filter); | |
/* Dump the state of every task: a zero filter matches all states. */
static inline __attribute__((always_inline)) void show_state(void)
{
	show_state_filter(0);
}
15007 | extern void show_regs(struct pt_regs *); | |
15008 | extern void show_stack(struct task_struct *task, unsigned long *sp); | |
15009 | void io_schedule(void); | |
15010 | long io_schedule_timeout(long timeout); | |
15011 | extern void cpu_init (void); | |
15012 | extern void trap_init(void); | |
15013 | extern void update_process_times(int user); | |
15014 | extern void scheduler_tick(void); | |
15015 | extern void sched_show_task(struct task_struct *p); | |
15016 | extern void touch_softlockup_watchdog(void); | |
15017 | extern void touch_softlockup_watchdog_sync(void); | |
15018 | extern void touch_all_softlockup_watchdogs(void); | |
15019 | extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, | |
15020 | void *buffer, | |
15021 | size_t *lenp, loff_t *ppos); | |
15022 | extern unsigned int softlockup_panic; | |
15023 | void lockup_detector_init(void); | |
15024 | extern unsigned int sysctl_hung_task_panic; | |
15025 | extern unsigned long sysctl_hung_task_check_count; | |
15026 | extern unsigned long sysctl_hung_task_timeout_secs; | |
15027 | extern unsigned long sysctl_hung_task_warnings; | |
15028 | extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, | |
15029 | void *buffer, | |
15030 | size_t *lenp, loff_t *ppos); | |
15031 | extern char __sched_text_start[], __sched_text_end[]; | |
15032 | extern int in_sched_functions(unsigned long addr); | |
15033 | extern signed long schedule_timeout(signed long timeout); | |
15034 | extern signed long schedule_timeout_interruptible(signed long timeout); | |
15035 | extern signed long schedule_timeout_killable(signed long timeout); | |
15036 | extern signed long schedule_timeout_uninterruptible(signed long timeout); | |
15037 | __attribute__((regparm(0))) void schedule(void); | |
15038 | extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner); | |
15039 | struct nsproxy; | |
15040 | struct user_namespace; | |
15041 | extern int sysctl_max_map_count; | |
15042 | typedef unsigned long aio_context_t; | |
/* AIO opcodes for iocb.aio_lio_opcode (note the gap at 4-5). */
enum {
	IOCB_CMD_PREAD = 0,
	IOCB_CMD_PWRITE = 1,
	IOCB_CMD_FSYNC = 2,
	IOCB_CMD_FDSYNC = 3,
	IOCB_CMD_NOOP = 6,
	IOCB_CMD_PREADV = 7,
	IOCB_CMD_PWRITEV = 8,
};
/* Completion record delivered to userspace for one finished iocb. */
struct io_event {
	__u64 data;	/* copy of iocb.aio_data */
	__u64 obj;	/* user address of the originating iocb */
	__s64 res;	/* primary result (byte count or negative errno) */
	__s64 res2;	/* secondary result */
};
/* Userspace AIO control block as submitted via io_submit(2). */
struct iocb {
	__u64 aio_data;		/* opaque cookie echoed back in io_event.data */
	__u32 aio_key, aio_reserved1;	/* kernel-internal key; reserved */
	__u16 aio_lio_opcode;	/* IOCB_CMD_* */
	__s16 aio_reqprio;	/* request priority */
	__u32 aio_fildes;	/* target file descriptor */
	__u64 aio_buf;		/* user buffer (or iovec array) address */
	__u64 aio_nbytes;	/* buffer length (or iovec count) */
	__s64 aio_offset;	/* file offset */
	__u64 aio_reserved2;
	__u32 aio_flags;
	__u32 aio_resfd;	/* eventfd to signal on completion, if flagged */
};
/* One scatter/gather segment: base address and length in bytes. */
struct iovec
{
	void *iov_base;
	__kernel_size_t iov_len;
};
/* Kernel-space analogue of struct iovec. */
struct kvec {
	void *iov_base;
	size_t iov_len;
};
/* Sum the byte lengths of the first @nr_segs iovec segments. */
static inline __attribute__((always_inline)) size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	size_t total = 0;
	unsigned long i;

	for (i = 0; i < nr_segs; i++)
		total += iov[i].iov_len;
	return total;
}
15088 | unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); | |
15089 | struct kioctx; | |
/* In-kernel state for one AIO request. */
struct kiocb {
	struct list_head ki_run_list;	/* linkage on the ctx run list */
	unsigned long ki_flags;
	int ki_users;			/* reference count */
	unsigned ki_key;		/* id within the ioctx */
	struct file *ki_filp;		/* target file */
	struct kioctx *ki_ctx;		/* owning AIO context */
	int (*ki_cancel)(struct kiocb *, struct io_event *);
	ssize_t (*ki_retry)(struct kiocb *);	/* driver to (re)issue the op */
	void (*ki_dtor)(struct kiocb *);	/* destructor hook */
	union {
		void *user;		/* user iocb address (async path) */
		struct task_struct *tsk;	/* waiter (sync path) */
	} ki_obj;
	__u64 ki_user_data;		/* cookie echoed in io_event.data */
	loff_t ki_pos;			/* current file position */
	void *private;
	unsigned short ki_opcode;	/* IOCB_CMD_* */
	size_t ki_nbytes;		/* total bytes requested */
	char *ki_buf;			/* current buffer position */
	size_t ki_left;			/* bytes remaining */
	struct iovec ki_inline_vec;	/* single-segment fast path */
	struct iovec *ki_iovec;
	unsigned long ki_nr_segs;
	unsigned long ki_cur_seg;
	struct list_head ki_list;	/* linkage on ctx active_reqs */
	struct eventfd_ctx *ki_eventfd;	/* completion notification target */
};
/* Header of the user-mapped AIO completion ring; events follow inline. */
struct aio_ring {
	unsigned id;		/* kernel-internal context id */
	unsigned nr;		/* ring capacity in events */
	unsigned head;
	unsigned tail;
	unsigned magic;
	unsigned compat_features;
	unsigned incompat_features;
	unsigned header_length;	/* size of this header */
	struct io_event io_events[0];	/* trailing event slots */
};
/* Kernel bookkeeping for the mmap'ed aio_ring. */
struct aio_ring_info {
	unsigned long mmap_base;	/* user address of the ring */
	unsigned long mmap_size;
	struct page **ring_pages;	/* pinned pages backing the ring */
	spinlock_t ring_lock;
	long nr_pages;
	unsigned nr, tail;
	struct page *internal_pages[8];	/* inline page array for small rings */
};
/* One AIO context (the object behind an aio_context_t). */
struct kioctx {
	atomic_t users;			/* reference count */
	int dead;			/* set once the ctx is being torn down */
	struct mm_struct *mm;		/* owning address space */
	unsigned long user_id;		/* value handed to userspace */
	struct hlist_node list;		/* linkage in the per-mm ctx list */
	wait_queue_head_t wait;		/* io_getevents() sleepers */
	spinlock_t ctx_lock;
	int reqs_active;
	struct list_head active_reqs;	/* in-flight kiocbs */
	struct list_head run_list;	/* kiocbs pending retry */
	unsigned max_reqs;		/* capacity requested at io_setup() */
	struct aio_ring_info ring_info;	/* completion ring mapping */
	struct delayed_work wq;		/* retry worker */
	struct rcu_head rcu_head;	/* deferred free */
};
15154 | extern unsigned aio_max_size; | |
15155 | extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb); | |
15156 | extern int aio_put_req(struct kiocb *iocb); | |
15157 | extern void kick_iocb(struct kiocb *iocb); | |
15158 | extern int aio_complete(struct kiocb *iocb, long res, long res2); | |
15159 | struct mm_struct; | |
15160 | extern void exit_aio(struct mm_struct *mm); | |
15161 | extern long do_io_submit(aio_context_t ctx_id, long nr, | |
15162 | struct iocb * *iocbpp, bool compat); | |
/*
 * Map a list node back to its enclosing kiocb — this is the expanded
 * container_of(h, struct kiocb, ki_list).
 */
static inline __attribute__((always_inline)) struct kiocb *list_kiocb(struct list_head *h)
{
	return ({ const typeof( ((struct kiocb *)0)->ki_list ) *__mptr = (h); (struct kiocb *)( (char *)__mptr - __builtin_offsetof(struct kiocb,ki_list) );});
}
15167 | extern unsigned long aio_nr; | |
15168 | extern unsigned long aio_max_nr; | |
15169 | extern void arch_pick_mmap_layout(struct mm_struct *mm); | |
15170 | extern unsigned long | |
15171 | arch_get_unmapped_area(struct file *, unsigned long, unsigned long, | |
15172 | unsigned long, unsigned long); | |
15173 | extern unsigned long | |
15174 | arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, | |
15175 | unsigned long len, unsigned long pgoff, | |
15176 | unsigned long flags); | |
15177 | extern void arch_unmap_area(struct mm_struct *, unsigned long); | |
15178 | extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); | |
15179 | extern void set_dumpable(struct mm_struct *mm, int value); | |
15180 | extern int get_dumpable(struct mm_struct *mm); | |
/* Signal handler table shared among threads of a process. */
struct sighand_struct {
	atomic_t count;			/* reference count */
	struct k_sigaction action[64];	/* one slot per signal */
	spinlock_t siglock;		/* protects the table and pending sets */
	wait_queue_head_t signalfd_wqh;	/* signalfd waiters */
};
/* BSD process-accounting data accumulated for acct(2) records. */
struct pacct_struct {
	int ac_flag;
	long ac_exitcode;
	unsigned long ac_mem;
	cputime_t ac_utime, ac_stime;		/* user/system cpu time */
	unsigned long ac_minflt, ac_majflt;	/* minor/major page faults */
};
/* State for one cpu-time interval timer (ITIMER_VIRTUAL/ITIMER_PROF). */
struct cpu_itimer {
	cputime_t expires;	/* next expiry */
	cputime_t incr;		/* reload interval */
	u32 error;		/* accumulated rounding error */
	u32 incr_error;		/* rounding error of the interval */
};
/* Snapshot of a task's (or group's) cpu time, used for cpu timers. */
struct task_cputime {
	cputime_t utime;			/* user time */
	cputime_t stime;			/* system time */
	unsigned long long sum_exec_runtime;	/* total scheduled runtime, ns */
};
/* Shared cpu-time accumulator for process-wide POSIX cpu timers. */
struct thread_group_cputimer {
	struct task_cputime cputime;	/* aggregated group times */
	int running;			/* nonzero while any group timer is armed */
	spinlock_t lock;		/* protects cputime */
};
15210 | struct autogroup; | |
/*
 * Per-process (thread-group) signal and accounting state, shared by all
 * threads of the group.
 */
struct signal_struct {
	atomic_t sigcnt;		/* structure reference count */
	atomic_t live;			/* number of live threads */
	int nr_threads;
	wait_queue_head_t wait_chldexit;	/* wait4() sleepers */
	struct task_struct *curr_target;	/* last thread targeted for a group signal */
	struct sigpending shared_pending;	/* group-wide pending signals */
	int group_exit_code;
	int notify_count;
	struct task_struct *group_exit_task;	/* coordinator of group exit/exec */
	int group_stop_count;
	unsigned int flags;		/* SIGNAL_* flags */
	struct list_head posix_timers;
	struct hrtimer real_timer;	/* ITIMER_REAL */
	struct pid *leader_pid;
	ktime_t it_real_incr;		/* ITIMER_REAL reload interval */
	struct cpu_itimer it[2];	/* ITIMER_VIRTUAL and ITIMER_PROF */
	struct thread_group_cputimer cputimer;
	struct task_cputime cputime_expires;	/* earliest group cpu-timer expiries */
	struct list_head cpu_timers[3];
	struct pid *tty_old_pgrp;
	int leader;			/* nonzero for the session leader */
	struct tty_struct *tty;		/* controlling tty, may be NULL */
	struct autogroup *autogroup;
	/* cumulative resource stats; c-prefixed fields include reaped children */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	cputime_t prev_utime, prev_stime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;
	unsigned long long sum_sched_runtime;
	struct rlimit rlim[16];		/* resource limits */
	struct pacct_struct pacct;	/* BSD process accounting */
	struct taskstats *stats;
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
	struct rw_semaphore threadgroup_fork_lock;
	int oom_adj;			/* legacy OOM tuning knob */
	int oom_score_adj;
	int oom_score_adj_min;
	struct mutex cred_guard_mutex;	/* serializes credential changes vs ptrace/exec */
};
15256 | static inline __attribute__((always_inline)) int signal_group_exit(const struct signal_struct *sig) | |
15257 | { | |
15258 | return (sig->flags & 0x00000004) || | |
15259 | (sig->group_exit_task != ((void *)0)); | |
15260 | } | |
/* Per-uid resource accounting, looked up via the uid hash. */
struct user_struct {
	atomic_t __count;		/* reference count */
	atomic_t processes;		/* processes owned by this uid */
	atomic_t files;			/* open files */
	atomic_t sigpending;		/* queued signals */
	atomic_t inotify_watches;
	atomic_t inotify_devs;
	atomic_t fanotify_listeners;
	atomic_long_t epoll_watches;
	unsigned long mq_bytes;		/* POSIX mqueue bytes charged */
	unsigned long locked_shm;	/* locked SysV shm bytes */
	struct hlist_node uidhash_node;	/* uid hash linkage */
	uid_t uid;
	struct user_namespace *user_ns;
	atomic_long_t locked_vm;	/* mlock'ed pages charged */
};
15277 | extern int uids_sysfs_init(void); | |
15278 | extern struct user_struct *find_user(uid_t); | |
15279 | extern struct user_struct root_user; | |
15280 | struct backing_dev_info; | |
15281 | struct reclaim_state; | |
/* Schedstats: run-queue residency data for one task. */
struct sched_info {
	unsigned long pcount;			/* times run on a cpu */
	unsigned long long run_delay;		/* time spent runnable but waiting */
	unsigned long long last_arrival,	/* when last scheduled in */
			   last_queued;		/* when last made runnable */
};
/* Delay accounting: where and how long this task has been blocked. */
struct task_delay_info {
	spinlock_t lock;
	unsigned int flags;
	struct timespec blkio_start, blkio_end;	/* current block-I/O wait interval */
	u64 blkio_delay;	/* total block-I/O wait, ns */
	u64 swapin_delay;	/* total swap-in wait, ns */
	u32 blkio_count;
	u32 swapin_count;
	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* total memory-reclaim wait, ns */
	u32 freepages_count;
};
/* Schedstats/delay accounting is configured in: always collect. */
static inline __attribute__((always_inline)) int sched_info_on(void)
{
	return 1;
}
/* CPU idleness classification passed to the load balancer. */
enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES	/* array-size sentinel */
};
/* Power-savings aggressiveness levels for sched_mc/smt_power_savings. */
enum powersavings_balance_level {
	POWERSAVINGS_BALANCE_NONE = 0,
	POWERSAVINGS_BALANCE_BASIC,
	POWERSAVINGS_BALANCE_WAKEUP,
	MAX_POWERSAVINGS_BALANCE_LEVELS	/* sentinel */
};
15316 | extern int sched_mc_power_savings, sched_smt_power_savings; | |
/*
 * Pick the balance flag for the MC domain level: 0x0100 when SMT power
 * saving is on, 0x1000 when MC power saving is off, else 0. The
 * conditions are ftrace-branch-profiled expansions of plain if tests
 * and must keep their side effects.
 */
static inline __attribute__((always_inline)) int sd_balance_for_mc_power(void)
{
	if (__builtin_constant_p(((sched_smt_power_savings))) ? !!((sched_smt_power_savings)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 864, }; ______r = !!((sched_smt_power_savings)); ______f.miss_hit[______r]++; ______r; }))
		return 0x0100;
	if (__builtin_constant_p(((!sched_mc_power_savings))) ? !!((!sched_mc_power_savings)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 867, }; ______r = !!((!sched_mc_power_savings)); ______f.miss_hit[______r]++; ______r; }))
		return 0x1000;
	return 0;
}
/*
 * Balance flag for the package domain level: 0x0100 when either power
 * saving mode is enabled, otherwise 0x1000. Condition is ftrace
 * branch-profiled; do not simplify.
 */
static inline __attribute__((always_inline)) int sd_balance_for_package_power(void)
{
	if (__builtin_constant_p(((sched_mc_power_savings | sched_smt_power_savings))) ? !!((sched_mc_power_savings | sched_smt_power_savings)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 875, }; ______r = !!((sched_mc_power_savings | sched_smt_power_savings)); ______f.miss_hit[______r]++; ______r; }))
		return 0x0100;
	return 0x1000;
}
15331 | extern int __attribute__((weak)) arch_sd_sibiling_asym_packing(void); | |
/*
 * Extra domain flags for power saving: 0x0002 when either power saving
 * mode is enabled, else 0. Condition is ftrace branch-profiled; do not
 * simplify.
 */
static inline __attribute__((always_inline)) int sd_power_saving_flags(void)
{
	if (__builtin_constant_p(((sched_mc_power_savings | sched_smt_power_savings))) ? !!((sched_mc_power_savings | sched_smt_power_savings)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 891, }; ______r = !!((sched_mc_power_savings | sched_smt_power_savings)); ______f.miss_hit[______r]++; ______r; }))
		return 0x0002;
	return 0;
}
/* CPU-power (capacity) numbers shared by a scheduling group. */
struct sched_group_power {
	atomic_t ref;				/* reference count */
	unsigned int power, power_orig;		/* current and original capacity */
};
/* One group in a sched domain's circular group list. */
struct sched_group {
	struct sched_group *next;	/* circular linkage */
	atomic_t ref;
	unsigned int group_weight;	/* number of cpus in the group */
	struct sched_group_power *sgp;
	unsigned long cpumask[0];	/* trailing cpu bitmap */
};
/*
 * View the group's trailing bitmap as a struct cpumask; the expanded
 * to_cpumask() ternary is a compile-time bitmap type check only.
 */
static inline __attribute__((always_inline)) struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return ((struct cpumask *)(1 ? (sg->cpumask) : (void *)sizeof(__check_is_bitmap(sg->cpumask))));
}
/* Per-domain tuning passed to partition_sched_domains(). */
struct sched_domain_attr {
	int relax_domain_level;
};
15356 | extern int sched_domain_level_max; | |
/*
 * One level of the scheduler's load-balancing domain hierarchy. The
 * lb_*/alb_*/sb*_ /ttwu_* counters are schedstats; @span is the trailing
 * cpu bitmap covered by this domain.
 */
struct sched_domain {
	struct sched_domain *parent;	/* wider domain, NULL at the top */
	struct sched_domain *child;	/* narrower domain, NULL at the bottom */
	struct sched_group *groups;	/* circular list of balance groups */
	unsigned long min_interval;	/* balance interval bounds, in jiffies */
	unsigned long max_interval;
	unsigned int busy_factor;	/* balance less when busy */
	unsigned int imbalance_pct;	/* imbalance threshold, percent */
	unsigned int cache_nice_tries;
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;
	int flags;			/* SD_* flags */
	int level;
	/* runtime balancing state */
	unsigned long last_balance;
	unsigned int balance_interval;
	unsigned int nr_balance_failed;
	u64 last_update;
	/* load_balance() statistics, indexed by cpu_idle_type */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
	/* active balance statistics */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;
	/* sched-balance-exec / sched-balance-fork statistics */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;
	/* try_to_wake_up() statistics */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
	char *name;
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	unsigned int span_weight;	/* number of cpus in span */
	unsigned long span[0];		/* trailing cpu bitmap */
};
15406 | static inline __attribute__((always_inline)) struct cpumask *sched_domain_span(struct sched_domain *sd) | |
15407 | { | |
15408 | return ((struct cpumask *)(1 ? (sd->span) : (void *)sizeof(__check_is_bitmap(sd->span)))); | |
15409 | } | |
15410 | extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], | |
15411 | struct sched_domain_attr *dattr_new); | |
15412 | cpumask_var_t *alloc_sched_domains(unsigned int ndoms); | |
15413 | void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); | |
15414 | static inline __attribute__((always_inline)) int test_sd_parent(struct sched_domain *sd, int flag) | |
15415 | { | |
15416 | if (__builtin_constant_p(((sd->parent && (sd->parent->flags & flag)))) ? !!((sd->parent && (sd->parent->flags & flag))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1029, }; ______r = !!((sd->parent && (sd->parent->flags & flag))); ______f.miss_hit[______r]++; ______r; })) | |
15417 | return 1; | |
15418 | return 0; | |
15419 | } | |
15420 | unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu); | |
15421 | unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu); | |
15422 | struct io_context; | |
15423 | static inline __attribute__((always_inline)) void prefetch_stack(struct task_struct *t) { } | |
15424 | struct audit_context; | |
15425 | struct mempolicy; | |
15426 | struct pipe_inode_info; | |
15427 | struct uts_namespace; | |
15428 | struct rq; | |
15429 | struct sched_domain; | |
15430 | struct sched_class { | |
15431 | const struct sched_class *next; | |
15432 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); | |
15433 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); | |
15434 | void (*yield_task) (struct rq *rq); | |
15435 | bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); | |
15436 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); | |
15437 | struct task_struct * (*pick_next_task) (struct rq *rq); | |
15438 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); | |
15439 | int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); | |
15440 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | |
15441 | void (*post_schedule) (struct rq *this_rq); | |
15442 | void (*task_waking) (struct task_struct *task); | |
15443 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); | |
15444 | void (*set_cpus_allowed)(struct task_struct *p, | |
15445 | const struct cpumask *newmask); | |
15446 | void (*rq_online)(struct rq *rq); | |
15447 | void (*rq_offline)(struct rq *rq); | |
15448 | void (*set_curr_task) (struct rq *rq); | |
15449 | void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); | |
15450 | void (*task_fork) (struct task_struct *p); | |
15451 | void (*switched_from) (struct rq *this_rq, struct task_struct *task); | |
15452 | void (*switched_to) (struct rq *this_rq, struct task_struct *task); | |
15453 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, | |
15454 | int oldprio); | |
15455 | unsigned int (*get_rr_interval) (struct rq *rq, | |
15456 | struct task_struct *task); | |
15457 | void (*task_move_group) (struct task_struct *p, int on_rq); | |
15458 | }; | |
15459 | struct load_weight { | |
15460 | unsigned long weight, inv_weight; | |
15461 | }; | |
15462 | struct sched_statistics { | |
15463 | u64 wait_start; | |
15464 | u64 wait_max; | |
15465 | u64 wait_count; | |
15466 | u64 wait_sum; | |
15467 | u64 iowait_count; | |
15468 | u64 iowait_sum; | |
15469 | u64 sleep_start; | |
15470 | u64 sleep_max; | |
15471 | s64 sum_sleep_runtime; | |
15472 | u64 block_start; | |
15473 | u64 block_max; | |
15474 | u64 exec_max; | |
15475 | u64 slice_max; | |
15476 | u64 nr_migrations_cold; | |
15477 | u64 nr_failed_migrations_affine; | |
15478 | u64 nr_failed_migrations_running; | |
15479 | u64 nr_failed_migrations_hot; | |
15480 | u64 nr_forced_migrations; | |
15481 | u64 nr_wakeups; | |
15482 | u64 nr_wakeups_sync; | |
15483 | u64 nr_wakeups_migrate; | |
15484 | u64 nr_wakeups_local; | |
15485 | u64 nr_wakeups_remote; | |
15486 | u64 nr_wakeups_affine; | |
15487 | u64 nr_wakeups_affine_attempts; | |
15488 | u64 nr_wakeups_passive; | |
15489 | u64 nr_wakeups_idle; | |
15490 | }; | |
15491 | struct sched_entity { | |
15492 | struct load_weight load; | |
15493 | struct rb_node run_node; | |
15494 | struct list_head group_node; | |
15495 | unsigned int on_rq; | |
15496 | u64 exec_start; | |
15497 | u64 sum_exec_runtime; | |
15498 | u64 vruntime; | |
15499 | u64 prev_sum_exec_runtime; | |
15500 | u64 nr_migrations; | |
15501 | struct sched_statistics statistics; | |
15502 | struct sched_entity *parent; | |
15503 | struct cfs_rq *cfs_rq; | |
15504 | struct cfs_rq *my_q; | |
15505 | }; | |
15506 | struct sched_rt_entity { | |
15507 | struct list_head run_list; | |
15508 | unsigned long timeout; | |
15509 | unsigned int time_slice; | |
15510 | int nr_cpus_allowed; | |
15511 | struct sched_rt_entity *back; | |
15512 | }; | |
15513 | struct rcu_node; | |
15514 | enum perf_event_task_context { | |
15515 | perf_invalid_context = -1, | |
15516 | perf_hw_context = 0, | |
15517 | perf_sw_context, | |
15518 | perf_nr_task_contexts, | |
15519 | }; | |
15520 | struct task_struct { | |
15521 | volatile long state; | |
15522 | void *stack; | |
15523 | atomic_t usage; | |
15524 | unsigned int flags; | |
15525 | unsigned int ptrace; | |
15526 | struct task_struct *wake_entry; | |
15527 | int on_cpu; | |
15528 | int on_rq; | |
15529 | int prio, static_prio, normal_prio; | |
15530 | unsigned int rt_priority; | |
15531 | const struct sched_class *sched_class; | |
15532 | struct sched_entity se; | |
15533 | struct sched_rt_entity rt; | |
15534 | struct hlist_head preempt_notifiers; | |
15535 | unsigned char fpu_counter; | |
15536 | unsigned int btrace_seq; | |
15537 | unsigned int policy; | |
15538 | cpumask_t cpus_allowed; | |
15539 | int rcu_read_lock_nesting; | |
15540 | char rcu_read_unlock_special; | |
15541 | int rcu_boosted; | |
15542 | struct list_head rcu_node_entry; | |
15543 | struct rcu_node *rcu_blocked_node; | |
15544 | struct rt_mutex *rcu_boost_mutex; | |
15545 | struct sched_info sched_info; | |
15546 | struct list_head tasks; | |
15547 | struct plist_node pushable_tasks; | |
15548 | struct mm_struct *mm, *active_mm; | |
15549 | int exit_state; | |
15550 | int exit_code, exit_signal; | |
15551 | int pdeath_signal; | |
15552 | unsigned int group_stop; | |
15553 | unsigned int personality; | |
15554 | unsigned did_exec:1; | |
15555 | unsigned in_execve:1; | |
15556 | unsigned in_iowait:1; | |
15557 | unsigned sched_reset_on_fork:1; | |
15558 | unsigned sched_contributes_to_load:1; | |
15559 | pid_t pid; | |
15560 | pid_t tgid; | |
15561 | unsigned long stack_canary; | |
15562 | struct task_struct *real_parent; | |
15563 | struct task_struct *parent; | |
15564 | struct list_head children; | |
15565 | struct list_head sibling; | |
15566 | struct task_struct *group_leader; | |
15567 | struct list_head ptraced; | |
15568 | struct list_head ptrace_entry; | |
15569 | struct pid_link pids[PIDTYPE_MAX]; | |
15570 | struct list_head thread_group; | |
15571 | struct completion *vfork_done; | |
15572 | int *set_child_tid; | |
15573 | int *clear_child_tid; | |
15574 | cputime_t utime, stime, utimescaled, stimescaled; | |
15575 | cputime_t gtime; | |
15576 | cputime_t prev_utime, prev_stime; | |
15577 | unsigned long nvcsw, nivcsw; | |
15578 | struct timespec start_time; | |
15579 | struct timespec real_start_time; | |
15580 | unsigned long min_flt, maj_flt; | |
15581 | struct task_cputime cputime_expires; | |
15582 | struct list_head cpu_timers[3]; | |
15583 | const struct cred *real_cred; | |
15584 | const struct cred *cred; | |
15585 | struct cred *replacement_session_keyring; | |
15586 | char comm[16]; | |
15587 | int link_count, total_link_count; | |
15588 | struct sysv_sem sysvsem; | |
15589 | unsigned long last_switch_count; | |
15590 | struct thread_struct thread; | |
15591 | struct fs_struct *fs; | |
15592 | struct files_struct *files; | |
15593 | struct nsproxy *nsproxy; | |
15594 | struct signal_struct *signal; | |
15595 | struct sighand_struct *sighand; | |
15596 | sigset_t blocked, real_blocked; | |
15597 | sigset_t saved_sigmask; | |
15598 | struct sigpending pending; | |
15599 | unsigned long sas_ss_sp; | |
15600 | size_t sas_ss_size; | |
15601 | int (*notifier)(void *priv); | |
15602 | void *notifier_data; | |
15603 | sigset_t *notifier_mask; | |
15604 | struct audit_context *audit_context; | |
15605 | uid_t loginuid; | |
15606 | unsigned int sessionid; | |
15607 | seccomp_t seccomp; | |
15608 | u32 parent_exec_id; | |
15609 | u32 self_exec_id; | |
15610 | spinlock_t alloc_lock; | |
15611 | struct irqaction *irqaction; | |
15612 | raw_spinlock_t pi_lock; | |
15613 | struct plist_head pi_waiters; | |
15614 | struct rt_mutex_waiter *pi_blocked_on; | |
15615 | struct mutex_waiter *blocked_on; | |
15616 | unsigned int irq_events; | |
15617 | unsigned long hardirq_enable_ip; | |
15618 | unsigned long hardirq_disable_ip; | |
15619 | unsigned int hardirq_enable_event; | |
15620 | unsigned int hardirq_disable_event; | |
15621 | int hardirqs_enabled; | |
15622 | int hardirq_context; | |
15623 | unsigned long softirq_disable_ip; | |
15624 | unsigned long softirq_enable_ip; | |
15625 | unsigned int softirq_disable_event; | |
15626 | unsigned int softirq_enable_event; | |
15627 | int softirqs_enabled; | |
15628 | int softirq_context; | |
15629 | u64 curr_chain_key; | |
15630 | int lockdep_depth; | |
15631 | unsigned int lockdep_recursion; | |
15632 | struct held_lock held_locks[48UL]; | |
15633 | gfp_t lockdep_reclaim_gfp; | |
15634 | void *journal_info; | |
15635 | struct bio_list *bio_list; | |
15636 | struct blk_plug *plug; | |
15637 | struct reclaim_state *reclaim_state; | |
15638 | struct backing_dev_info *backing_dev_info; | |
15639 | struct io_context *io_context; | |
15640 | unsigned long ptrace_message; | |
15641 | siginfo_t *last_siginfo; | |
15642 | struct task_io_accounting ioac; | |
15643 | u64 acct_rss_mem1; | |
15644 | u64 acct_vm_mem1; | |
15645 | cputime_t acct_timexpd; | |
15646 | struct css_set *cgroups; | |
15647 | struct list_head cg_list; | |
15648 | struct robust_list_head *robust_list; | |
15649 | struct list_head pi_state_list; | |
15650 | struct futex_pi_state *pi_state_cache; | |
15651 | struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; | |
15652 | struct mutex perf_event_mutex; | |
15653 | struct list_head perf_event_list; | |
15654 | atomic_t fs_excl; | |
15655 | struct rcu_head rcu; | |
15656 | struct pipe_inode_info *splice_pipe; | |
15657 | struct task_delay_info *delays; | |
15658 | struct prop_local_single dirties; | |
15659 | int latency_record_count; | |
15660 | struct latency_record latency_record[32]; | |
15661 | unsigned long timer_slack_ns; | |
15662 | unsigned long default_timer_slack_ns; | |
15663 | struct list_head *scm_work_list; | |
15664 | int curr_ret_stack; | |
15665 | struct ftrace_ret_stack *ret_stack; | |
15666 | unsigned long long ftrace_timestamp; | |
15667 | atomic_t trace_overrun; | |
15668 | atomic_t tracing_graph_pause; | |
15669 | unsigned long trace; | |
15670 | unsigned long trace_recursion; | |
15671 | atomic_t ptrace_bp_refcnt; | |
15672 | }; | |
15673 | static inline __attribute__((always_inline)) int rt_prio(int prio) | |
15674 | { | |
15675 | if (__builtin_constant_p((((__builtin_constant_p(prio < 100) ? !!(prio < 100) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1599, }; ______r = __builtin_expect(!!(prio < 100), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(prio < 100) ? !!(prio < 100) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1599, }; ______r = __builtin_expect(!!(prio < 100), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1599, }; ______r = !!(((__builtin_constant_p(prio < 100) ? !!(prio < 100) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1599, }; ______r = __builtin_expect(!!(prio < 100), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
15676 | return 1; | |
15677 | return 0; | |
15678 | } | |
15679 | static inline __attribute__((always_inline)) int rt_task(struct task_struct *p) | |
15680 | { | |
15681 | return rt_prio(p->prio); | |
15682 | } | |
15683 | static inline __attribute__((always_inline)) struct pid *task_pid(struct task_struct *task) | |
15684 | { | |
15685 | return task->pids[PIDTYPE_PID].pid; | |
15686 | } | |
15687 | static inline __attribute__((always_inline)) struct pid *task_tgid(struct task_struct *task) | |
15688 | { | |
15689 | return task->group_leader->pids[PIDTYPE_PID].pid; | |
15690 | } | |
15691 | static inline __attribute__((always_inline)) struct pid *task_pgrp(struct task_struct *task) | |
15692 | { | |
15693 | return task->group_leader->pids[PIDTYPE_PGID].pid; | |
15694 | } | |
15695 | static inline __attribute__((always_inline)) struct pid *task_session(struct task_struct *task) | |
15696 | { | |
15697 | return task->group_leader->pids[PIDTYPE_SID].pid; | |
15698 | } | |
15699 | struct pid_namespace; | |
15700 | pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, | |
15701 | struct pid_namespace *ns); | |
15702 | static inline __attribute__((always_inline)) pid_t task_pid_nr(struct task_struct *tsk) | |
15703 | { | |
15704 | return tsk->pid; | |
15705 | } | |
15706 | static inline __attribute__((always_inline)) pid_t task_pid_nr_ns(struct task_struct *tsk, | |
15707 | struct pid_namespace *ns) | |
15708 | { | |
15709 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); | |
15710 | } | |
15711 | static inline __attribute__((always_inline)) pid_t task_pid_vnr(struct task_struct *tsk) | |
15712 | { | |
15713 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, ((void *)0)); | |
15714 | } | |
15715 | static inline __attribute__((always_inline)) pid_t task_tgid_nr(struct task_struct *tsk) | |
15716 | { | |
15717 | return tsk->tgid; | |
15718 | } | |
15719 | pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); | |
15720 | static inline __attribute__((always_inline)) pid_t task_tgid_vnr(struct task_struct *tsk) | |
15721 | { | |
15722 | return pid_vnr(task_tgid(tsk)); | |
15723 | } | |
15724 | static inline __attribute__((always_inline)) pid_t task_pgrp_nr_ns(struct task_struct *tsk, | |
15725 | struct pid_namespace *ns) | |
15726 | { | |
15727 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); | |
15728 | } | |
15729 | static inline __attribute__((always_inline)) pid_t task_pgrp_vnr(struct task_struct *tsk) | |
15730 | { | |
15731 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ((void *)0)); | |
15732 | } | |
15733 | static inline __attribute__((always_inline)) pid_t task_session_nr_ns(struct task_struct *tsk, | |
15734 | struct pid_namespace *ns) | |
15735 | { | |
15736 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); | |
15737 | } | |
15738 | static inline __attribute__((always_inline)) pid_t task_session_vnr(struct task_struct *tsk) | |
15739 | { | |
15740 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, ((void *)0)); | |
15741 | } | |
15742 | static inline __attribute__((always_inline)) pid_t task_pgrp_nr(struct task_struct *tsk) | |
15743 | { | |
15744 | return task_pgrp_nr_ns(tsk, &init_pid_ns); | |
15745 | } | |
15746 | static inline __attribute__((always_inline)) int pid_alive(struct task_struct *p) | |
15747 | { | |
15748 | return p->pids[PIDTYPE_PID].pid != ((void *)0); | |
15749 | } | |
15750 | static inline __attribute__((always_inline)) int is_global_init(struct task_struct *tsk) | |
15751 | { | |
15752 | return tsk->pid == 1; | |
15753 | } | |
15754 | extern int is_container_init(struct task_struct *tsk); | |
15755 | extern struct pid *cad_pid; | |
15756 | extern void free_task(struct task_struct *tsk); | |
15757 | extern void __put_task_struct(struct task_struct *t); | |
15758 | static inline __attribute__((always_inline)) void put_task_struct(struct task_struct *t) | |
15759 | { | |
15760 | if (__builtin_constant_p(((atomic_dec_and_test(&t->usage)))) ? !!((atomic_dec_and_test(&t->usage))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1750, }; ______r = !!((atomic_dec_and_test(&t->usage))); ______f.miss_hit[______r]++; ______r; })) | |
15761 | __put_task_struct(t); | |
15762 | } | |
15763 | extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st); | |
15764 | extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st); | |
15765 | extern void task_clear_group_stop_pending(struct task_struct *task); | |
15766 | static inline __attribute__((always_inline)) void rcu_copy_process(struct task_struct *p) | |
15767 | { | |
15768 | p->rcu_read_lock_nesting = 0; | |
15769 | p->rcu_read_unlock_special = 0; | |
15770 | p->rcu_blocked_node = ((void *)0); | |
15771 | p->rcu_boost_mutex = ((void *)0); | |
15772 | INIT_LIST_HEAD(&p->rcu_node_entry); | |
15773 | } | |
15774 | extern void do_set_cpus_allowed(struct task_struct *p, | |
15775 | const struct cpumask *new_mask); | |
15776 | extern int set_cpus_allowed_ptr(struct task_struct *p, | |
15777 | const struct cpumask *new_mask); | |
15778 | static inline __attribute__((always_inline)) int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) | |
15779 | { | |
15780 | return set_cpus_allowed_ptr(p, &new_mask); | |
15781 | } | |
15782 | extern unsigned long long __attribute__((no_instrument_function)) sched_clock(void); | |
15783 | extern u64 cpu_clock(int cpu); | |
15784 | extern u64 local_clock(void); | |
15785 | extern u64 sched_clock_cpu(int cpu); | |
15786 | extern void sched_clock_init(void); | |
15787 | extern int sched_clock_stable; | |
15788 | extern void sched_clock_tick(void); | |
15789 | extern void sched_clock_idle_sleep_event(void); | |
15790 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); | |
15791 | extern void enable_sched_clock_irqtime(void); | |
15792 | extern void disable_sched_clock_irqtime(void); | |
15793 | extern unsigned long long | |
15794 | task_sched_runtime(struct task_struct *task); | |
15795 | extern unsigned long long thread_group_sched_runtime(struct task_struct *task); | |
15796 | extern void sched_exec(void); | |
15797 | extern void sched_clock_idle_sleep_event(void); | |
15798 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); | |
15799 | extern void idle_task_exit(void); | |
15800 | static inline __attribute__((always_inline)) void wake_up_idle_cpu(int cpu) { } | |
15801 | extern unsigned int sysctl_sched_latency; | |
15802 | extern unsigned int sysctl_sched_min_granularity; | |
15803 | extern unsigned int sysctl_sched_wakeup_granularity; | |
15804 | extern unsigned int sysctl_sched_child_runs_first; | |
15805 | enum sched_tunable_scaling { | |
15806 | SCHED_TUNABLESCALING_NONE, | |
15807 | SCHED_TUNABLESCALING_LOG, | |
15808 | SCHED_TUNABLESCALING_LINEAR, | |
15809 | SCHED_TUNABLESCALING_END, | |
15810 | }; | |
15811 | extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; | |
15812 | extern unsigned int sysctl_sched_migration_cost; | |
15813 | extern unsigned int sysctl_sched_nr_migrate; | |
15814 | extern unsigned int sysctl_sched_time_avg; | |
15815 | extern unsigned int sysctl_timer_migration; | |
15816 | extern unsigned int sysctl_sched_shares_window; | |
15817 | int sched_proc_update_handler(struct ctl_table *table, int write, | |
15818 | void *buffer, size_t *length, | |
15819 | loff_t *ppos); | |
15820 | static inline __attribute__((always_inline)) unsigned int get_sysctl_timer_migration(void) | |
15821 | { | |
15822 | return sysctl_timer_migration; | |
15823 | } | |
15824 | extern unsigned int sysctl_sched_rt_period; | |
15825 | extern int sysctl_sched_rt_runtime; | |
15826 | int sched_rt_handler(struct ctl_table *table, int write, | |
15827 | void *buffer, size_t *lenp, | |
15828 | loff_t *ppos); | |
15829 | extern unsigned int sysctl_sched_autogroup_enabled; | |
15830 | extern void sched_autogroup_create_attach(struct task_struct *p); | |
15831 | extern void sched_autogroup_detach(struct task_struct *p); | |
15832 | extern void sched_autogroup_fork(struct signal_struct *sig); | |
15833 | extern void sched_autogroup_exit(struct signal_struct *sig); | |
15834 | extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); | |
15835 | extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice); | |
15836 | extern int rt_mutex_getprio(struct task_struct *p); | |
15837 | extern void rt_mutex_setprio(struct task_struct *p, int prio); | |
15838 | extern void rt_mutex_adjust_pi(struct task_struct *p); | |
15839 | extern bool yield_to(struct task_struct *p, bool preempt); | |
15840 | extern void set_user_nice(struct task_struct *p, long nice); | |
15841 | extern int task_prio(const struct task_struct *p); | |
15842 | extern int task_nice(const struct task_struct *p); | |
15843 | extern int can_nice(const struct task_struct *p, const int nice); | |
15844 | extern int task_curr(const struct task_struct *p); | |
15845 | extern int idle_cpu(int cpu); | |
15846 | extern int sched_setscheduler(struct task_struct *, int, | |
15847 | const struct sched_param *); | |
15848 | extern int sched_setscheduler_nocheck(struct task_struct *, int, | |
15849 | const struct sched_param *); | |
15850 | extern struct task_struct *idle_task(int cpu); | |
15851 | extern struct task_struct *curr_task(int cpu); | |
15852 | extern void set_curr_task(int cpu, struct task_struct *p); | |
15853 | void yield(void); | |
15854 | extern struct exec_domain default_exec_domain; | |
15855 | union thread_union { | |
15856 | struct thread_info thread_info; | |
15857 | unsigned long stack[(((1UL) << 12) << 1)/sizeof(long)]; | |
15858 | }; | |
15859 | static inline __attribute__((always_inline)) int kstack_end(void *addr) | |
15860 | { | |
15861 | return !(((unsigned long)addr+sizeof(void*)-1) & ((((1UL) << 12) << 1)-sizeof(void*))); | |
15862 | } | |
15863 | extern union thread_union init_thread_union; | |
15864 | extern struct task_struct init_task; | |
15865 | extern struct mm_struct init_mm; | |
15866 | extern struct pid_namespace init_pid_ns; | |
15867 | extern struct task_struct *find_task_by_vpid(pid_t nr); | |
15868 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, | |
15869 | struct pid_namespace *ns); | |
15870 | extern void __set_special_pids(struct pid *pid); | |
15871 | extern struct user_struct * alloc_uid(struct user_namespace *, uid_t); | |
15872 | static inline __attribute__((always_inline)) struct user_struct *get_uid(struct user_struct *u) | |
15873 | { | |
15874 | atomic_inc(&u->__count); | |
15875 | return u; | |
15876 | } | |
15877 | extern void free_uid(struct user_struct *); | |
15878 | extern void release_uids(struct user_namespace *ns); | |
15879 | extern void xtime_update(unsigned long ticks); | |
15880 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); | |
15881 | extern int wake_up_process(struct task_struct *tsk); | |
15882 | extern void wake_up_new_task(struct task_struct *tsk); | |
15883 | extern void kick_process(struct task_struct *tsk); | |
15884 | extern void sched_fork(struct task_struct *p); | |
15885 | extern void sched_dead(struct task_struct *p); | |
15886 | extern void proc_caches_init(void); | |
15887 | extern void flush_signals(struct task_struct *); | |
15888 | extern void __flush_signals(struct task_struct *); | |
15889 | extern void ignore_signals(struct task_struct *); | |
15890 | extern void flush_signal_handlers(struct task_struct *, int force_default); | |
15891 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); | |
15892 | static inline __attribute__((always_inline)) int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |
15893 | { | |
15894 | unsigned long flags; | |
15895 | int ret; | |
15896 | do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&tsk->sighand->siglock)); } while (0); } while (0); | |
15897 | ret = dequeue_signal(tsk, mask, info); | |
15898 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); | |
15899 | return ret; | |
15900 | } | |
15901 | extern void block_all_signals(int (*notifier)(void *priv), void *priv, | |
15902 | sigset_t *mask); | |
15903 | extern void unblock_all_signals(void); | |
15904 | extern void release_task(struct task_struct * p); | |
15905 | extern int send_sig_info(int, struct siginfo *, struct task_struct *); | |
15906 | extern int force_sigsegv(int, struct task_struct *); | |
15907 | extern int force_sig_info(int, struct siginfo *, struct task_struct *); | |
15908 | extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); | |
15909 | extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); | |
15910 | extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32); | |
15911 | extern int kill_pgrp(struct pid *pid, int sig, int priv); | |
15912 | extern int kill_pid(struct pid *pid, int sig, int priv); | |
15913 | extern int kill_proc_info(int, struct siginfo *, pid_t); | |
15914 | extern int do_notify_parent(struct task_struct *, int); | |
15915 | extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); | |
15916 | extern void force_sig(int, struct task_struct *); | |
15917 | extern int send_sig(int, struct task_struct *, int); | |
15918 | extern int zap_other_threads(struct task_struct *p); | |
15919 | extern struct sigqueue *sigqueue_alloc(void); | |
15920 | extern void sigqueue_free(struct sigqueue *); | |
15921 | extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); | |
15922 | extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); | |
15923 | extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long); | |
15924 | static inline __attribute__((always_inline)) int kill_cad_pid(int sig, int priv) | |
15925 | { | |
15926 | return kill_pid(cad_pid, sig, priv); | |
15927 | } | |
15928 | static inline __attribute__((always_inline)) int on_sig_stack(unsigned long sp) | |
15929 | { | |
15930 | return sp > get_current()->sas_ss_sp && | |
15931 | sp - get_current()->sas_ss_sp <= get_current()->sas_ss_size; | |
15932 | } | |
15933 | static inline __attribute__((always_inline)) int sas_ss_flags(unsigned long sp) | |
15934 | { | |
15935 | return (get_current()->sas_ss_size == 0 ? 2 | |
15936 | : on_sig_stack(sp) ? 1 : 0); | |
15937 | } | |
15938 | extern struct mm_struct * mm_alloc(void); | |
15939 | extern void __mmdrop(struct mm_struct *); | |
15940 | static inline __attribute__((always_inline)) void mmdrop(struct mm_struct * mm) | |
15941 | { | |
15942 | if (__builtin_constant_p((((__builtin_constant_p(atomic_dec_and_test(&mm->mm_count)) ? !!(atomic_dec_and_test(&mm->mm_count)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2204, }; ______r = __builtin_expect(!!(atomic_dec_and_test(&mm->mm_count)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(atomic_dec_and_test(&mm->mm_count)) ? !!(atomic_dec_and_test(&mm->mm_count)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2204, }; ______r = __builtin_expect(!!(atomic_dec_and_test(&mm->mm_count)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2204, }; ______r = !!(((__builtin_constant_p(atomic_dec_and_test(&mm->mm_count)) ? !!(atomic_dec_and_test(&mm->mm_count)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2204, }; ______r = __builtin_expect(!!(atomic_dec_and_test(&mm->mm_count)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
15943 | __mmdrop(mm); | |
15944 | } | |
15945 | extern void mmput(struct mm_struct *); | |
15946 | extern struct mm_struct *get_task_mm(struct task_struct *task); | |
15947 | extern void mm_release(struct task_struct *, struct mm_struct *); | |
15948 | extern struct mm_struct *dup_mm(struct task_struct *tsk); | |
15949 | extern int copy_thread(unsigned long, unsigned long, unsigned long, | |
15950 | struct task_struct *, struct pt_regs *); | |
15951 | extern void flush_thread(void); | |
15952 | extern void exit_thread(void); | |
15953 | extern void exit_files(struct task_struct *); | |
15954 | extern void __cleanup_sighand(struct sighand_struct *); | |
15955 | extern void exit_itimers(struct signal_struct *); | |
15956 | extern void flush_itimer_signals(void); | |
15957 | extern void do_group_exit(int); | |
15958 | extern void daemonize(const char *, ...); | |
15959 | extern int allow_signal(int); | |
15960 | extern int disallow_signal(int); | |
15961 | extern int do_execve(const char *, | |
15962 | const char * const *, | |
15963 | const char * const *, struct pt_regs *); | |
15964 | extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int *, int *); | |
15965 | struct task_struct *fork_idle(int); | |
15966 | extern void set_task_comm(struct task_struct *tsk, char *from); | |
15967 | extern char *get_task_comm(char *to, struct task_struct *tsk); | |
15968 | void scheduler_ipi(void); | |
15969 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); | |
15970 | extern bool current_is_single_threaded(void); | |
15971 | static inline __attribute__((always_inline)) int get_nr_threads(struct task_struct *tsk) | |
15972 | { | |
15973 | return tsk->signal->nr_threads; | |
15974 | } | |
15975 | static inline __attribute__((always_inline)) int has_group_leader_pid(struct task_struct *p) | |
15976 | { | |
15977 | return p->pid == p->tgid; | |
15978 | } | |
15979 | static inline __attribute__((always_inline)) | |
15980 | int same_thread_group(struct task_struct *p1, struct task_struct *p2) | |
15981 | { | |
15982 | return p1->tgid == p2->tgid; | |
15983 | } | |
15984 | static inline __attribute__((always_inline)) struct task_struct *next_thread(const struct task_struct *p) | |
15985 | { | |
15986 | return ({typeof (*p->thread_group.next) *__ptr = (typeof (*p->thread_group.next) *)p->thread_group.next; ({ const typeof( ((struct task_struct *)0)->thread_group ) *__mptr = ((typeof(p->thread_group.next))({ typeof(*(__ptr)) *_________p1 = (typeof(*(__ptr))* )(*(volatile typeof((__ptr)) *)&((__ptr))); do { } while (0); ; do { } while (0); ((typeof(*(__ptr)) *)(_________p1)); })); (struct task_struct *)( (char *)__mptr - __builtin_offsetof(struct task_struct,thread_group) );}); }) | |
15987 | ; | |
15988 | } | |
15989 | static inline __attribute__((always_inline)) int thread_group_empty(struct task_struct *p) | |
15990 | { | |
15991 | return list_empty(&p->thread_group); | |
15992 | } | |
15993 | static inline __attribute__((always_inline)) int task_detached(struct task_struct *p) | |
15994 | { | |
15995 | return p->exit_signal == -1; | |
15996 | } | |
15997 | static inline __attribute__((always_inline)) void task_lock(struct task_struct *p) | |
15998 | { | |
15999 | spin_lock(&p->alloc_lock); | |
16000 | } | |
16001 | static inline __attribute__((always_inline)) void task_unlock(struct task_struct *p) | |
16002 | { | |
16003 | spin_unlock(&p->alloc_lock); | |
16004 | } | |
16005 | extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, | |
16006 | unsigned long *flags); | |
16007 | static inline __attribute__((always_inline)) void unlock_task_sighand(struct task_struct *tsk, | |
16008 | unsigned long *flags) | |
16009 | { | |
16010 | spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); | |
16011 | } | |
16012 | static inline __attribute__((always_inline)) void threadgroup_fork_read_lock(struct task_struct *tsk) | |
16013 | { | |
16014 | down_read(&tsk->signal->threadgroup_fork_lock); | |
16015 | } | |
16016 | static inline __attribute__((always_inline)) void threadgroup_fork_read_unlock(struct task_struct *tsk) | |
16017 | { | |
16018 | up_read(&tsk->signal->threadgroup_fork_lock); | |
16019 | } | |
16020 | static inline __attribute__((always_inline)) void threadgroup_fork_write_lock(struct task_struct *tsk) | |
16021 | { | |
16022 | down_write(&tsk->signal->threadgroup_fork_lock); | |
16023 | } | |
16024 | static inline __attribute__((always_inline)) void threadgroup_fork_write_unlock(struct task_struct *tsk) | |
16025 | { | |
16026 | up_write(&tsk->signal->threadgroup_fork_lock); | |
16027 | } | |
16028 | static inline __attribute__((always_inline)) void setup_thread_stack(struct task_struct *p, struct task_struct *org) | |
16029 | { | |
16030 | *((struct thread_info *)(p)->stack) = *((struct thread_info *)(org)->stack); | |
16031 | ((struct thread_info *)(p)->stack)->task = p; | |
16032 | } | |
16033 | static inline __attribute__((always_inline)) unsigned long *end_of_stack(struct task_struct *p) | |
16034 | { | |
16035 | return (unsigned long *)(((struct thread_info *)(p)->stack) + 1); | |
16036 | } | |
16037 | static inline __attribute__((always_inline)) int object_is_on_stack(void *obj) | |
16038 | { | |
16039 | void *stack = ((get_current())->stack); | |
16040 | return (obj >= stack) && (obj < (stack + (((1UL) << 12) << 1))); | |
16041 | } | |
16042 | extern void thread_info_cache_init(void); | |
16043 | static inline __attribute__((always_inline)) void set_tsk_thread_flag(struct task_struct *tsk, int flag) | |
16044 | { | |
16045 | set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); | |
16046 | } | |
16047 | static inline __attribute__((always_inline)) void clear_tsk_thread_flag(struct task_struct *tsk, int flag) | |
16048 | { | |
16049 | clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); | |
16050 | } | |
16051 | static inline __attribute__((always_inline)) int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) | |
16052 | { | |
16053 | return test_and_set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); | |
16054 | } | |
16055 | static inline __attribute__((always_inline)) int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) | |
16056 | { | |
16057 | return test_and_clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); | |
16058 | } | |
16059 | static inline __attribute__((always_inline)) int test_tsk_thread_flag(struct task_struct *tsk, int flag) | |
16060 | { | |
16061 | return test_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); | |
16062 | } | |
16063 | static inline __attribute__((always_inline)) void set_tsk_need_resched(struct task_struct *tsk) | |
16064 | { | |
16065 | set_tsk_thread_flag(tsk,3); | |
16066 | } | |
16067 | static inline __attribute__((always_inline)) void clear_tsk_need_resched(struct task_struct *tsk) | |
16068 | { | |
16069 | clear_tsk_thread_flag(tsk,3); | |
16070 | } | |
16071 | static inline __attribute__((always_inline)) int test_tsk_need_resched(struct task_struct *tsk) | |
16072 | { | |
16073 | return (__builtin_constant_p(test_tsk_thread_flag(tsk,3)) ? !!(test_tsk_thread_flag(tsk,3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2458, }; ______r = __builtin_expect(!!(test_tsk_thread_flag(tsk,3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); | |
16074 | } | |
16075 | static inline __attribute__((always_inline)) int restart_syscall(void) | |
16076 | { | |
16077 | set_tsk_thread_flag(get_current(), 2); | |
16078 | return -513; | |
16079 | } | |
16080 | static inline __attribute__((always_inline)) int signal_pending(struct task_struct *p) | |
16081 | { | |
16082 | return (__builtin_constant_p(test_tsk_thread_flag(p,2)) ? !!(test_tsk_thread_flag(p,2)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2469, }; ______r = __builtin_expect(!!(test_tsk_thread_flag(p,2)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); | |
16083 | } | |
16084 | static inline __attribute__((always_inline)) int __fatal_signal_pending(struct task_struct *p) | |
16085 | { | |
16086 | return (__builtin_constant_p((__builtin_constant_p(9) ? __const_sigismember((&p->pending.signal), (9)) : __gen_sigismember((&p->pending.signal), (9)))) ? !!((__builtin_constant_p(9) ? __const_sigismember((&p->pending.signal), (9)) : __gen_sigismember((&p->pending.signal), (9)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2474, }; ______r = __builtin_expect(!!((__builtin_constant_p(9) ? __const_sigismember((&p->pending.signal), (9)) : __gen_sigismember((&p->pending.signal), (9)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); | |
16087 | } | |
16088 | static inline __attribute__((always_inline)) int fatal_signal_pending(struct task_struct *p) | |
16089 | { | |
16090 | return signal_pending(p) && __fatal_signal_pending(p); | |
16091 | } | |
16092 | static inline __attribute__((always_inline)) int signal_pending_state(long state, struct task_struct *p) | |
16093 | { | |
16094 | if (__builtin_constant_p(((!(state & (1 | 128))))) ? !!((!(state & (1 | 128)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2484, }; ______r = !!((!(state & (1 | 128)))); ______f.miss_hit[______r]++; ______r; })) | |
16095 | return 0; | |
16096 | if (__builtin_constant_p(((!signal_pending(p)))) ? !!((!signal_pending(p))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2486, }; ______r = !!((!signal_pending(p))); ______f.miss_hit[______r]++; ______r; })) | |
16097 | return 0; | |
16098 | return (state & 1) || __fatal_signal_pending(p); | |
16099 | } | |
16100 | static inline __attribute__((always_inline)) int need_resched(void) | |
16101 | { | |
16102 | return (__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2494, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); | |
16103 | } | |
16104 | extern int _cond_resched(void); | |
16105 | extern int __cond_resched_lock(spinlock_t *lock); | |
16106 | extern int __cond_resched_softirq(void); | |
16107 | static inline __attribute__((always_inline)) int spin_needbreak(spinlock_t *lock) | |
16108 | { | |
16109 | return spin_is_contended(lock); | |
16110 | } | |
16111 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); | |
16112 | void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); | |
16113 | static inline __attribute__((always_inline)) void thread_group_cputime_init(struct signal_struct *sig) | |
16114 | { | |
16115 | do { spinlock_check(&sig->cputimer.lock); do { static struct lock_class_key __key; __raw_spin_lock_init((&(&sig->cputimer.lock)->rlock), "&(&sig->cputimer.lock)->rlock", &__key); } while (0); } while (0); | |
16116 | } | |
16117 | extern void recalc_sigpending_and_wake(struct task_struct *t); | |
16118 | extern void recalc_sigpending(void); | |
16119 | extern void signal_wake_up(struct task_struct *t, int resume_stopped); | |
16120 | static inline __attribute__((always_inline)) unsigned int task_cpu(const struct task_struct *p) | |
16121 | { | |
16122 | return ((struct thread_info *)(p)->stack)->cpu; | |
16123 | } | |
16124 | extern void set_task_cpu(struct task_struct *p, unsigned int cpu); | |
16125 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); | |
16126 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); | |
16127 | extern void normalize_rt_tasks(void); | |
16128 | extern struct task_group root_task_group; | |
16129 | extern struct task_group *sched_create_group(struct task_group *parent); | |
16130 | extern void sched_destroy_group(struct task_group *tg); | |
16131 | extern void sched_move_task(struct task_struct *tsk); | |
16132 | extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); | |
16133 | extern unsigned long sched_group_shares(struct task_group *tg); | |
16134 | extern int task_can_switch_user(struct user_struct *up, | |
16135 | struct task_struct *tsk); | |
16136 | static inline __attribute__((always_inline)) void add_rchar(struct task_struct *tsk, ssize_t amt) | |
16137 | { | |
16138 | tsk->ioac.rchar += amt; | |
16139 | } | |
16140 | static inline __attribute__((always_inline)) void add_wchar(struct task_struct *tsk, ssize_t amt) | |
16141 | { | |
16142 | tsk->ioac.wchar += amt; | |
16143 | } | |
16144 | static inline __attribute__((always_inline)) void inc_syscr(struct task_struct *tsk) | |
16145 | { | |
16146 | tsk->ioac.syscr++; | |
16147 | } | |
16148 | static inline __attribute__((always_inline)) void inc_syscw(struct task_struct *tsk) | |
16149 | { | |
16150 | tsk->ioac.syscw++; | |
16151 | } | |
16152 | static inline __attribute__((always_inline)) void mm_update_next_owner(struct mm_struct *mm) | |
16153 | { | |
16154 | } | |
16155 | static inline __attribute__((always_inline)) void mm_init_owner(struct mm_struct *mm, struct task_struct *p) | |
16156 | { | |
16157 | } | |
16158 | static inline __attribute__((always_inline)) unsigned long task_rlimit(const struct task_struct *tsk, | |
16159 | unsigned int limit) | |
16160 | { | |
16161 | return (*(volatile typeof(tsk->signal->rlim[limit].rlim_cur) *)&(tsk->signal->rlim[limit].rlim_cur)); | |
16162 | } | |
16163 | static inline __attribute__((always_inline)) unsigned long task_rlimit_max(const struct task_struct *tsk, | |
16164 | unsigned int limit) | |
16165 | { | |
16166 | return (*(volatile typeof(tsk->signal->rlim[limit].rlim_max) *)&(tsk->signal->rlim[limit].rlim_max)); | |
16167 | } | |
16168 | static inline __attribute__((always_inline)) unsigned long rlimit(unsigned int limit) | |
16169 | { | |
16170 | return task_rlimit(get_current(), limit); | |
16171 | } | |
16172 | static inline __attribute__((always_inline)) unsigned long rlimit_max(unsigned int limit) | |
16173 | { | |
16174 | return task_rlimit_max(get_current(), limit); | |
16175 | } | |
16176 | struct irqaction; | |
16177 | struct softirq_action; | |
16178 | extern struct tracepoint | |
16179 | __tracepoint_irq_handler_entry | |
16180 | ; static inline __attribute__((always_inline)) void | |
16181 | trace_irq_handler_entry | |
16182 | (int irq, struct irqaction *action) { if (__builtin_constant_p(((static_branch(&__tracepoint_irq_handler_entry.key)))) ? !!((static_branch(&__tracepoint_irq_handler_entry.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16183 | "include/trace/events/irq.h" | |
16184 | , .line = | |
16185 | 54 | |
16186 | , }; ______r = !!((static_branch(&__tracepoint_irq_handler_entry.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16187 | "include/trace/events/irq.h" | |
16188 | , .line = | |
16189 | 54 | |
16190 | , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_irq_handler_entry)->funcs)) *_________p1 = (typeof(*((&__tracepoint_irq_handler_entry)->funcs))* )(*(volatile typeof(((&__tracepoint_irq_handler_entry)->funcs)) *)&(((&__tracepoint_irq_handler_entry)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_irq_handler_entry)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16191 | "include/trace/events/irq.h" | |
16192 | , .line = | |
16193 | 54 | |
16194 | , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, int irq, struct irqaction *action))(it_func))(__data, irq, action); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int | |
16195 | register_trace_irq_handler_entry | |
16196 | (void (*probe)(void *__data, int irq, struct irqaction *action), void *data) { return tracepoint_probe_register("irq_handler_entry", (void *)probe, data); } static inline __attribute__((always_inline)) int | |
16197 | unregister_trace_irq_handler_entry | |
16198 | (void (*probe)(void *__data, int irq, struct irqaction *action), void *data) { return tracepoint_probe_unregister("irq_handler_entry", (void *)probe, data); } static inline __attribute__((always_inline)) void | |
16199 | check_trace_callback_type_irq_handler_entry | |
16200 | (void (*cb)(void *__data, int irq, struct irqaction *action)) { } | |
16201 | ; | |
16202 | extern struct tracepoint | |
16203 | __tracepoint_irq_handler_exit | |
16204 | ; static inline __attribute__((always_inline)) void | |
16205 | trace_irq_handler_exit | |
16206 | (int irq, struct irqaction *action, int ret) { if (__builtin_constant_p(((static_branch(&__tracepoint_irq_handler_exit.key)))) ? !!((static_branch(&__tracepoint_irq_handler_exit.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16207 | "include/trace/events/irq.h" | |
16208 | , .line = | |
16209 | 85 | |
16210 | , }; ______r = !!((static_branch(&__tracepoint_irq_handler_exit.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16211 | "include/trace/events/irq.h" | |
16212 | , .line = | |
16213 | 85 | |
16214 | , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_irq_handler_exit)->funcs)) *_________p1 = (typeof(*((&__tracepoint_irq_handler_exit)->funcs))* )(*(volatile typeof(((&__tracepoint_irq_handler_exit)->funcs)) *)&(((&__tracepoint_irq_handler_exit)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_irq_handler_exit)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16215 | "include/trace/events/irq.h" | |
16216 | , .line = | |
16217 | 85 | |
16218 | , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, int irq, struct irqaction *action, int ret))(it_func))(__data, irq, action, ret); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int | |
16219 | register_trace_irq_handler_exit | |
16220 | (void (*probe)(void *__data, int irq, struct irqaction *action, int ret), void *data) { return tracepoint_probe_register("irq_handler_exit", (void *)probe, data); } static inline __attribute__((always_inline)) int | |
16221 | unregister_trace_irq_handler_exit | |
16222 | (void (*probe)(void *__data, int irq, struct irqaction *action, int ret), void *data) { return tracepoint_probe_unregister("irq_handler_exit", (void *)probe, data); } static inline __attribute__((always_inline)) void | |
16223 | check_trace_callback_type_irq_handler_exit | |
16224 | (void (*cb)(void *__data, int irq, struct irqaction *action, int ret)) { } | |
16225 | ; | |
16226 | ; | |
16227 | extern struct tracepoint | |
16228 | __tracepoint_softirq_entry | |
16229 | ; static inline __attribute__((always_inline)) void | |
16230 | trace_softirq_entry | |
16231 | (unsigned int vec_nr) { if (__builtin_constant_p(((static_branch(&__tracepoint_softirq_entry.key)))) ? !!((static_branch(&__tracepoint_softirq_entry.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16232 | "include/trace/events/irq.h" | |
16233 | , .line = | |
16234 | 117 | |
16235 | , }; ______r = !!((static_branch(&__tracepoint_softirq_entry.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16236 | "include/trace/events/irq.h" | |
16237 | , .line = | |
16238 | 117 | |
16239 | , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_softirq_entry)->funcs)) *_________p1 = (typeof(*((&__tracepoint_softirq_entry)->funcs))* )(*(volatile typeof(((&__tracepoint_softirq_entry)->funcs)) *)&(((&__tracepoint_softirq_entry)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_softirq_entry)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16240 | "include/trace/events/irq.h" | |
16241 | , .line = | |
16242 | 117 | |
16243 | , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, unsigned int vec_nr))(it_func))(__data, vec_nr); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int | |
16244 | register_trace_softirq_entry | |
16245 | (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_register("softirq_entry", (void *)probe, data); } static inline __attribute__((always_inline)) int | |
16246 | unregister_trace_softirq_entry | |
16247 | (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_unregister("softirq_entry", (void *)probe, data); } static inline __attribute__((always_inline)) void | |
16248 | check_trace_callback_type_softirq_entry | |
16249 | (void (*cb)(void *__data, unsigned int vec_nr)) { } | |
16250 | ; | |
16251 | extern struct tracepoint | |
16252 | __tracepoint_softirq_exit | |
16253 | ; static inline __attribute__((always_inline)) void | |
16254 | trace_softirq_exit | |
16255 | (unsigned int vec_nr) { if (__builtin_constant_p(((static_branch(&__tracepoint_softirq_exit.key)))) ? !!((static_branch(&__tracepoint_softirq_exit.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16256 | "include/trace/events/irq.h" | |
16257 | , .line = | |
16258 | 131 | |
16259 | , }; ______r = !!((static_branch(&__tracepoint_softirq_exit.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16260 | "include/trace/events/irq.h" | |
16261 | , .line = | |
16262 | 131 | |
16263 | , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_softirq_exit)->funcs)) *_________p1 = (typeof(*((&__tracepoint_softirq_exit)->funcs))* )(*(volatile typeof(((&__tracepoint_softirq_exit)->funcs)) *)&(((&__tracepoint_softirq_exit)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_softirq_exit)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16264 | "include/trace/events/irq.h" | |
16265 | , .line = | |
16266 | 131 | |
16267 | , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, unsigned int vec_nr))(it_func))(__data, vec_nr); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int | |
16268 | register_trace_softirq_exit | |
16269 | (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_register("softirq_exit", (void *)probe, data); } static inline __attribute__((always_inline)) int | |
16270 | unregister_trace_softirq_exit | |
16271 | (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_unregister("softirq_exit", (void *)probe, data); } static inline __attribute__((always_inline)) void | |
16272 | check_trace_callback_type_softirq_exit | |
16273 | (void (*cb)(void *__data, unsigned int vec_nr)) { } | |
16274 | ; | |
16275 | extern struct tracepoint | |
16276 | __tracepoint_softirq_raise | |
16277 | ; static inline __attribute__((always_inline)) void | |
16278 | trace_softirq_raise | |
16279 | (unsigned int vec_nr) { if (__builtin_constant_p(((static_branch(&__tracepoint_softirq_raise.key)))) ? !!((static_branch(&__tracepoint_softirq_raise.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16280 | "include/trace/events/irq.h" | |
16281 | , .line = | |
16282 | 145 | |
16283 | , }; ______r = !!((static_branch(&__tracepoint_softirq_raise.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16284 | "include/trace/events/irq.h" | |
16285 | , .line = | |
16286 | 145 | |
16287 | , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_softirq_raise)->funcs)) *_________p1 = (typeof(*((&__tracepoint_softirq_raise)->funcs))* )(*(volatile typeof(((&__tracepoint_softirq_raise)->funcs)) *)&(((&__tracepoint_softirq_raise)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_softirq_raise)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
16288 | "include/trace/events/irq.h" | |
16289 | , .line = | |
16290 | 145 | |
16291 | , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, unsigned int vec_nr))(it_func))(__data, vec_nr); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int | |
16292 | register_trace_softirq_raise | |
16293 | (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_register("softirq_raise", (void *)probe, data); } static inline __attribute__((always_inline)) int | |
16294 | unregister_trace_softirq_raise | |
16295 | (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_unregister("softirq_raise", (void *)probe, data); } static inline __attribute__((always_inline)) void | |
16296 | check_trace_callback_type_softirq_raise | |
16297 | (void (*cb)(void *__data, unsigned int vec_nr)) { } | |
16298 | ; | |
16299 | enum { | |
16300 | IRQC_IS_HARDIRQ = 0, | |
16301 | IRQC_IS_NESTED, | |
16302 | }; | |
16303 | typedef irqreturn_t (*irq_handler_t)(int, void *); | |
16304 | struct irqaction { | |
16305 | irq_handler_t handler; | |
16306 | unsigned long flags; | |
16307 | void *dev_id; | |
16308 | struct irqaction *next; | |
16309 | int irq; | |
16310 | irq_handler_t thread_fn; | |
16311 | struct task_struct *thread; | |
16312 | unsigned long thread_flags; | |
16313 | unsigned long thread_mask; | |
16314 | const char *name; | |
16315 | struct proc_dir_entry *dir; | |
16316 | } __attribute__((__aligned__(1 << (6)))); | |
16317 | extern irqreturn_t no_action(int cpl, void *dev_id); | |
16318 | extern int __attribute__((warn_unused_result)) | |
16319 | request_threaded_irq(unsigned int irq, irq_handler_t handler, | |
16320 | irq_handler_t thread_fn, | |
16321 | unsigned long flags, const char *name, void *dev); | |
16322 | static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) | |
16323 | request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, | |
16324 | const char *name, void *dev) | |
16325 | { | |
16326 | return request_threaded_irq(irq, handler, ((void *)0), flags, name, dev); | |
16327 | } | |
16328 | extern int __attribute__((warn_unused_result)) | |
16329 | request_any_context_irq(unsigned int irq, irq_handler_t handler, | |
16330 | unsigned long flags, const char *name, void *dev_id); | |
16331 | extern void exit_irq_thread(void); | |
16332 | extern void free_irq(unsigned int, void *); | |
16333 | struct device; | |
16334 | extern int __attribute__((warn_unused_result)) | |
16335 | devm_request_threaded_irq(struct device *dev, unsigned int irq, | |
16336 | irq_handler_t handler, irq_handler_t thread_fn, | |
16337 | unsigned long irqflags, const char *devname, | |
16338 | void *dev_id); | |
16339 | static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) | |
16340 | devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, | |
16341 | unsigned long irqflags, const char *devname, void *dev_id) | |
16342 | { | |
16343 | return devm_request_threaded_irq(dev, irq, handler, ((void *)0), irqflags, | |
16344 | devname, dev_id); | |
16345 | } | |
16346 | extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); | |
16347 | extern void disable_irq_nosync(unsigned int irq); | |
16348 | extern void disable_irq(unsigned int irq); | |
16349 | extern void enable_irq(unsigned int irq); | |
16350 | extern void suspend_device_irqs(void); | |
16351 | extern void resume_device_irqs(void); | |
16352 | extern int check_wakeup_irqs(void); | |
16353 | extern cpumask_var_t irq_default_affinity; | |
16354 | extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); | |
16355 | extern int irq_can_set_affinity(unsigned int irq); | |
16356 | extern int irq_select_affinity(unsigned int irq); | |
16357 | extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); | |
16358 | struct irq_affinity_notify { | |
16359 | unsigned int irq; | |
16360 | struct kref kref; | |
16361 | struct work_struct work; | |
16362 | void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); | |
16363 | void (*release)(struct kref *ref); | |
16364 | }; | |
16365 | extern int | |
16366 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); | |
16367 | static inline __attribute__((always_inline)) void irq_run_affinity_notifiers(void) | |
16368 | { | |
16369 | flush_scheduled_work(); | |
16370 | } | |
16371 | static inline __attribute__((always_inline)) void disable_irq_nosync_lockdep(unsigned int irq) | |
16372 | { | |
16373 | disable_irq_nosync(irq); | |
16374 | do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0); | |
16375 | } | |
16376 | static inline __attribute__((always_inline)) void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) | |
16377 | { | |
16378 | disable_irq_nosync(irq); | |
16379 | do { do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); *flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); | |
16380 | } | |
16381 | static inline __attribute__((always_inline)) void disable_irq_lockdep(unsigned int irq) | |
16382 | { | |
16383 | disable_irq(irq); | |
16384 | do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0); | |
16385 | } | |
16386 | static inline __attribute__((always_inline)) void enable_irq_lockdep(unsigned int irq) | |
16387 | { | |
16388 | do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0); | |
16389 | enable_irq(irq); | |
16390 | } | |
16391 | static inline __attribute__((always_inline)) void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) | |
16392 | { | |
16393 | do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(*flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(*flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 333, }; ______r = !!((({ ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(*flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(*flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(*flags); } while (0); } } while (0); | |
16394 | enable_irq(irq); | |
16395 | } | |
16396 | extern int irq_set_irq_wake(unsigned int irq, unsigned int on); | |
16397 | static inline __attribute__((always_inline)) int enable_irq_wake(unsigned int irq) | |
16398 | { | |
16399 | return irq_set_irq_wake(irq, 1); | |
16400 | } | |
16401 | static inline __attribute__((always_inline)) int disable_irq_wake(unsigned int irq) | |
16402 | { | |
16403 | return irq_set_irq_wake(irq, 0); | |
16404 | } | |
16405 | extern bool force_irqthreads; | |
16406 | enum | |
16407 | { | |
16408 | HI_SOFTIRQ=0, | |
16409 | TIMER_SOFTIRQ, | |
16410 | NET_TX_SOFTIRQ, | |
16411 | NET_RX_SOFTIRQ, | |
16412 | BLOCK_SOFTIRQ, | |
16413 | BLOCK_IOPOLL_SOFTIRQ, | |
16414 | TASKLET_SOFTIRQ, | |
16415 | SCHED_SOFTIRQ, | |
16416 | HRTIMER_SOFTIRQ, | |
16417 | RCU_SOFTIRQ, | |
16418 | NR_SOFTIRQS | |
16419 | }; | |
16420 | extern char *softirq_to_name[NR_SOFTIRQS]; | |
16421 | struct softirq_action | |
16422 | { | |
16423 | void (*action)(struct softirq_action *); | |
16424 | }; | |
16425 | __attribute__((regparm(0))) void do_softirq(void); | |
16426 | __attribute__((regparm(0))) void __do_softirq(void); | |
16427 | extern void open_softirq(int nr, void (*action)(struct softirq_action *)); | |
16428 | extern void softirq_init(void); | |
16429 | static inline __attribute__((always_inline)) void __raise_softirq_irqoff(unsigned int nr) | |
16430 | { | |
16431 | trace_softirq_raise(nr); | |
16432 | do { typedef typeof(irq_stat.__softirq_pending) pto_T__; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 443, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pto_T__ pto_tmp__; pto_tmp__ = ((1UL << nr)); (void)pto_tmp__; } switch (sizeof(irq_stat.__softirq_pending)) { case 1: asm("or" "b %1,""%%""fs"":" "%P" "0" : "+m" (irq_stat.__softirq_pending) : "qi" ((pto_T__)((1UL << nr)))); break; case 2: asm("or" "w %1,""%%""fs"":" "%P" "0" : "+m" (irq_stat.__softirq_pending) : "ri" ((pto_T__)((1UL << nr)))); break; case 4: asm("or" "l %1,""%%""fs"":" "%P" "0" : "+m" (irq_stat.__softirq_pending) : "ri" ((pto_T__)((1UL << nr)))); break; case 8: asm("or" "q %1,""%%""fs"":" "%P" "0" : "+m" (irq_stat.__softirq_pending) : "re" ((pto_T__)((1UL << nr)))); break; default: __bad_percpu_size(); } } while (0); | |
16433 | } | |
16434 | extern void raise_softirq_irqoff(unsigned int nr); | |
16435 | extern void raise_softirq(unsigned int nr); | |
16436 | extern __attribute__((section(".data..percpu" ""))) __typeof__(struct list_head [NR_SOFTIRQS]) softirq_work_list; | |
16437 | extern __attribute__((section(".data..percpu" ""))) __typeof__(struct task_struct *) ksoftirqd; | |
16438 | static inline __attribute__((always_inline)) struct task_struct *this_cpu_ksoftirqd(void) | |
16439 | { | |
16440 | return ({ typeof((ksoftirqd)) pscr_ret__; do { const void *__vpp_verify = (typeof(&((ksoftirqd))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((ksoftirqd))) { case 1: pscr_ret__ = ({ typeof(((ksoftirqd))) pfo_ret__; switch (sizeof(((ksoftirqd)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((ksoftirqd))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 2: pscr_ret__ = ({ typeof(((ksoftirqd))) pfo_ret__; switch (sizeof(((ksoftirqd)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((ksoftirqd))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 4: pscr_ret__ = ({ typeof(((ksoftirqd))) pfo_ret__; switch (sizeof(((ksoftirqd)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((ksoftirqd))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 8: pscr_ret__ = ({ typeof((ksoftirqd)) ret__; do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); ret__ = *({ do { const void *__vpp_verify = (typeof((&((ksoftirqd)))))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((ksoftirqd)))) *)(&((ksoftirqd))))); (typeof((typeof(*(&((ksoftirqd)))) *)(&((ksoftirqd))))) (__ptr + (((__per_cpu_offset[debug_smp_processor_id()])))); }); }); do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 462, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 462, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 462, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 462, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); ret__; });break; default: __bad_size_call_parameter();break; } pscr_ret__; }); | |
16441 | } | |
16442 | extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq); | |
16443 | extern void __send_remote_softirq(struct call_single_data *cp, int cpu, | |
16444 | int this_cpu, int softirq); | |
16445 | struct tasklet_struct | |
16446 | { | |
16447 | struct tasklet_struct *next; | |
16448 | unsigned long state; | |
16449 | atomic_t count; | |
16450 | void (*func)(unsigned long); | |
16451 | unsigned long data; | |
16452 | }; | |
16453 | enum | |
16454 | { | |
16455 | TASKLET_STATE_SCHED, | |
16456 | TASKLET_STATE_RUN | |
16457 | }; | |
16458 | static inline __attribute__((always_inline)) int tasklet_trylock(struct tasklet_struct *t) | |
16459 | { | |
16460 | return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); | |
16461 | } | |
16462 | static inline __attribute__((always_inline)) void tasklet_unlock(struct tasklet_struct *t) | |
16463 | { | |
16464 | __asm__ __volatile__("": : :"memory"); | |
16465 | clear_bit(TASKLET_STATE_RUN, &(t)->state); | |
16466 | } | |
16467 | static inline __attribute__((always_inline)) void tasklet_unlock_wait(struct tasklet_struct *t) | |
16468 | { | |
16469 | while ((__builtin_constant_p((TASKLET_STATE_RUN)) ? constant_test_bit((TASKLET_STATE_RUN), (&(t)->state)) : variable_test_bit((TASKLET_STATE_RUN), (&(t)->state)))) { __asm__ __volatile__("": : :"memory"); } | |
16470 | } | |
16471 | extern void __tasklet_schedule(struct tasklet_struct *t); | |
16472 | static inline __attribute__((always_inline)) void tasklet_schedule(struct tasklet_struct *t) | |
16473 | { | |
16474 | if (__builtin_constant_p(((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)))) ? !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 544, }; ______r = !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))); ______f.miss_hit[______r]++; ______r; })) | |
16475 | __tasklet_schedule(t); | |
16476 | } | |
16477 | extern void __tasklet_hi_schedule(struct tasklet_struct *t); | |
16478 | static inline __attribute__((always_inline)) void tasklet_hi_schedule(struct tasklet_struct *t) | |
16479 | { | |
16480 | if (__builtin_constant_p(((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)))) ? !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 552, }; ______r = !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))); ______f.miss_hit[______r]++; ______r; })) | |
16481 | __tasklet_hi_schedule(t); | |
16482 | } | |
16483 | extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); | |
16484 | static inline __attribute__((always_inline)) void tasklet_hi_schedule_first(struct tasklet_struct *t) | |
16485 | { | |
16486 | if (__builtin_constant_p(((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)))) ? !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 566, }; ______r = !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))); ______f.miss_hit[______r]++; ______r; })) | |
16487 | __tasklet_hi_schedule_first(t); | |
16488 | } | |
16489 | static inline __attribute__((always_inline)) void tasklet_disable_nosync(struct tasklet_struct *t) | |
16490 | { | |
16491 | atomic_inc(&t->count); | |
16492 | __asm__ __volatile__("": : :"memory"); | |
16493 | } | |
16494 | static inline __attribute__((always_inline)) void tasklet_disable(struct tasklet_struct *t) | |
16495 | { | |
16496 | tasklet_disable_nosync(t); | |
16497 | tasklet_unlock_wait(t); | |
16498 | asm volatile ("661:\n\t" "lock; addl $0,0(%%esp)" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(0*32+26)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "mfence" "\n664:\n" ".previous" : : : "memory"); | |
16499 | } | |
16500 | static inline __attribute__((always_inline)) void tasklet_enable(struct tasklet_struct *t) | |
16501 | { | |
16502 | __asm__ __volatile__("": : :"memory"); | |
16503 | atomic_dec(&t->count); | |
16504 | } | |
16505 | static inline __attribute__((always_inline)) void tasklet_hi_enable(struct tasklet_struct *t) | |
16506 | { | |
16507 | __asm__ __volatile__("": : :"memory"); | |
16508 | atomic_dec(&t->count); | |
16509 | } | |
16510 | extern void tasklet_kill(struct tasklet_struct *t); | |
16511 | extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); | |
16512 | extern void tasklet_init(struct tasklet_struct *t, | |
16513 | void (*func)(unsigned long), unsigned long data); | |
16514 | struct tasklet_hrtimer { | |
16515 | struct hrtimer timer; | |
16516 | struct tasklet_struct tasklet; | |
16517 | enum hrtimer_restart (*function)(struct hrtimer *); | |
16518 | }; | |
16519 | extern void | |
16520 | tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, | |
16521 | enum hrtimer_restart (*function)(struct hrtimer *), | |
16522 | clockid_t which_clock, enum hrtimer_mode mode); | |
16523 | static inline __attribute__((always_inline)) | |
16524 | int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, | |
16525 | const enum hrtimer_mode mode) | |
16526 | { | |
16527 | return hrtimer_start(&ttimer->timer, time, mode); | |
16528 | } | |
16529 | static inline __attribute__((always_inline)) | |
16530 | void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) | |
16531 | { | |
16532 | hrtimer_cancel(&ttimer->timer); | |
16533 | tasklet_kill(&ttimer->tasklet); | |
16534 | } | |
16535 | extern unsigned long probe_irq_on(void); | |
16536 | extern int probe_irq_off(unsigned long); | |
16537 | extern unsigned int probe_irq_mask(unsigned long); | |
16538 | extern void init_irq_proc(void); | |
16539 | struct seq_file; | |
16540 | int show_interrupts(struct seq_file *p, void *v); | |
16541 | int arch_show_interrupts(struct seq_file *p, int prec); | |
16542 | extern int early_irq_init(void); | |
16543 | extern int arch_probe_nr_irqs(void); | |
16544 | extern int arch_early_irq_init(void); | |
16545 | struct cpu_usage_stat { | |
16546 | cputime64_t user; | |
16547 | cputime64_t nice; | |
16548 | cputime64_t system; | |
16549 | cputime64_t softirq; | |
16550 | cputime64_t irq; | |
16551 | cputime64_t idle; | |
16552 | cputime64_t iowait; | |
16553 | cputime64_t steal; | |
16554 | cputime64_t guest; | |
16555 | cputime64_t guest_nice; | |
16556 | }; | |
16557 | struct kernel_stat { | |
16558 | struct cpu_usage_stat cpustat; | |
16559 | unsigned long irqs_sum; | |
16560 | unsigned int softirqs[NR_SOFTIRQS]; | |
16561 | }; | |
16562 | extern __attribute__((section(".data..percpu" ""))) __typeof__(struct kernel_stat) kstat; | |
16563 | extern unsigned long long nr_context_switches(void); | |
16564 | extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); | |
16565 | static inline __attribute__((always_inline)) void kstat_incr_softirqs_this_cpu(unsigned int irq) | |
16566 | { | |
16567 | do { do { const void *__vpp_verify = (typeof(&(((kstat.softirqs[irq])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((kstat.softirqs[irq])))) { case 1: do { typedef typeof((((kstat.softirqs[irq])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((kstat.softirqs[irq]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((kstat.softirqs[irq])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((kstat.softirqs[irq]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((kstat.softirqs[irq])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((kstat.softirqs[irq]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((kstat.softirqs[irq]))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((kstat.softirqs[irq])))))); (typeof(*(&((((kstat.softirqs[irq])))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); | |
16568 | } | |
16569 | static inline __attribute__((always_inline)) unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) | |
16570 | { | |
16571 | return (*({ do { const void *__vpp_verify = (typeof((&(kstat))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(kstat))) *)(&(kstat)))); (typeof((typeof(*(&(kstat))) *)(&(kstat)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).softirqs[irq]; | |
16572 | } | |
16573 | extern unsigned int kstat_irqs(unsigned int irq); | |
/*
 * Return the total interrupt count (.irqs_sum) accounted on @cpu.
 *
 * Preprocessed per_cpu() expansion, identical in structure to
 * kstat_softirqs_cpu() above: resolve the remote CPU's `kstat` via
 * __per_cpu_offset[cpu] and read its irqs_sum field.
 */
static inline __attribute__((always_inline)) unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
 return (*({ do { const void *__vpp_verify = (typeof((&(kstat))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(kstat))) *)(&(kstat)))); (typeof((typeof(*(&(kstat))) *)(&(kstat)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).irqs_sum;
}
16578 | extern unsigned long long task_delta_exec(struct task_struct *); | |
16579 | extern void account_user_time(struct task_struct *, cputime_t, cputime_t); | |
16580 | extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); | |
16581 | extern void account_steal_time(cputime_t); | |
16582 | extern void account_idle_time(cputime_t); | |
16583 | extern void account_process_tick(struct task_struct *, int user); | |
16584 | extern void account_steal_ticks(unsigned long ticks); | |
16585 | extern void account_idle_ticks(unsigned long ticks); | |
16586 | struct task_struct; | |
16587 | struct user_regset; | |
16588 | typedef int user_regset_active_fn(struct task_struct *target, | |
16589 | const struct user_regset *regset); | |
16590 | typedef int user_regset_get_fn(struct task_struct *target, | |
16591 | const struct user_regset *regset, | |
16592 | unsigned int pos, unsigned int count, | |
16593 | void *kbuf, void *ubuf); | |
16594 | typedef int user_regset_set_fn(struct task_struct *target, | |
16595 | const struct user_regset *regset, | |
16596 | unsigned int pos, unsigned int count, | |
16597 | const void *kbuf, const void *ubuf); | |
16598 | typedef int user_regset_writeback_fn(struct task_struct *target, | |
16599 | const struct user_regset *regset, | |
16600 | int immediate); | |
/*
 * Descriptor for one machine register set accessible via ptrace /
 * core dumps.  The get/set/active/writeback function-pointer types are
 * declared just above this struct.
 */
struct user_regset {
 user_regset_get_fn *get;   /* read registers into kernel/user buffer */
 user_regset_set_fn *set;   /* write registers from kernel/user buffer */
 user_regset_active_fn *active;  /* optional: how much of the set is live */
 user_regset_writeback_fn *writeback; /* optional: flush cached state */
 unsigned int n;     /* number of slots in the set */
 unsigned int size;    /* size in bytes of one slot */
 unsigned int align;    /* required alignment of the data */
 unsigned int bias;    /* offset bias applied to positions */
 unsigned int core_note_type;  /* ELF note type for core dumps */
};
/*
 * A named collection of register sets plus the ELF identification
 * (e_flags/e_machine/ei_osabi) used when dumping them — one view per
 * supported ABI/personality of a task.
 */
struct user_regset_view {
 const char *name;   /* human-readable view name */
 const struct user_regset *regsets; /* array of @n register sets */
 unsigned int n;    /* number of entries in @regsets */
 u32 e_flags;    /* ELF header e_flags for this view */
 u16 e_machine;    /* ELF machine number */
 u8 ei_osabi;    /* ELF OS ABI byte */
};
16620 | const struct user_regset_view *task_user_regset_view(struct task_struct *tsk); | |
/*
 * Copy the slice [start_pos, end_pos) of @data into the caller's kernel
 * buffer (*kbuf) or user buffer (*ubuf), advancing *pos, *count and the
 * destination pointer by the amount copied.  end_pos < 0 means "no upper
 * bound".  Returns 0 on success, -14 (-EFAULT) if the user copy faults.
 *
 * NOTE(review): this is preprocessed output of user_regset_copyout()
 * from include/linux/regset.h.  Every ({ int ______r; ... }) statement
 * expression is expanded ftrace branch-profiling instrumentation
 * (_ftrace_branch / _ftrace_annotated_branch sections) and must remain
 * token-identical — do not "clean it up".
 */
static inline __attribute__((always_inline)) int user_regset_copyout(unsigned int *pos, unsigned int *count,
          void **kbuf,
          void **ubuf, const void *data,
          const int start_pos, const int end_pos)
{
 /* Nothing requested: trivially done. */
 if (__builtin_constant_p(((*count == 0))) ? !!((*count == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 224, }; ______r = !!((*count == 0)); ______f.miss_hit[______r]++; ______r; }))
  return 0;
 /* Expanded BUG_ON(*pos < start_pos): ud2 trap + __bug_table entry. */
 do { if (__builtin_constant_p((((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 226, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 226, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 226, }; ______r = !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 226, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/regset.h"), "i" (226), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 if (__builtin_constant_p(((end_pos < 0 || *pos < end_pos))) ? !!((end_pos < 0 || *pos < end_pos)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 227, }; ______r = !!((end_pos < 0 || *pos < end_pos)); ______f.miss_hit[______r]++; ______r; })) {
  /* copy = end_pos < 0 ? *count : min(*count, end_pos - *pos) */
  unsigned int copy = (end_pos < 0 ? *count
         : ({ typeof(*count) _min1 = (*count); typeof(end_pos - *pos) _min2 = (end_pos - *pos); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; }));
  data += *pos - start_pos; /* skip already-consumed prefix */
  if (__builtin_constant_p(((*kbuf))) ? !!((*kbuf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 231, }; ______r = !!((*kbuf)); ______f.miss_hit[______r]++; ______r; })) {
   __builtin_memcpy(*kbuf, data, copy);
   *kbuf += copy;
  } else if (__builtin_constant_p(((__copy_to_user(*ubuf, data, copy)))) ? !!((__copy_to_user(*ubuf, data, copy))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 234, }; ______r = !!((__copy_to_user(*ubuf, data, copy))); ______f.miss_hit[______r]++; ______r; }))
   return -14; /* -EFAULT */
  else
   *ubuf += copy;
  *pos += copy;
  *count -= copy;
 }
 return 0;
}
/*
 * Mirror of user_regset_copyout(): copy the slice [start_pos, end_pos)
 * INTO @data from the caller's kernel buffer (*kbuf) or user buffer
 * (*ubuf), advancing *pos, *count and the source pointer.  end_pos < 0
 * means "no upper bound".  Returns 0 on success, -14 (-EFAULT) if the
 * user copy faults.
 *
 * NOTE(review): preprocessed output of user_regset_copyin() from
 * include/linux/regset.h; the ({ ... }) blocks are expanded ftrace
 * branch profiling and must remain token-identical.
 */
static inline __attribute__((always_inline)) int user_regset_copyin(unsigned int *pos, unsigned int *count,
         const void **kbuf,
         const void **ubuf, void *data,
         const int start_pos, const int end_pos)
{
 /* Nothing requested: trivially done. */
 if (__builtin_constant_p(((*count == 0))) ? !!((*count == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 249, }; ______r = !!((*count == 0)); ______f.miss_hit[______r]++; ______r; }))
  return 0;
 /* Expanded BUG_ON(*pos < start_pos). */
 do { if (__builtin_constant_p((((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 251, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 251, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 251, }; ______r = !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 251, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/regset.h"), "i" (251), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 if (__builtin_constant_p(((end_pos < 0 || *pos < end_pos))) ? !!((end_pos < 0 || *pos < end_pos)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 252, }; ______r = !!((end_pos < 0 || *pos < end_pos)); ______f.miss_hit[______r]++; ______r; })) {
  /* copy = end_pos < 0 ? *count : min(*count, end_pos - *pos) */
  unsigned int copy = (end_pos < 0 ? *count
         : ({ typeof(*count) _min1 = (*count); typeof(end_pos - *pos) _min2 = (end_pos - *pos); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; }));
  data += *pos - start_pos; /* skip already-consumed prefix */
  if (__builtin_constant_p(((*kbuf))) ? !!((*kbuf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 256, }; ______r = !!((*kbuf)); ______f.miss_hit[______r]++; ______r; })) {
   __builtin_memcpy(data, *kbuf, copy);
   *kbuf += copy;
  } else if (__builtin_constant_p(((__copy_from_user(data, *ubuf, copy)))) ? !!((__copy_from_user(data, *ubuf, copy))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 259, }; ______r = !!((__copy_from_user(data, *ubuf, copy))); ______f.miss_hit[______r]++; ______r; }))
   return -14; /* -EFAULT */
  else
   *ubuf += copy;
  *pos += copy;
  *count -= copy;
 }
 return 0;
}
/*
 * Like user_regset_copyout(), but the slice [start_pos, end_pos) is
 * filled with zeroes instead of real data: memset for a kernel buffer,
 * __clear_user for a user buffer.  Advances *pos, *count and the
 * destination pointer; returns 0 on success, -14 (-EFAULT) on fault.
 *
 * NOTE(review): preprocessed output of user_regset_copyout_zero() from
 * include/linux/regset.h; the ({ ... }) blocks are expanded ftrace
 * branch profiling and must remain token-identical.
 */
static inline __attribute__((always_inline)) int user_regset_copyout_zero(unsigned int *pos,
           unsigned int *count,
           void **kbuf, void **ubuf,
           const int start_pos,
           const int end_pos)
{
 /* Nothing requested: trivially done. */
 if (__builtin_constant_p(((*count == 0))) ? !!((*count == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 279, }; ______r = !!((*count == 0)); ______f.miss_hit[______r]++; ______r; }))
  return 0;
 /* Expanded BUG_ON(*pos < start_pos). */
 do { if (__builtin_constant_p((((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 281, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 281, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 281, }; ______r = !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 281, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/regset.h"), "i" (281), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 if (__builtin_constant_p(((end_pos < 0 || *pos < end_pos))) ? !!((end_pos < 0 || *pos < end_pos)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 282, }; ______r = !!((end_pos < 0 || *pos < end_pos)); ______f.miss_hit[______r]++; ______r; })) {
  /* copy = end_pos < 0 ? *count : min(*count, end_pos - *pos) */
  unsigned int copy = (end_pos < 0 ? *count
         : ({ typeof(*count) _min1 = (*count); typeof(end_pos - *pos) _min2 = (end_pos - *pos); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; }));
  if (__builtin_constant_p(((*kbuf))) ? !!((*kbuf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 285, }; ______r = !!((*kbuf)); ______f.miss_hit[______r]++; ______r; })) {
   __builtin_memset(*kbuf, 0, copy);
   *kbuf += copy;
  } else if (__builtin_constant_p(((__clear_user(*ubuf, copy)))) ? !!((__clear_user(*ubuf, copy))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 288, }; ______r = !!((__clear_user(*ubuf, copy))); ______f.miss_hit[______r]++; ______r; }))
   return -14; /* -EFAULT */
  else
   *ubuf += copy;
  *pos += copy;
  *count -= copy;
 }
 return 0;
}
/*
 * Skip the slice [start_pos, end_pos) of incoming data without storing
 * it anywhere: only *pos, *count and the active source pointer (*kbuf
 * or *ubuf) are advanced.  Always returns 0 (no data is touched, so no
 * fault is possible).
 *
 * NOTE(review): preprocessed output of user_regset_copyin_ignore() from
 * include/linux/regset.h; the ({ ... }) blocks are expanded ftrace
 * branch profiling and must remain token-identical.
 */
static inline __attribute__((always_inline)) int user_regset_copyin_ignore(unsigned int *pos,
            unsigned int *count,
            const void **kbuf,
            const void **ubuf,
            const int start_pos,
            const int end_pos)
{
 /* Nothing requested: trivially done. */
 if (__builtin_constant_p(((*count == 0))) ? !!((*count == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 305, }; ______r = !!((*count == 0)); ______f.miss_hit[______r]++; ______r; }))
  return 0;
 /* Expanded BUG_ON(*pos < start_pos). */
 do { if (__builtin_constant_p((((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 307, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 307, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 307, }; ______r = !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 307, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/regset.h"), "i" (307), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 if (__builtin_constant_p(((end_pos < 0 || *pos < end_pos))) ? !!((end_pos < 0 || *pos < end_pos)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 308, }; ______r = !!((end_pos < 0 || *pos < end_pos)); ______f.miss_hit[______r]++; ______r; })) {
  /* copy = end_pos < 0 ? *count : min(*count, end_pos - *pos) */
  unsigned int copy = (end_pos < 0 ? *count
         : ({ typeof(*count) _min1 = (*count); typeof(end_pos - *pos) _min2 = (end_pos - *pos); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; }));
  if (__builtin_constant_p(((*kbuf))) ? !!((*kbuf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 311, }; ______r = !!((*kbuf)); ______f.miss_hit[______r]++; ______r; }))
   *kbuf += copy;
  else
   *ubuf += copy;
  *pos += copy;
  *count -= copy;
 }
 return 0;
}
/*
 * Fetch register set @setno of @view for @target and copy @size bytes
 * starting at @offset to the user buffer @data via regset->get().
 * Returns -5 (-EIO) if the user range fails the access check, otherwise
 * whatever regset->get() returns.
 *
 * NOTE(review): preprocessed output of copy_regset_to_user() from
 * include/linux/regset.h.  The huge condition below is the expanded
 * access_ok() range check (the "add/sbb/cmp/sbb" asm against
 * current_thread_info()->addr_limit.seg) wrapped in ftrace branch
 * profiling; it must remain token-identical.
 */
static inline __attribute__((always_inline)) int copy_regset_to_user(struct task_struct *target,
         const struct user_regset_view *view,
         unsigned int setno,
         unsigned int offset, unsigned int size,
         void *data)
{
 const struct user_regset *regset = &view->regsets[setno];
 if (__builtin_constant_p(((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? !!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 338, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))))) ? !!((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ?
!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 338, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 338, }; ______r = !!((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ?
!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 338, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))); ______f.miss_hit[______r]++; ______r; }))
  return -5; /* -EIO: user range not accessible */
 return regset->get(target, regset, offset, size, ((void *)0), data);
}
/*
 * Mirror of copy_regset_to_user(): read @size bytes at @offset of the
 * user buffer @data into register set @setno of @view for @target via
 * regset->set().  Returns -5 (-EIO) if the user range fails the access
 * check, otherwise whatever regset->set() returns.
 *
 * NOTE(review): preprocessed output of copy_regset_from_user() from
 * include/linux/regset.h.  The condition below is the expanded
 * access_ok() range check wrapped in ftrace branch profiling; it must
 * remain token-identical.
 */
static inline __attribute__((always_inline)) int copy_regset_from_user(struct task_struct *target,
           const struct user_regset_view *view,
           unsigned int setno,
           unsigned int offset, unsigned int size,
           const void *data)
{
 const struct user_regset *regset = &view->regsets[setno];
 if (__builtin_constant_p(((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? !!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 361, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))))) ? !!((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ?
!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 361, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 361, }; ______r = !!((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ?
!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 361, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))); ______f.miss_hit[______r]++; ______r; }))
  return -5; /* -EIO: user range not accessible */
 return regset->set(target, regset, offset, size, ((void *)0), data);
}
16737 | extern unsigned int xstate_size; | |
16738 | extern u64 pcntxt_mask; | |
16739 | extern u64 xstate_fx_sw_bytes[6]; | |
16740 | extern void xsave_init(void); | |
16741 | extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask); | |
16742 | extern int init_fpu(struct task_struct *child); | |
16743 | extern int check_for_xstate(struct i387_fxsave_struct *buf, | |
16744 | void *fpstate, | |
16745 | struct _fpx_sw_bytes *sw); | |
/*
 * Restore @fpu's extended state with a fault-checked XRSTOR
 * (bytes 0x0f,0xae,0x2f operating on %rdi/%edi) using the full feature
 * mask (eax = edx = -1).  A fault at label 1 is redirected through the
 * __ex_table fixup at label 3, which sets err = -1.
 * Returns 0 on success, -1 if the restore faulted.
 */
static inline __attribute__((always_inline)) int fpu_xrstor_checking(struct fpu *fpu)
{
 struct xsave_struct *fx = &fpu->state->xsave;
 int err;
 asm volatile("1: .byte " "0x0f,0xae,0x2f\n\t"
       "2:\n"
       ".section .fixup,\"ax\"\n"
       "3: movl $-1,%[err]\n"
       " jmp 2b\n"
       ".previous\n"
       " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n"
       : [err] "=r" (err)
       : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
       : "memory");
 return err;
}
/*
 * Save the current task's extended processor state directly into the
 * user-space buffer @buf with a fault-checked XSAVE
 * (bytes 0x0f,0xae,0x27), after first clearing the xsave header so
 * stale user data cannot leak into it.  On any fault the buffer is
 * wiped with __clear_user().  Returns 0 on success, -14 (-EFAULT) on
 * fault.
 *
 * NOTE(review): preprocessed output of xsave_user() from
 * arch/x86/include/asm/xsave.h; the ({ ... }) blocks are expanded
 * ftrace branch profiling (unlikely(err)) and must remain
 * token-identical.
 */
static inline __attribute__((always_inline)) int xsave_user(struct xsave_struct *buf)
{
 int err;
 /* Clear the header to avoid leaking stale bytes to the XSAVE area. */
 err = __clear_user(&buf->xsave_hdr,
      sizeof(struct xsave_hdr_struct));
 if (__builtin_constant_p((((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = !!(((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
  return -14; /* -EFAULT */
 /* XSAVE to user memory with fault fixup setting err = -1. */
 __asm__ __volatile__("1: .byte " "0x0f,0xae,0x27\n"
        "2:\n"
        ".section .fixup,\"ax\"\n"
        "3: movl $-1,%[err]\n"
        " jmp 2b\n"
        ".previous\n"
        ".section __ex_table,\"a\"\n"
        " " ".balign 4" " " "\n"
        " " ".long" " " "1b,3b\n"
        ".previous"
        : [err] "=r" (err)
        : "D" (buf), "a" (-1), "d" (-1), "0" (0)
        : "memory");
 /* On fault, wipe the partially-written user buffer as well. */
 if (__builtin_constant_p((((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 90, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __clear_user(buf, xstate_size)))) ? !!(((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 90, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __clear_user(buf, xstate_size))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 90, }; ______r = !!(((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __clear_user(buf, xstate_size))); ______f.miss_hit[______r]++; ______r; }))
  err = -14;
 return err;
}
/*
 * Restore extended state for features in @mask from the user-space
 * buffer @buf with a fault-checked XRSTOR (bytes 0x0f,0xae,0x2f).
 * The 64-bit mask is split into edx:eax (hmask:lmask) as the
 * instruction requires.  A fault is redirected through the __ex_table
 * fixup, which sets err = -1.  Returns 0 on success, -1 on fault.
 */
static inline __attribute__((always_inline)) int xrestore_user(struct xsave_struct *buf, u64 mask)
{
 int err;
 struct xsave_struct *xstate = (( struct xsave_struct *)buf);
 u32 lmask = mask;
 u32 hmask = mask >> 32;
 __asm__ __volatile__("1: .byte " "0x0f,0xae,0x2f\n"
        "2:\n"
        ".section .fixup,\"ax\"\n"
        "3: movl $-1,%[err]\n"
        " jmp 2b\n"
        ".previous\n"
        ".section __ex_table,\"a\"\n"
        " " ".balign 4" " " "\n"
        " " ".long" " " "1b,3b\n"
        ".previous"
        : [err] "=r" (err)
        : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
        : "memory");
 return err;
}
16807 | static inline __attribute__((always_inline)) void xrstor_state(struct xsave_struct *fx, u64 mask) | |
16808 | { | |
16809 | u32 lmask = mask; | |
16810 | u32 hmask = mask >> 32; | |
16811 | asm volatile(".byte " "0x0f,0xae,0x2f\n\t" | |
16812 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | |
16813 | : "memory"); | |
16814 | } | |
16815 | static inline __attribute__((always_inline)) void xsave_state(struct xsave_struct *fx, u64 mask) | |
16816 | { | |
16817 | u32 lmask = mask; | |
16818 | u32 hmask = mask >> 32; | |
16819 | asm volatile(".byte " "0x0f,0xae,0x27\n\t" | |
16820 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | |
16821 | : "memory"); | |
16822 | } | |
/*
 * Save @fpu's extended state with the full feature mask
 * (eax = edx = -1).
 *
 * This is the expanded alternative() macro: the original instruction
 * is XSAVE (0x0f,0xae,0x27); the .altinstructions / .altinstr_replacement
 * sections let the kernel patch it at boot to the replacement bytes
 * (0x0f,0xae,0x37 — XSAVEOPT) when CPU feature bit (7*32+4) is set.
 * The expansion must remain token-identical.
 */
static inline __attribute__((always_inline)) void fpu_xsave(struct fpu *fpu)
{
 asm volatile ("661:\n\t" ".byte " "0x0f,0xae,0x27" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(7*32+ 4)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" ".byte " "0x0f,0xae,0x37" "\n664:\n" ".previous" : : "i" (0), [fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) : "memory")
 ;
}
/*
 * Public i387/FPU interface: sizes, init/restore entry points and the
 * regset accessors; all implemented elsewhere in arch/x86 kernel code.
 */
extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern __attribute__((regparm(0))) void math_state_restore(void);
extern void __math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
xstateregs_set;
extern struct _fpx_sw_bytes fx_sw_reserved;
/* Soft-FPU (math emulation) init hook: deliberately a no-op in this build. */
static inline __attribute__((always_inline)) void finit_soft_fpu(struct i387_soft_struct *soft) {}
/*
 * Preprocessed expansion of static_cpu_has() for feature bit (7*32+4):
 * true when the CPU supports XSAVEOPT.  The constant-folding arms test
 * compile-time-known capability masks; otherwise the bit is read from
 * boot_cpu_data.x86_capability, or resolved via __static_cpu_has()
 * (boot-time code patching) when the bit number is a constant.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) bool use_xsaveopt(void)
{
    return ( __builtin_constant_p((__builtin_constant_p((7*32+ 4)) && ( ((((7*32+ 4))>>5)==0 && (1UL<<(((7*32+ 4))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((7*32+ 4))>>5)==1 && (1UL<<(((7*32+ 4))&31) & (0|0))) || ((((7*32+ 4))>>5)==2 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==3 && (1UL<<(((7*32+ 4))&31) & (0))) || ((((7*32+ 4))>>5)==4 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==5 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==6 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==7 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==8 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==9 && (1UL<<(((7*32+ 4))&31) & 0)) ) ? 1 : (__builtin_constant_p(((7*32+ 4))) ? constant_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))) ? (__builtin_constant_p((7*32+ 4)) && ( ((((7*32+ 4))>>5)==0 && (1UL<<(((7*32+ 4))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((7*32+ 4))>>5)==1 && (1UL<<(((7*32+ 4))&31) & (0|0))) || ((((7*32+ 4))>>5)==2 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==3 && (1UL<<(((7*32+ 4))&31) & (0))) || ((((7*32+ 4))>>5)==4 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==5 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==6 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==7 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==8 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==9 && (1UL<<(((7*32+ 4))&31) & 0)) ) ? 1 : (__builtin_constant_p(((7*32+ 4))) ? constant_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) : __builtin_constant_p((7*32+ 4)) ?
__static_cpu_has((7*32+ 4)) : (__builtin_constant_p((7*32+ 4)) && ( ((((7*32+ 4))>>5)==0 && (1UL<<(((7*32+ 4))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((7*32+ 4))>>5)==1 && (1UL<<(((7*32+ 4))&31) & (0|0))) || ((((7*32+ 4))>>5)==2 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==3 && (1UL<<(((7*32+ 4))&31) & (0))) || ((((7*32+ 4))>>5)==4 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==5 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==6 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==7 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==8 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==9 && (1UL<<(((7*32+ 4))&31) & 0)) ) ? 1 : (__builtin_constant_p(((7*32+ 4))) ? constant_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) );
}
/*
 * Preprocessed expansion of static_cpu_has() for feature bit (4*32+26):
 * true when the CPU supports XSAVE/XRSTOR.  Same constant-folding /
 * runtime-capability-test / __static_cpu_has() structure as use_xsaveopt().
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) bool use_xsave(void)
{
    return ( __builtin_constant_p((__builtin_constant_p((4*32+26)) && ( ((((4*32+26))>>5)==0 && (1UL<<(((4*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+26))>>5)==1 && (1UL<<(((4*32+26))&31) & (0|0))) || ((((4*32+26))>>5)==2 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==3 && (1UL<<(((4*32+26))&31) & (0))) || ((((4*32+26))>>5)==4 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==5 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==6 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==7 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==8 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==9 && (1UL<<(((4*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+26))) ? constant_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))) ? (__builtin_constant_p((4*32+26)) && ( ((((4*32+26))>>5)==0 && (1UL<<(((4*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+26))>>5)==1 && (1UL<<(((4*32+26))&31) & (0|0))) || ((((4*32+26))>>5)==2 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==3 && (1UL<<(((4*32+26))&31) & (0))) || ((((4*32+26))>>5)==4 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==5 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==6 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==7 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==8 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==9 && (1UL<<(((4*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+26))) ? constant_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) : __builtin_constant_p((4*32+26)) ?
__static_cpu_has((4*32+26)) : (__builtin_constant_p((4*32+26)) && ( ((((4*32+26))>>5)==0 && (1UL<<(((4*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+26))>>5)==1 && (1UL<<(((4*32+26))&31) & (0|0))) || ((((4*32+26))>>5)==2 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==3 && (1UL<<(((4*32+26))&31) & (0))) || ((((4*32+26))>>5)==4 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==5 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==6 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==7 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==8 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==9 && (1UL<<(((4*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+26))) ? constant_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) );
}
/*
 * Preprocessed expansion of static_cpu_has() for feature bit (0*32+24):
 * true when the CPU supports FXSAVE/FXRSTOR.  Same structure as
 * use_xsave()/use_xsaveopt() above.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) bool use_fxsr(void)
{
    return ( __builtin_constant_p((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))) ? (__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ?
constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) : __builtin_constant_p((0*32+24)) ?
__static_cpu_has((0*32+24)) : (__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) );
}
/* Out-of-line worker for sanitize_i387_state() below. */
extern void __sanitize_i387_state(struct task_struct *);
/*
 * Fix up @tsk's xsave buffer via __sanitize_i387_state() — but only when
 * the CPU uses XSAVEOPT (otherwise the buffer needs no sanitizing and we
 * return early).  The unwieldy condition is the ftrace branch-profiling
 * expansion of if(!use_xsaveopt()): it records miss/hit counts in a
 * static ftrace_branch_data tagged with this file/line.
 */
static inline __attribute__((always_inline)) void sanitize_i387_state(struct task_struct *tsk)
{
    if (__builtin_constant_p(((!use_xsaveopt()))) ? !!((!use_xsaveopt())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 85, }; ______r = !!((!use_xsaveopt())); ______f.miss_hit[______r]++; ______r; }))
        return;
    __sanitize_i387_state(tsk);
}
/*
 * Restore FP state from @fx.  Alternatives asm: the default instruction
 * is "nop ; frstor %1" (plain x87 restore); when feature bit (0*32+24)
 * (FXSR) is present it is patched to "fxrstor %1" at boot.
 * NOTE(review): despite the "_checking" name this 32-bit variant has no
 * exception-table fixup and always returns 0; presumably the suffix
 * mirrors the fault-reporting 64-bit version — confirm against upstream.
 */
static inline __attribute__((always_inline)) int fxrstor_checking(struct i387_fxsave_struct *fx)
{
    asm volatile ("661:\n\t" "nop ; frstor %1" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(0*32+24)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "fxrstor %1" "\n664:\n" ".previous" : : "i" (0), "m" (*fx))
    ;
    return 0;
}
/* Save FPU/SSE state with FXSAVE into @fpu's fxsave area. */
static inline __attribute__((always_inline)) void fpu_fxsave(struct fpu *fpu)
{
    asm volatile("fxsave %[fx]"
        : [fx] "=m" (fpu->state->fxsave));
}
/*
 * Save @fpu's register state by the best available mechanism:
 *   - XSAVE path: fpu_xsave(); if the saved xstate_bv says no x87 state
 *     was live (bit 0x1 clear), skip the x87 exception cleanup below.
 *   - FXSR path: fpu_fxsave().
 *   - Legacy path: fnsave + fwait (fnsave also reinitializes the FPU,
 *     so nothing further to do — return).
 * After fxsave/xsave: if the saved status word has the exception-summary
 * bit (1 << 7) set, clear pending exceptions with fnclex.  The final
 * alternatives asm is two NOP encodings by default, patched (feature
 * bit 3*32+10) to "emms; fildl" — presumably the AMD FXSAVE-leak
 * workaround; confirm against arch/x86 history.
 * The giant ?: conditions are ftrace branch-profiling expansions of
 * plain if()s on use_xsave()/use_fxsr()/the swd test.
 */
static inline __attribute__((always_inline)) void fpu_save_init(struct fpu *fpu)
{
    if (__builtin_constant_p(((use_xsave()))) ? !!((use_xsave())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 229, }; ______r = !!((use_xsave())); ______f.miss_hit[______r]++; ______r; })) {
        fpu_xsave(fpu);
        if (__builtin_constant_p(((!(fpu->state->xsave.xsave_hdr.xstate_bv & 0x1)))) ? !!((!(fpu->state->xsave.xsave_hdr.xstate_bv & 0x1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 235, }; ______r = !!((!(fpu->state->xsave.xsave_hdr.xstate_bv & 0x1))); ______f.miss_hit[______r]++; ______r; }))
            return;
    } else if (__builtin_constant_p(((use_fxsr()))) ? !!((use_fxsr())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 237, }; ______r = !!((use_fxsr())); ______f.miss_hit[______r]++; ______r; })) {
        fpu_fxsave(fpu);
    } else {
        asm volatile("fnsave %[fx]; fwait"
            : [fx] "=m" (fpu->state->fsave));
        return;
    }
    if (__builtin_constant_p((((__builtin_constant_p(fpu->state->fxsave.swd & (1 << 7)) ? !!(fpu->state->fxsave.swd & (1 << 7)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 245, }; ______r = __builtin_expect(!!(fpu->state->fxsave.swd & (1 << 7)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(fpu->state->fxsave.swd & (1 << 7)) ? !!(fpu->state->fxsave.swd & (1 << 7)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 245, }; ______r = __builtin_expect(!!(fpu->state->fxsave.swd & (1 << 7)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 245, }; ______r = !!(((__builtin_constant_p(fpu->state->fxsave.swd & (1 << 7)) ? !!(fpu->state->fxsave.swd & (1 << 7)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 245, }; ______r = __builtin_expect(!!(fpu->state->fxsave.swd & (1 << 7)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
        asm volatile("fnclex");
    asm volatile ("661:\n\t" ".byte " "0x90,0x8d,0xb4,0x26,0x00,0x00,0x00,0x00" "\n" ".byte " "0x89,0xf6" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(3*32+10)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "emms\n\t" "fildl %P[addr]" "\n664:\n" ".previous" : : "i" (0), [addr] "m" ((__per_cpu_offset[0])))
    ;
}
16890 | static inline __attribute__((always_inline)) void __save_init_fpu(struct task_struct *tsk) | |
16891 | { | |
16892 | fpu_save_init(&tsk->thread.fpu); | |
16893 | ((struct thread_info *)(tsk)->stack)->status &= ~0x0001; | |
16894 | } | |
16895 | static inline __attribute__((always_inline)) int fpu_fxrstor_checking(struct fpu *fpu) | |
16896 | { | |
16897 | return fxrstor_checking(&fpu->state->fxsave); | |
16898 | } | |
/*
 * Restore @fpu's state using XRSTOR when the CPU supports XSAVE,
 * falling back to the FXRSTOR path otherwise.  The condition is the
 * ftrace branch-profiling expansion of if(use_xsave()).
 */
static inline __attribute__((always_inline)) int fpu_restore_checking(struct fpu *fpu)
{
    if (__builtin_constant_p(((use_xsave()))) ? !!((use_xsave())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 272, }; ______r = !!((use_xsave())); ______f.miss_hit[______r]++; ______r; }))
        return fpu_xrstor_checking(fpu);
    else
        return fpu_fxrstor_checking(fpu);
}
16906 | static inline __attribute__((always_inline)) int restore_fpu_checking(struct task_struct *tsk) | |
16907 | { | |
16908 | return fpu_restore_checking(&tsk->thread.fpu); | |
16909 | } | |
/* Signal-frame xstate save/restore helpers, implemented out of line. */
extern int save_i387_xstate(void *buf);
extern int restore_i387_xstate(void *buf);
/*
 * If @tsk currently owns the FPU (thread_info status bit 0x0001 set):
 * save its state and set CR0.TS (0x00000008) so the next FPU use traps.
 * Otherwise just reset its fpu_counter.  Caller must have preemption
 * disabled.  The condition is the ftrace branch-profiling expansion of
 * the status-bit test.
 */
static inline __attribute__((always_inline)) void __unlazy_fpu(struct task_struct *tsk)
{
    if (__builtin_constant_p(((((struct thread_info *)(tsk)->stack)->status & 0x0001))) ? !!((((struct thread_info *)(tsk)->stack)->status & 0x0001)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 291, }; ______r = !!((((struct thread_info *)(tsk)->stack)->status & 0x0001)); ______f.miss_hit[______r]++; ______r; })) {
        __save_init_fpu(tsk);
        write_cr0(read_cr0() | 0x00000008);
    } else
        tsk->fpu_counter = 0;
}
/*
 * Discard @tsk's live FPU state without saving it.  If the task owns the
 * FPU (status bit 0x0001): run fwait (with an __ex_table fixup so a
 * pending-exception fault is swallowed), clear the ownership bit and set
 * CR0.TS (0x00000008).  Caller must have preemption disabled.
 */
static inline __attribute__((always_inline)) void __clear_fpu(struct task_struct *tsk)
{
    if (__builtin_constant_p(((((struct thread_info *)(tsk)->stack)->status & 0x0001))) ? !!((((struct thread_info *)(tsk)->stack)->status & 0x0001)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 300, }; ______r = !!((((struct thread_info *)(tsk)->stack)->status & 0x0001)); ______f.miss_hit[______r]++; ______r; })) {
        asm volatile("1: fwait\n"
            "2:\n"
            " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "2b" "\n" " .previous\n");
        ((struct thread_info *)(tsk)->stack)->status &= ~0x0001;
        write_cr0(read_cr0() | 0x00000008);
    }
}
/*
 * Start using the FPU from kernel context.  Disables preemption
 * (add_preempt_count(1) + compiler barrier — the expanded
 * preempt_disable()), then either saves the current task's live user
 * FPU state (status bit 0x0001) or clears CR0.TS via clts() so FPU
 * instructions won't trap.  Pair with kernel_fpu_end().
 */
static inline __attribute__((always_inline)) void kernel_fpu_begin(void)
{
    struct thread_info *me = current_thread_info();
    do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
    if (__builtin_constant_p(((me->status & 0x0001))) ? !!((me->status & 0x0001)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 314, }; ______r = !!((me->status & 0x0001)); ______f.miss_hit[______r]++; ______r; }))
        __save_init_fpu(me->task);
    else
        clts();
}
/*
 * Finish a kernel_fpu_begin() section: re-set CR0.TS (0x00000008) so
 * the next user FPU use traps and restores state lazily, then re-enable
 * preemption.  The second statement is the full preempt_enable()
 * expansion: sub_preempt_count(1), barriers, and a preempt_schedule()
 * call if TIF_NEED_RESCHED (thread flag 3) became set — wrapped in
 * ftrace branch-profiling bookkeeping.
 */
static inline __attribute__((always_inline)) void kernel_fpu_end(void)
{
    write_cr0(read_cr0() | 0x00000008);
    do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 323, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 323, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 323, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ?
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 323, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
16944 | static inline __attribute__((always_inline)) bool irq_fpu_usable(void) | |
16945 | { | |
16946 | struct pt_regs *regs; | |
16947 | return !(((current_thread_info()->preempt_count) & ((((1UL << (10))-1) << ((0 + 8) + 8)) | (((1UL << (8))-1) << (0 + 8)) | (((1UL << (1))-1) << (((0 + 8) + 8) + 10))))) || !(regs = get_irq_regs()) || | |
16948 | user_mode(regs) || (read_cr0() & 0x00000008); | |
16949 | } | |
/*
 * From process context only (preempt_count, ignoring the 0x10000000
 * PREEMPT_ACTIVE bit, must be zero — otherwise return 0 immediately):
 * if CR0.TS (0x00000008) is set, clear it with clts() and return 1 so
 * the caller can restore it via irq_ts_restore().  Returns 0 when TS
 * was already clear.  Both conditions are ftrace branch-profiling
 * expansions of plain if()s.
 */
static inline __attribute__((always_inline)) int irq_ts_save(void)
{
    if (__builtin_constant_p(((!(((current_thread_info()->preempt_count) & ~0x10000000) != 0)))) ? !!((!(((current_thread_info()->preempt_count) & ~0x10000000) != 0))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 348, }; ______r = !!((!(((current_thread_info()->preempt_count) & ~0x10000000) != 0))); ______f.miss_hit[______r]++; ______r; }))
        return 0;
    if (__builtin_constant_p(((read_cr0() & 0x00000008))) ? !!((read_cr0() & 0x00000008)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 351, }; ______r = !!((read_cr0() & 0x00000008)); ______f.miss_hit[______r]++; ______r; })) {
        clts();
        return 1;
    }
    return 0;
}
/*
 * Undo irq_ts_save(): if @TS_state is nonzero (TS was set before),
 * re-set CR0.TS (0x00000008).
 */
static inline __attribute__((always_inline)) void irq_ts_restore(int TS_state)
{
    if (__builtin_constant_p(((TS_state))) ? !!((TS_state)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 361, }; ______r = !!((TS_state)); ______f.miss_hit[______r]++; ______r; }))
        write_cr0(read_cr0() | 0x00000008);
}
/*
 * Preemption-safe wrapper: disable preemption, save @tsk's FPU state
 * (__save_init_fpu), set CR0.TS (0x00000008), then re-enable preemption.
 * The two do/while statements are the expanded preempt_disable() /
 * preempt_enable() pair, the latter including the need-resched check
 * (thread flag 3) with ftrace branch profiling.
 */
static inline __attribute__((always_inline)) void save_init_fpu(struct task_struct *tsk)
{
    do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
    __save_init_fpu(tsk);
    write_cr0(read_cr0() | 0x00000008);
    do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 373, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 373, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 373, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ?
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 373, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * Preemption-safe wrapper around __unlazy_fpu(): the surrounding
 * do/while blocks are the expanded preempt_disable()/preempt_enable()
 * pair (with need-resched check, thread flag 3, and ftrace branch
 * profiling).
 */
static inline __attribute__((always_inline)) void unlazy_fpu(struct task_struct *tsk)
{
    do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
    __unlazy_fpu(tsk);
    do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 380, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 380, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 380, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ?
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 380, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * Preemption-safe wrapper around __clear_fpu(): discard @tsk's FPU
 * state under the expanded preempt_disable()/preempt_enable() pair
 * (need-resched check on thread flag 3, ftrace branch profiling).
 */
static inline __attribute__((always_inline)) void clear_fpu(struct task_struct *tsk)
{
    do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
    __clear_fpu(tsk);
    do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 387, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 387, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 387, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ?
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 387, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * Return @tsk's FPU control word: from the fxsave area when the CPU has
 * FXSR (the giant condition is the cpu_has(feature bit 0*32+24) /
 * ftrace-branch-profiling expansion), otherwise from the legacy fsave
 * area (whose cwd field is wider, hence the unsigned short cast).
 */
static inline __attribute__((always_inline)) unsigned short get_fpu_cwd(struct task_struct *tsk)
{
    if (__builtin_constant_p((((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ?
constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 395, }; ______r = !!(((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) {
        return tsk->thread.fpu.state->fxsave.cwd;
    } else {
        return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
    }
}
/*
 * get_fpu_swd - read @tsk's FPU status word from its saved FPU state.
 *
 * Same expanded CPU-feature test as get_fpu_cwd (bit 0*32+24 of
 * boot_cpu_data.x86_capability) plus ftrace branch profiling
 * (.line = 404 of the original i387.h).  Viewer "NNNNN |" columns are
 * residue, not code.
 */
16992 | static inline __attribute__((always_inline)) unsigned short get_fpu_swd(struct task_struct *tsk) | 
16993 | { | 
16994 | if (__builtin_constant_p((((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? 
constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 404, }; ______r = !!(((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) { | 
/* Feature bit set: FXSAVE image. */
16995 | return tsk->thread.fpu.state->fxsave.swd; | 
16996 | } else { | 
/* Legacy FSAVE image; truncate wider field to 16 bits. */
16997 | return (unsigned short)tsk->thread.fpu.state->fsave.swd; | 
16998 | } | 
16999 | } | 
/*
 * get_fpu_mxcsr - read @tsk's MXCSR (SSE control/status) register image.
 *
 * Expanded CPU-feature test for bit (0*32+25) of
 * boot_cpu_data.x86_capability (presumably X86_FEATURE_XMM / SSE -
 * inferred, confirm against the original header), plus ftrace branch
 * profiling (.line = 413).  Without SSE there is no MXCSR, so the
 * architectural default 0x1f80 is returned.  "NNNNN |" columns are
 * viewer residue, not code.
 */
17000 | static inline __attribute__((always_inline)) unsigned short get_fpu_mxcsr(struct task_struct *tsk) | 
17001 | { | 
17002 | if (__builtin_constant_p((((__builtin_constant_p((0*32+25)) && ( ((((0*32+25))>>5)==0 && (1UL<<(((0*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+25))>>5)==1 && (1UL<<(((0*32+25))&31) & (0|0))) || ((((0*32+25))>>5)==2 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==3 && (1UL<<(((0*32+25))&31) & (0))) || ((((0*32+25))>>5)==4 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==5 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==6 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==7 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==8 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==9 && (1UL<<(((0*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+25))) ? constant_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+25)) && ( ((((0*32+25))>>5)==0 && (1UL<<(((0*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+25))>>5)==1 && (1UL<<(((0*32+25))&31) & (0|0))) || ((((0*32+25))>>5)==2 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==3 && (1UL<<(((0*32+25))&31) & (0))) || ((((0*32+25))>>5)==4 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==5 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==6 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==7 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==8 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==9 && (1UL<<(((0*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+25))) ? 
constant_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 413, }; ______r = !!(((__builtin_constant_p((0*32+25)) && ( ((((0*32+25))>>5)==0 && (1UL<<(((0*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+25))>>5)==1 && (1UL<<(((0*32+25))&31) & (0|0))) || ((((0*32+25))>>5)==2 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==3 && (1UL<<(((0*32+25))&31) & (0))) || ((((0*32+25))>>5)==4 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==5 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==6 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==7 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==8 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==9 && (1UL<<(((0*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+25))) ? constant_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) { | 
/* SSE present: MXCSR is stored in the FXSAVE image. */
17003 | return tsk->thread.fpu.state->fxsave.mxcsr; | 
17004 | } else { | 
/* No SSE: report the architectural reset value of MXCSR. */
17005 | return 0x1f80; | 
17006 | } | 
17007 | } | 
17008 | static bool fpu_allocated(struct fpu *fpu) | |
17009 | { | |
17010 | return fpu->state != ((void *)0); | |
17011 | } | |
/*
 * fpu_alloc - lazily allocate the xstate buffer for @fpu.
 *
 * Returns 0 if the buffer already exists or was allocated, -12 (-ENOMEM)
 * if kmem_cache_alloc() fails.  The expanded WARN_ON() checks that the
 * allocation is 16-byte aligned ((unsigned long)fpu->state & 15).  The
 * if-conditions and WARN_ON carry expanded ftrace branch-profiler
 * instrumentation; "NNNNN |" columns are viewer residue, not code.
 */
17012 | static inline __attribute__((always_inline)) int fpu_alloc(struct fpu *fpu) | 
17013 | { | 
/* Already allocated: nothing to do. */
17014 | if (__builtin_constant_p(((fpu_allocated(fpu)))) ? !!((fpu_allocated(fpu))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 427, }; ______r = !!((fpu_allocated(fpu))); ______f.miss_hit[______r]++; ______r; })) | 
17015 | return 0; | 
/* GFP flags 0x10|0x40|0x80: expansion of GFP_KERNEL. */
17016 | fpu->state = kmem_cache_alloc(task_xstate_cachep, ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); | 
17017 | if (__builtin_constant_p(((!fpu->state))) ? !!((!fpu->state)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 430, }; ______r = !!((!fpu->state)); ______f.miss_hit[______r]++; ______r; })) | 
/* -12 == -ENOMEM */
17018 | return -12; | 
/* Expanded WARN_ON(): complain (warn_slowpath_null) if the buffer is not
 * 16-byte aligned, as required by fxsave/fxrstor. */
17019 | ({ int __ret_warn_on = !!((unsigned long)fpu->state & 15); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", 432); (__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); }); | 
17020 | return 0; | 
17021 | } | 
/*
 * fpu_free - release @fpu's xstate buffer, if any, back to the slab cache.
 *
 * Safe to call when no buffer is allocated; ->state is reset to NULL so a
 * later fpu_allocated()/fpu_alloc() sees the fpu as empty.  The condition
 * carries expanded ftrace branch-profiler instrumentation; "NNNNN |"
 * columns are viewer residue, not code.
 */
17022 | static inline __attribute__((always_inline)) void fpu_free(struct fpu *fpu) | 
17023 | { | 
17024 | if (__builtin_constant_p(((fpu->state))) ? !!((fpu->state)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 438, }; ______r = !!((fpu->state)); ______f.miss_hit[______r]++; ______r; })) { | 
17025 | kmem_cache_free(task_xstate_cachep, fpu->state); | 
/* Clear the pointer to prevent double-free / use-after-free. */
17026 | fpu->state = ((void *)0); | 
17027 | } | 
17028 | } | 
17029 | static inline __attribute__((always_inline)) void fpu_copy(struct fpu *dst, struct fpu *src) | |
17030 | { | |
17031 | __builtin_memcpy(dst->state, src->state, xstate_size); | |
17032 | } | |
17033 | extern void fpu_finit(struct fpu *fpu); | |
17034 | void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, | |
17035 | const u8 *src); | |
17036 | void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, | |
17037 | const u8 *src); | |
17038 | struct bio_set; | |
17039 | struct bio; | |
17040 | struct bio_integrity_payload; | |
17041 | struct page; | |
17042 | struct block_device; | |
17043 | typedef void (bio_end_io_t) (struct bio *, int); | |
17044 | typedef void (bio_destructor_t) (struct bio *); | |
/*
 * struct bio_vec - one segment of a block I/O request: a (page, offset,
 * length) triple describing a contiguous chunk of data within a page.
 */
17045 | struct bio_vec { | 
/* the page holding the data */
17046 | struct page *bv_page; | 
/* number of bytes in this segment */
17047 | unsigned int bv_len; | 
/* byte offset of the data within bv_page */
17048 | unsigned int bv_offset; | 
17049 | }; | 
/*
 * struct bio - an in-flight block I/O request: a list of bio_vec
 * segments targeted at a device, plus bookkeeping for splitting,
 * merging and completion.
 */
17050 | struct bio { | 
/* device sector this I/O starts at */
17051 | sector_t bi_sector; | 
/* chained bios (request queue linkage) */
17052 | struct bio *bi_next; | 
/* target block device */
17053 | struct block_device *bi_bdev; | 
/* status/state flags */
17054 | unsigned long bi_flags; | 
/* read/write and REQ_* operation bits (see enum rq_flag_bits) */
17055 | unsigned long bi_rw; | 
/* number of bio_vecs in bi_io_vec */
17056 | unsigned short bi_vcnt; | 
/* current index into bi_io_vec while completing */
17057 | unsigned short bi_idx; | 
/* number of physical segments after coalescing */
17058 | unsigned int bi_phys_segments; | 
/* total residual byte count of the I/O */
17059 | unsigned int bi_size; | 
/* size of the first mergeable segment */
17060 | unsigned int bi_seg_front_size; | 
/* size of the last mergeable segment */
17061 | unsigned int bi_seg_back_size; | 
/* capacity of bi_io_vec */
17062 | unsigned int bi_max_vecs; | 
/* CPU hint for completion */
17063 | unsigned int bi_comp_cpu; | 
/* reference count */
17064 | atomic_t bi_cnt; | 
/* the segment array itself */
17065 | struct bio_vec *bi_io_vec; | 
/* completion callback */
17066 | bio_end_io_t *bi_end_io; | 
/* owner-private data for the callback */
17067 | void *bi_private; | 
/* destructor invoked when bi_cnt drops to zero */
17068 | bio_destructor_t *bi_destructor; | 
/* flexible array of inline vecs allocated together with the bio */
17069 | struct bio_vec bi_inline_vecs[0]; | 
17070 | }; | 
/*
 * enum rq_flag_bits - bit numbers for request/bio operation flags
 * (the values carried in bio->bi_rw and the request flags).  Each
 * enumerator is a bit *index*; the corresponding mask is (1 << bit).
 */
17071 | enum rq_flag_bits { | 
/* data direction: set = write */
17072 | __REQ_WRITE, | 
/* no driver/transport/device retries on failure */
17073 | __REQ_FAILFAST_DEV, | 
17074 | __REQ_FAILFAST_TRANSPORT, | 
17075 | __REQ_FAILFAST_DRIVER, | 
/* synchronous I/O hint */
17076 | __REQ_SYNC, | 
/* filesystem metadata */
17077 | __REQ_META, | 
/* discard (trim) request */
17078 | __REQ_DISCARD, | 
17079 | __REQ_NOIDLE, | 
/* read-ahead, may be silently dropped */
17080 | __REQ_RAHEAD, | 
17081 | __REQ_THROTTLED, | 
/* elevator/queue state bits */
17082 | __REQ_SORTED, | 
17083 | __REQ_SOFTBARRIER, | 
/* forced unit access */
17084 | __REQ_FUA, | 
17085 | __REQ_NOMERGE, | 
17086 | __REQ_STARTED, | 
17087 | __REQ_DONTPREP, | 
17088 | __REQ_QUEUED, | 
17089 | __REQ_ELVPRIV, | 
17090 | __REQ_FAILED, | 
17091 | __REQ_QUIET, | 
17092 | __REQ_PREEMPT, | 
17093 | __REQ_ALLOCED, | 
17094 | __REQ_COPY_USER, | 
/* cache-flush handling */
17095 | __REQ_FLUSH, | 
17096 | __REQ_FLUSH_SEQ, | 
17097 | __REQ_IO_STAT, | 
17098 | __REQ_MIXED_MERGE, | 
/* secure discard */
17099 | __REQ_SECURE, | 
/* number of defined bits; must stay last */
17100 | __REQ_NR_BITS, | 
17101 | }; | 
/*
 * struct fstrim_range - argument of the FITRIM ioctl: byte range of the
 * filesystem to discard, with a minimum contiguous extent size.
 */
17102 | struct fstrim_range { | 
/* first byte to trim */
17103 | __u64 start; | 
/* number of bytes to examine */
17104 | __u64 len; | 
/* skip extents smaller than this */
17105 | __u64 minlen; | 
17106 | }; | 
/*
 * struct files_stat_struct - system-wide open-file accounting, as
 * exported via /proc/sys/fs/file-nr.
 */
17107 | struct files_stat_struct { | 
/* file objects currently allocated */
17108 | unsigned long nr_files; | 
/* allocated but unused file objects */
17109 | unsigned long nr_free_files; | 
/* system-wide limit on file objects */
17110 | unsigned long max_files; | 
17111 | }; | 
/*
 * struct inodes_stat_t - system-wide inode accounting
 * (/proc/sys/fs/inode-nr layout; dummy[] pads the legacy ABI).
 */
17112 | struct inodes_stat_t { | 
17113 | int nr_inodes; | 
17114 | int nr_unused; | 
/* preserved padding for the fixed-size proc ABI */
17115 | int dummy[5]; | 
17116 | }; | 
/*
 * old_valid_dev - can @dev be represented in the legacy 16-bit (8:8)
 * major:minor encoding?  True iff both major and minor are below 256.
 */
static inline __attribute__((always_inline)) int old_valid_dev(dev_t dev)
{
	unsigned int major = (unsigned int)(dev >> 20);
	unsigned int minor = (unsigned int)(dev & ((1U << 20) - 1));

	return major < 256 && minor < 256;
}
17121 | static inline __attribute__((always_inline)) u16 old_encode_dev(dev_t dev) | |
17122 | { | |
17123 | return (((unsigned int) ((dev) >> 20)) << 8) | ((unsigned int) ((dev) & ((1U << 20) - 1))); | |
17124 | } | |
17125 | static inline __attribute__((always_inline)) dev_t old_decode_dev(u16 val) | |
17126 | { | |
17127 | return ((((val >> 8) & 255) << 20) | (val & 255)); | |
17128 | } | |
/*
 * new_valid_dev - every dev_t fits the new 32-bit encoding, so this is
 * unconditionally true; it exists for symmetry with old_valid_dev().
 */
static inline __attribute__((always_inline)) int new_valid_dev(dev_t dev)
{
	(void)dev;	/* all values are representable */
	return 1;
}
17133 | static inline __attribute__((always_inline)) u32 new_encode_dev(dev_t dev) | |
17134 | { | |
17135 | unsigned major = ((unsigned int) ((dev) >> 20)); | |
17136 | unsigned minor = ((unsigned int) ((dev) & ((1U << 20) - 1))); | |
17137 | return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); | |
17138 | } | |
17139 | static inline __attribute__((always_inline)) dev_t new_decode_dev(u32 dev) | |
17140 | { | |
17141 | unsigned major = (dev & 0xfff00) >> 8; | |
17142 | unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); | |
17143 | return (((major) << 20) | (minor)); | |
17144 | } | |
/*
 * huge_valid_dev - every dev_t fits the 64-bit encoding; always true,
 * mirroring new_valid_dev().
 */
static inline __attribute__((always_inline)) int huge_valid_dev(dev_t dev)
{
	(void)dev;	/* all values are representable */
	return 1;
}
17149 | static inline __attribute__((always_inline)) u64 huge_encode_dev(dev_t dev) | |
17150 | { | |
17151 | return new_encode_dev(dev); | |
17152 | } | |
17153 | static inline __attribute__((always_inline)) dev_t huge_decode_dev(u64 dev) | |
17154 | { | |
17155 | return new_decode_dev(dev); | |
17156 | } | |
/*
 * sysv_valid_dev - can @dev be represented in the SysV 32-bit layout
 * (14-bit major, 18-bit minor)?
 */
static inline __attribute__((always_inline)) int sysv_valid_dev(dev_t dev)
{
	unsigned int major = (unsigned int)(dev >> 20);
	unsigned int minor = (unsigned int)(dev & ((1U << 20) - 1));

	return major < (1 << 14) && minor < (1 << 18);
}
17161 | static inline __attribute__((always_inline)) u32 sysv_encode_dev(dev_t dev) | |
17162 | { | |
17163 | return ((unsigned int) ((dev) & ((1U << 20) - 1))) | (((unsigned int) ((dev) >> 20)) << 18); | |
17164 | } | |
17165 | static inline __attribute__((always_inline)) unsigned sysv_major(u32 dev) | |
17166 | { | |
17167 | return (dev >> 18) & 0x3fff; | |
17168 | } | |
17169 | static inline __attribute__((always_inline)) unsigned sysv_minor(u32 dev) | |
17170 | { | |
17171 | return dev & 0x3ffff; | |
17172 | } | |
/*
 * bit_spin_lock - acquire the lock implemented by bit @bitnum of @addr.
 *
 * Preprocessor output of include/linux/bit_spinlock.h:bit_spin_lock().
 * Disables preemption (add_preempt_count(1) + compiler barrier), then
 * spins: test_and_set_bit_lock() attempts the acquire; on contention it
 * re-enables preemption, busy-waits with cpu_relax() until the bit
 * clears, and retries.  test_ti_thread_flag(current_thread_info(), 3)
 * is presumably the expanded TIF_NEED_RESCHED check inside
 * preempt_enable() - confirm against the original header.  All branch
 * conditions carry expanded ftrace branch-profiler instrumentation;
 * "NNNNN |" columns are viewer residue, not code.
 */
17173 | static inline __attribute__((always_inline)) void bit_spin_lock(int bitnum, unsigned long *addr) | 
17174 | { | 
/* preempt_disable() expansion */
17175 | do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); | 
17176 | while ((__builtin_constant_p(test_and_set_bit_lock(bitnum, addr)) ? !!(test_and_set_bit_lock(bitnum, addr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 25, }; ______r = __builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))) { | 
/* Contended: preempt_enable() expansion (may call preempt_schedule()). */
17177 | do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 26, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 26, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 26, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 26, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); | 
/* Spin (with preemption enabled) until the lock bit is observed clear. */
17178 | do { | 
17179 | cpu_relax(); | 
17180 | } while ((__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))); | 
/* Re-disable preemption before retrying the acquire. */
17181 | do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); | 
17182 | } | 
/* __acquire() annotation compiles away outside sparse. */
17183 | (void)0; | 
17184 | } | 
/*
 * bit_spin_trylock - try once to acquire the lock bit @bitnum of @addr.
 *
 * Disables preemption, attempts test_and_set_bit_lock(); on failure it
 * re-enables preemption (preempt_enable() expansion, possibly calling
 * preempt_schedule()) and returns 0.  Returns 1 with preemption
 * disabled on success.  Branch conditions carry expanded ftrace
 * branch-profiler instrumentation; "NNNNN |" columns are viewer
 * residue, not code.
 */
17185 | static inline __attribute__((always_inline)) int bit_spin_trylock(int bitnum, unsigned long *addr) | 
17186 | { | 
/* preempt_disable() expansion */
17187 | do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); | 
17188 | if (__builtin_constant_p((((__builtin_constant_p(test_and_set_bit_lock(bitnum, addr)) ? !!(test_and_set_bit_lock(bitnum, addr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 43, }; ______r = __builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_and_set_bit_lock(bitnum, addr)) ? !!(test_and_set_bit_lock(bitnum, addr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 43, }; ______r = __builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 43, }; ______r = !!(((__builtin_constant_p(test_and_set_bit_lock(bitnum, addr)) ? !!(test_and_set_bit_lock(bitnum, addr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 43, }; ______r = __builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) { | 
/* Bit already set: lock is held elsewhere.  preempt_enable() and fail. */
17189 | do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 44, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 44, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 44, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 44, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); | 
17190 | return 0; | 
17191 | } | 
/* __acquire() annotation compiles away outside sparse. */
17192 | (void)0; | 
17193 | return 1; | 
17194 | } | 
/*
 * bit_spin_unlock - release the lock bit @bitnum of @addr with release
 * semantics and re-enable preemption.
 *
 * The first statement is an expanded BUG_ON(!test_bit(...)): if the bit
 * is not set the lock was not held, and the inline asm traps via ud2
 * with a __bug_table entry.  Then clear_bit_unlock() drops the lock and
 * the preempt_enable() expansion (with its TIF-flag recheck, presumably
 * TIF_NEED_RESCHED - confirm) may reschedule.  All conditions carry
 * expanded ftrace branch-profiler instrumentation; "NNNNN |" columns
 * are viewer residue, not code.
 */
17195 | static inline __attribute__((always_inline)) void bit_spin_unlock(int bitnum, unsigned long *addr) | 
17196 | { | 
17197 | do { if (__builtin_constant_p((((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 58, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 58, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 58, }; ______r = !!(((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? 
constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 58, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/bit_spinlock.h"), "i" (58), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0); | 
/* Atomic clear with release ordering: drops the lock. */
17198 | clear_bit_unlock(bitnum, addr); | 
/* preempt_enable() expansion (may call preempt_schedule()). */
17199 | do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 63, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 63, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 63, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 63, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); | 
/* __release() annotation compiles away outside sparse. */
17200 | (void)0; | 
17201 | } | 
17202 | static inline __attribute__((always_inline)) void __bit_spin_unlock(int bitnum, unsigned long *addr) | |
17203 | { | |
17204 | do { if (__builtin_constant_p((((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 75, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 75, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 75, }; ______r = !!(((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? 
constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 75, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/bit_spinlock.h"), "i" (75), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0); | |
17205 | __clear_bit_unlock(bitnum, addr); | |
17206 | do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 80, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 80, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 80, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 80, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); | |
17207 | (void)0; | |
17208 | } | |
17209 | static inline __attribute__((always_inline)) int bit_spin_is_locked(int bitnum, unsigned long *addr) | |
17210 | { | |
17211 | return (__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr))); | |
17212 | } | |
17213 | struct hlist_bl_head { | |
17214 | struct hlist_bl_node *first; | |
17215 | }; | |
17216 | struct hlist_bl_node { | |
17217 | struct hlist_bl_node *next, **pprev; | |
17218 | }; | |
17219 | static inline __attribute__((always_inline)) void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) | |
17220 | { | |
17221 | h->next = ((void *)0); | |
17222 | h->pprev = ((void *)0); | |
17223 | } | |
17224 | static inline __attribute__((always_inline)) int hlist_bl_unhashed(const struct hlist_bl_node *h) | |
17225 | { | |
17226 | return !h->pprev; | |
17227 | } | |
17228 | static inline __attribute__((always_inline)) struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h) | |
17229 | { | |
17230 | return (struct hlist_bl_node *) | |
17231 | ((unsigned long)h->first & ~1UL); | |
17232 | } | |
17233 | static inline __attribute__((always_inline)) void hlist_bl_set_first(struct hlist_bl_head *h, | |
17234 | struct hlist_bl_node *n) | |
17235 | { | |
17236 | ; | |
17237 | ; | |
17238 | h->first = (struct hlist_bl_node *)((unsigned long)n | 1UL); | |
17239 | } | |
17240 | static inline __attribute__((always_inline)) int hlist_bl_empty(const struct hlist_bl_head *h) | |
17241 | { | |
17242 | return !((unsigned long)h->first & ~1UL); | |
17243 | } | |
17244 | static inline __attribute__((always_inline)) void hlist_bl_add_head(struct hlist_bl_node *n, | |
17245 | struct hlist_bl_head *h) | |
17246 | { | |
17247 | struct hlist_bl_node *first = hlist_bl_first(h); | |
17248 | n->next = first; | |
17249 | if (__builtin_constant_p(((first))) ? !!((first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list_bl.h", .line = 82, }; ______r = !!((first)); ______f.miss_hit[______r]++; ______r; })) | |
17250 | first->pprev = &n->next; | |
17251 | n->pprev = &h->first; | |
17252 | hlist_bl_set_first(h, n); | |
17253 | } | |
17254 | static inline __attribute__((always_inline)) void __hlist_bl_del(struct hlist_bl_node *n) | |
17255 | { | |
17256 | struct hlist_bl_node *next = n->next; | |
17257 | struct hlist_bl_node **pprev = n->pprev; | |
17258 | ; | |
17259 | *pprev = (struct hlist_bl_node *) | |
17260 | ((unsigned long)next | | |
17261 | ((unsigned long)*pprev & 1UL)); | |
17262 | if (__builtin_constant_p(((next))) ? !!((next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list_bl.h", .line = 99, }; ______r = !!((next)); ______f.miss_hit[______r]++; ______r; })) | |
17263 | next->pprev = pprev; | |
17264 | } | |
17265 | static inline __attribute__((always_inline)) void hlist_bl_del(struct hlist_bl_node *n) | |
17266 | { | |
17267 | __hlist_bl_del(n); | |
17268 | n->next = ((void *) 0x00100100 + (0x0UL)); | |
17269 | n->pprev = ((void *) 0x00200200 + (0x0UL)); | |
17270 | } | |
17271 | static inline __attribute__((always_inline)) void hlist_bl_del_init(struct hlist_bl_node *n) | |
17272 | { | |
17273 | if (__builtin_constant_p(((!hlist_bl_unhashed(n)))) ? !!((!hlist_bl_unhashed(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list_bl.h", .line = 112, }; ______r = !!((!hlist_bl_unhashed(n))); ______f.miss_hit[______r]++; ______r; })) { | |
17274 | __hlist_bl_del(n); | |
17275 | INIT_HLIST_BL_NODE(n); | |
17276 | } | |
17277 | } | |
17278 | static inline __attribute__((always_inline)) void hlist_bl_lock(struct hlist_bl_head *b) | |
17279 | { | |
17280 | bit_spin_lock(0, (unsigned long *)b); | |
17281 | } | |
17282 | static inline __attribute__((always_inline)) void hlist_bl_unlock(struct hlist_bl_head *b) | |
17283 | { | |
17284 | __bit_spin_unlock(0, (unsigned long *)b); | |
17285 | } | |
17286 | static inline __attribute__((always_inline)) void hlist_bl_set_first_rcu(struct hlist_bl_head *h, | |
17287 | struct hlist_bl_node *n) | |
17288 | { | |
17289 | ; | |
17290 | ; | |
17291 | ({ if (__builtin_constant_p(((!__builtin_constant_p(((struct hlist_bl_node *)((unsigned long)n | 1UL))) || ((((struct hlist_bl_node *)((unsigned long)n | 1UL))) != ((void *)0))))) ? !!((!__builtin_constant_p(((struct hlist_bl_node *)((unsigned long)n | 1UL))) || ((((struct hlist_bl_node *)((unsigned long)n | 1UL))) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
17292 | "include/linux/rculist_bl.h" | |
17293 | , .line = | |
17294 | 17 | |
17295 | , }; ______r = !!((!__builtin_constant_p(((struct hlist_bl_node *)((unsigned long)n | 1UL))) || ((((struct hlist_bl_node *)((unsigned long)n | 1UL))) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); ((h->first)) = (typeof(*((struct hlist_bl_node *)((unsigned long)n | 1UL))) *)(((struct hlist_bl_node *)((unsigned long)n | 1UL))); }) | |
17296 | ; | |
17297 | } | |
17298 | static inline __attribute__((always_inline)) struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h) | |
17299 | { | |
17300 | return (struct hlist_bl_node *) | |
17301 | ((unsigned long)({ typeof(*(h->first)) *_________p1 = (typeof(*(h->first))* )(*(volatile typeof((h->first)) *)&((h->first))); do { } while (0); ; do { } while (0); ((typeof(*(h->first)) *)(_________p1)); }) & ~1UL); | |
17302 | } | |
17303 | static inline __attribute__((always_inline)) void hlist_bl_del_init_rcu(struct hlist_bl_node *n) | |
17304 | { | |
17305 | if (__builtin_constant_p(((!hlist_bl_unhashed(n)))) ? !!((!hlist_bl_unhashed(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist_bl.h", .line = 48, }; ______r = !!((!hlist_bl_unhashed(n))); ______f.miss_hit[______r]++; ______r; })) { | |
17306 | __hlist_bl_del(n); | |
17307 | n->pprev = ((void *)0); | |
17308 | } | |
17309 | } | |
17310 | static inline __attribute__((always_inline)) void hlist_bl_del_rcu(struct hlist_bl_node *n) | |
17311 | { | |
17312 | __hlist_bl_del(n); | |
17313 | n->pprev = ((void *) 0x00200200 + (0x0UL)); | |
17314 | } | |
17315 | static inline __attribute__((always_inline)) void hlist_bl_add_head_rcu(struct hlist_bl_node *n, | |
17316 | struct hlist_bl_head *h) | |
17317 | { | |
17318 | struct hlist_bl_node *first; | |
17319 | first = hlist_bl_first(h); | |
17320 | n->next = first; | |
17321 | if (__builtin_constant_p(((first))) ? !!((first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist_bl.h", .line = 107, }; ______r = !!((first)); ______f.miss_hit[______r]++; ______r; })) | |
17322 | first->pprev = &n->next; | |
17323 | n->pprev = &h->first; | |
17324 | hlist_bl_set_first_rcu(h, n); | |
17325 | } | |
17326 | struct nameidata; | |
17327 | struct path; | |
17328 | struct vfsmount; | |
17329 | struct qstr { | |
17330 | unsigned int hash; | |
17331 | unsigned int len; | |
17332 | const unsigned char *name; | |
17333 | }; | |
17334 | struct dentry_stat_t { | |
17335 | int nr_dentry; | |
17336 | int nr_unused; | |
17337 | int age_limit; | |
17338 | int want_pages; | |
17339 | int dummy[2]; | |
17340 | }; | |
17341 | extern struct dentry_stat_t dentry_stat; | |
17342 | static inline __attribute__((always_inline)) int dentry_cmp(const unsigned char *cs, size_t scount, | |
17343 | const unsigned char *ct, size_t tcount) | |
17344 | { | |
17345 | int ret; | |
17346 | if (__builtin_constant_p(((scount != tcount))) ? !!((scount != tcount)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 58, }; ______r = !!((scount != tcount)); ______f.miss_hit[______r]++; ______r; })) | |
17347 | return 1; | |
17348 | do { | |
17349 | ret = (*cs != *ct); | |
17350 | if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 62, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; })) | |
17351 | break; | |
17352 | cs++; | |
17353 | ct++; | |
17354 | tcount--; | |
17355 | } while (tcount); | |
17356 | return ret; | |
17357 | } | |
17358 | static inline __attribute__((always_inline)) unsigned long | |
17359 | partial_name_hash(unsigned long c, unsigned long prevhash) | |
17360 | { | |
17361 | return (prevhash + (c << 4) + (c >> 4)) * 11; | |
17362 | } | |
17363 | static inline __attribute__((always_inline)) unsigned long end_name_hash(unsigned long hash) | |
17364 | { | |
17365 | return (unsigned int) hash; | |
17366 | } | |
17367 | static inline __attribute__((always_inline)) unsigned int | |
17368 | full_name_hash(const unsigned char *name, unsigned int len) | |
17369 | { | |
17370 | unsigned long hash = 0; | |
17371 | while (len--) | |
17372 | hash = partial_name_hash(*name++, hash); | |
17373 | return end_name_hash(hash); | |
17374 | } | |
17375 | struct dentry { | |
17376 | unsigned int d_flags; | |
17377 | seqcount_t d_seq; | |
17378 | struct hlist_bl_node d_hash; | |
17379 | struct dentry *d_parent; | |
17380 | struct qstr d_name; | |
17381 | struct inode *d_inode; | |
17382 | unsigned char d_iname[36]; | |
17383 | unsigned int d_count; | |
17384 | spinlock_t d_lock; | |
17385 | const struct dentry_operations *d_op; | |
17386 | struct super_block *d_sb; | |
17387 | unsigned long d_time; | |
17388 | void *d_fsdata; | |
17389 | struct list_head d_lru; | |
17390 | union { | |
17391 | struct list_head d_child; | |
17392 | struct rcu_head d_rcu; | |
17393 | } d_u; | |
17394 | struct list_head d_subdirs; | |
17395 | struct list_head d_alias; | |
17396 | }; | |
17397 | enum dentry_d_lock_class | |
17398 | { | |
17399 | DENTRY_D_LOCK_NORMAL, | |
17400 | DENTRY_D_LOCK_NESTED | |
17401 | }; | |
17402 | struct dentry_operations { | |
17403 | int (*d_revalidate)(struct dentry *, struct nameidata *); | |
17404 | int (*d_hash)(const struct dentry *, const struct inode *, | |
17405 | struct qstr *); | |
17406 | int (*d_compare)(const struct dentry *, const struct inode *, | |
17407 | const struct dentry *, const struct inode *, | |
17408 | unsigned int, const char *, const struct qstr *); | |
17409 | int (*d_delete)(const struct dentry *); | |
17410 | void (*d_release)(struct dentry *); | |
17411 | void (*d_iput)(struct dentry *, struct inode *); | |
17412 | char *(*d_dname)(struct dentry *, char *, int); | |
17413 | struct vfsmount *(*d_automount)(struct path *); | |
17414 | int (*d_manage)(struct dentry *, bool); | |
17415 | } __attribute__((__aligned__((1 << (6))))); | |
17416 | extern seqlock_t rename_lock; | |
17417 | static inline __attribute__((always_inline)) int dname_external(struct dentry *dentry) | |
17418 | { | |
17419 | return dentry->d_name.name != dentry->d_iname; | |
17420 | } | |
17421 | extern void d_instantiate(struct dentry *, struct inode *); | |
17422 | extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); | |
17423 | extern struct dentry * d_materialise_unique(struct dentry *, struct inode *); | |
17424 | extern void __d_drop(struct dentry *dentry); | |
17425 | extern void d_drop(struct dentry *dentry); | |
17426 | extern void d_delete(struct dentry *); | |
17427 | extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op); | |
17428 | extern struct dentry * d_alloc(struct dentry *, const struct qstr *); | |
17429 | extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); | |
17430 | extern struct dentry * d_splice_alias(struct inode *, struct dentry *); | |
17431 | extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); | |
17432 | extern struct dentry * d_obtain_alias(struct inode *); | |
17433 | extern void shrink_dcache_sb(struct super_block *); | |
17434 | extern void shrink_dcache_parent(struct dentry *); | |
17435 | extern void shrink_dcache_for_umount(struct super_block *); | |
17436 | extern int d_invalidate(struct dentry *); | |
17437 | extern struct dentry * d_alloc_root(struct inode *); | |
17438 | extern void d_genocide(struct dentry *); | |
17439 | extern struct dentry *d_find_alias(struct inode *); | |
17440 | extern void d_prune_aliases(struct inode *); | |
17441 | extern int have_submounts(struct dentry *); | |
17442 | extern void d_rehash(struct dentry *); | |
17443 | static inline __attribute__((always_inline)) void d_add(struct dentry *entry, struct inode *inode) | |
17444 | { | |
17445 | d_instantiate(entry, inode); | |
17446 | d_rehash(entry); | |
17447 | } | |
17448 | static inline __attribute__((always_inline)) struct dentry *d_add_unique(struct dentry *entry, struct inode *inode) | |
17449 | { | |
17450 | struct dentry *res; | |
17451 | res = d_instantiate_unique(entry, inode); | |
17452 | d_rehash(res != ((void *)0) ? res : entry); | |
17453 | return res; | |
17454 | } | |
17455 | extern void dentry_update_name_case(struct dentry *, struct qstr *); | |
17456 | extern void d_move(struct dentry *, struct dentry *); | |
17457 | extern struct dentry *d_ancestor(struct dentry *, struct dentry *); | |
17458 | extern struct dentry *d_lookup(struct dentry *, struct qstr *); | |
17459 | extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *); | |
17460 | extern struct dentry *__d_lookup(struct dentry *, struct qstr *); | |
17461 | extern struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name, | |
17462 | unsigned *seq, struct inode **inode); | |
17463 | static inline __attribute__((always_inline)) int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq) | |
17464 | { | |
17465 | int ret = 0; | |
17466 | assert_spin_locked(&dentry->d_lock); | |
17467 | if (__builtin_constant_p(((!read_seqcount_retry(&dentry->d_seq, seq)))) ? !!((!read_seqcount_retry(&dentry->d_seq, seq))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 327, }; ______r = !!((!read_seqcount_retry(&dentry->d_seq, seq))); ______f.miss_hit[______r]++; ______r; })) { | |
17468 | ret = 1; | |
17469 | dentry->d_count++; | |
17470 | } | |
17471 | return ret; | |
17472 | } | |
17473 | extern int d_validate(struct dentry *, struct dentry *); | |
17474 | extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); | |
17475 | extern char *__d_path(const struct path *path, struct path *root, char *, int); | |
17476 | extern char *d_path(const struct path *, char *, int); | |
17477 | extern char *d_path_with_unreachable(const struct path *, char *, int); | |
17478 | extern char *dentry_path_raw(struct dentry *, char *, int); | |
17479 | extern char *dentry_path(struct dentry *, char *, int); | |
17480 | static inline __attribute__((always_inline)) struct dentry *dget_dlock(struct dentry *dentry) | |
17481 | { | |
17482 | if (__builtin_constant_p(((dentry))) ? !!((dentry)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 361, }; ______r = !!((dentry)); ______f.miss_hit[______r]++; ______r; })) | |
17483 | dentry->d_count++; | |
17484 | return dentry; | |
17485 | } | |
17486 | static inline __attribute__((always_inline)) struct dentry *dget(struct dentry *dentry) | |
17487 | { | |
17488 | if (__builtin_constant_p(((dentry))) ? !!((dentry)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 368, }; ______r = !!((dentry)); ______f.miss_hit[______r]++; ______r; })) { | |
17489 | spin_lock(&dentry->d_lock); | |
17490 | dget_dlock(dentry); | |
17491 | spin_unlock(&dentry->d_lock); | |
17492 | } | |
17493 | return dentry; | |
17494 | } | |
17495 | extern struct dentry *dget_parent(struct dentry *dentry); | |
17496 | static inline __attribute__((always_inline)) int d_unhashed(struct dentry *dentry) | |
17497 | { | |
17498 | return hlist_bl_unhashed(&dentry->d_hash); | |
17499 | } | |
17500 | static inline __attribute__((always_inline)) int d_unlinked(struct dentry *dentry) | |
17501 | { | |
17502 | return d_unhashed(dentry) && !((dentry) == (dentry)->d_parent); | |
17503 | } | |
17504 | static inline __attribute__((always_inline)) int cant_mount(struct dentry *dentry) | |
17505 | { | |
17506 | return (dentry->d_flags & 0x0100); | |
17507 | } | |
17508 | static inline __attribute__((always_inline)) void dont_mount(struct dentry *dentry) | |
17509 | { | |
17510 | spin_lock(&dentry->d_lock); | |
17511 | dentry->d_flags |= 0x0100; | |
17512 | spin_unlock(&dentry->d_lock); | |
17513 | } | |
17514 | extern void dput(struct dentry *); | |
17515 | static inline __attribute__((always_inline)) bool d_managed(struct dentry *dentry) | |
17516 | { | |
17517 | return dentry->d_flags & (0x10000|0x20000|0x40000); | |
17518 | } | |
17519 | static inline __attribute__((always_inline)) bool d_mountpoint(struct dentry *dentry) | |
17520 | { | |
17521 | return dentry->d_flags & 0x10000; | |
17522 | } | |
17523 | extern struct dentry *lookup_create(struct nameidata *nd, int is_dir); | |
17524 | extern int sysctl_vfs_cache_pressure; | |
17525 | struct dentry; | |
17526 | struct vfsmount; | |
17527 | struct path { | |
17528 | struct vfsmount *mnt; | |
17529 | struct dentry *dentry; | |
17530 | }; | |
17531 | extern void path_get(struct path *); | |
17532 | extern void path_put(struct path *); | |
17533 | static inline __attribute__((always_inline)) int path_equal(const struct path *path1, const struct path *path2) | |
17534 | { | |
17535 | return path1->mnt == path2->mnt && path1->dentry == path2->dentry; | |
17536 | } | |
17537 | static inline __attribute__((always_inline)) int radix_tree_is_indirect_ptr(void *ptr) | |
17538 | { | |
17539 | return (int)((unsigned long)ptr & 1); | |
17540 | } | |
17541 | struct radix_tree_root { | |
17542 | unsigned int height; | |
17543 | gfp_t gfp_mask; | |
17544 | struct radix_tree_node *rnode; | |
17545 | }; | |
17546 | static inline __attribute__((always_inline)) void *radix_tree_deref_slot(void **pslot) | |
17547 | { | |
17548 | return ({ typeof(*(*pslot)) *_________p1 = (typeof(*(*pslot))* )(*(volatile typeof((*pslot)) *)&((*pslot))); do { } while (0); ; do { } while (0); ((typeof(*(*pslot)) *)(_________p1)); }); | |
17549 | } | |
17550 | static inline __attribute__((always_inline)) void *radix_tree_deref_slot_protected(void **pslot, | |
17551 | spinlock_t *treelock) | |
17552 | { | |
17553 | return ({ do { } while (0); ; ((typeof(*(*pslot)) *)((*pslot))); }); | |
17554 | } | |
17555 | static inline __attribute__((always_inline)) int radix_tree_deref_retry(void *arg) | |
17556 | { | |
17557 | return (__builtin_constant_p((unsigned long)arg & 1) ? !!((unsigned long)arg & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 173, }; ______r = __builtin_expect(!!((unsigned long)arg & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); | |
17558 | } | |
17559 | static inline __attribute__((always_inline)) void radix_tree_replace_slot(void **pslot, void *item) | |
17560 | { | |
17561 | do { if (__builtin_constant_p((((__builtin_constant_p(radix_tree_is_indirect_ptr(item)) ? !!(radix_tree_is_indirect_ptr(item)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 186, }; ______r = __builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(radix_tree_is_indirect_ptr(item)) ? !!(radix_tree_is_indirect_ptr(item)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 186, }; ______r = __builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 186, }; ______r = !!(((__builtin_constant_p(radix_tree_is_indirect_ptr(item)) ? !!(radix_tree_is_indirect_ptr(item)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 186, }; ______r = __builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/radix-tree.h"), "i" (186), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0); | |
17562 | ({ if (__builtin_constant_p(((!__builtin_constant_p((item)) || (((item)) != ((void *)0))))) ? !!((!__builtin_constant_p((item)) || (((item)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 187, }; ______r = !!((!__builtin_constant_p((item)) || (((item)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); ((*pslot)) = (typeof(*(item)) *)((item)); }); | |
17563 | } | |
17564 | int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); | |
17565 | void *radix_tree_lookup(struct radix_tree_root *, unsigned long); | |
17566 | void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); | |
17567 | void *radix_tree_delete(struct radix_tree_root *, unsigned long); | |
17568 | unsigned int | |
17569 | radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |
17570 | unsigned long first_index, unsigned int max_items); | |
17571 | unsigned int | |
17572 | radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | |
17573 | unsigned long first_index, unsigned int max_items); | |
17574 | unsigned long radix_tree_next_hole(struct radix_tree_root *root, | |
17575 | unsigned long index, unsigned long max_scan); | |
17576 | unsigned long radix_tree_prev_hole(struct radix_tree_root *root, | |
17577 | unsigned long index, unsigned long max_scan); | |
17578 | int radix_tree_preload(gfp_t gfp_mask); | |
17579 | void radix_tree_init(void); | |
17580 | void *radix_tree_tag_set(struct radix_tree_root *root, | |
17581 | unsigned long index, unsigned int tag); | |
17582 | void *radix_tree_tag_clear(struct radix_tree_root *root, | |
17583 | unsigned long index, unsigned int tag); | |
17584 | int radix_tree_tag_get(struct radix_tree_root *root, | |
17585 | unsigned long index, unsigned int tag); | |
17586 | unsigned int | |
17587 | radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |
17588 | unsigned long first_index, unsigned int max_items, | |
17589 | unsigned int tag); | |
17590 | unsigned int | |
17591 | radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | |
17592 | unsigned long first_index, unsigned int max_items, | |
17593 | unsigned int tag); | |
17594 | unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | |
17595 | unsigned long *first_indexp, unsigned long last_index, | |
17596 | unsigned long nr_to_tag, | |
17597 | unsigned int fromtag, unsigned int totag); | |
17598 | int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); | |
17599 | static inline __attribute__((always_inline)) void radix_tree_preload_end(void) | |
17600 | { | |
17601 | do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 228, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 228, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 228, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 228, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); | |
17602 | } | |
17603 | struct semaphore { | |
17604 | spinlock_t lock; | |
17605 | unsigned int count; | |
17606 | struct list_head wait_list; | |
17607 | }; | |
17608 | static inline __attribute__((always_inline)) void sema_init(struct semaphore *sem, int val) | |
17609 | { | |
17610 | static struct lock_class_key __key; | |
17611 | *sem = (struct semaphore) { .lock = (spinlock_t ) { { .rlock = { .raw_lock = { 0 }, .magic = 0xdead4ead, .owner_cpu = -1, .owner = ((void *)-1L), .dep_map = { .name = "(*sem).lock" } } } }, .count = val, .wait_list = { &((*sem).wait_list), &((*sem).wait_list) }, }; | |
17612 | lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0); | |
17613 | } | |
17614 | extern void down(struct semaphore *sem); | |
17615 | extern int __attribute__((warn_unused_result)) down_interruptible(struct semaphore *sem); | |
17616 | extern int __attribute__((warn_unused_result)) down_killable(struct semaphore *sem); | |
17617 | extern int __attribute__((warn_unused_result)) down_trylock(struct semaphore *sem); | |
17618 | extern int __attribute__((warn_unused_result)) down_timeout(struct semaphore *sem, long jiffies); | |
17619 | extern void up(struct semaphore *sem); | |
17620 | struct fiemap_extent { | |
17621 | __u64 fe_logical; | |
17622 | __u64 fe_physical; | |
17623 | __u64 fe_length; | |
17624 | __u64 fe_reserved64[2]; | |
17625 | __u32 fe_flags; | |
17626 | __u32 fe_reserved[3]; | |
17627 | }; | |
17628 | struct fiemap { | |
17629 | __u64 fm_start; | |
17630 | __u64 fm_length; | |
17631 | __u32 fm_flags; | |
17632 | __u32 fm_mapped_extents; | |
17633 | __u32 fm_extent_count; | |
17634 | __u32 fm_reserved; | |
17635 | struct fiemap_extent fm_extents[0]; | |
17636 | }; | |
17637 | struct export_operations; | |
17638 | struct hd_geometry; | |
17639 | struct iovec; | |
17640 | struct nameidata; | |
17641 | struct kiocb; | |
17642 | struct kobject; | |
17643 | struct pipe_inode_info; | |
17644 | struct poll_table_struct; | |
17645 | struct kstatfs; | |
17646 | struct vm_area_struct; | |
17647 | struct vfsmount; | |
17648 | struct cred; | |
17649 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init(void); | |
17650 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init_early(void); | |
17651 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) files_init(unsigned long); | |
17652 | extern struct files_stat_struct files_stat; | |
17653 | extern unsigned long get_max_files(void); | |
17654 | extern int sysctl_nr_open; | |
17655 | extern struct inodes_stat_t inodes_stat; | |
17656 | extern int leases_enable, lease_break_time; | |
17657 | struct buffer_head; | |
17658 | typedef int (get_block_t)(struct inode *inode, sector_t iblock, | |
17659 | struct buffer_head *bh_result, int create); | |
17660 | typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |
17661 | ssize_t bytes, void *private, int ret, | |
17662 | bool is_async); | |
/*
 * Attribute-change request handed to inode ->setattr().  ia_valid is a
 * bitmask (ATTR_* bits, not visible in this chunk) saying which of the
 * following fields carry new values; unset fields must be ignored.
 */
struct iattr {
	unsigned int	ia_valid;
	umode_t		ia_mode;
	uid_t		ia_uid;
	gid_t		ia_gid;
	loff_t		ia_size;
	struct timespec	ia_atime;
	struct timespec	ia_mtime;
	struct timespec	ia_ctime;
	/* NOTE(review): presumably only valid for ftruncate()-style changes
	 * where an open file is available — confirm against ATTR_FILE users. */
	struct file	*ia_file;
};
/* Bit numbers for if_dqblk.dqb_valid: which dqb_* fields hold valid data. */
enum {
	QIF_BLIMITS_B = 0,
	QIF_SPACE_B,
	QIF_ILIMITS_B,
	QIF_INODES_B,
	QIF_BTIME_B,
	QIF_ITIME_B,
};
/*
 * Generic quota usage/limits block exchanged with userspace via
 * quotactl(Q_GETQUOTA / Q_SETQUOTA).
 */
struct if_dqblk {
	__u64 dqb_bhardlimit;	/* absolute limit on disk space */
	__u64 dqb_bsoftlimit;	/* preferred limit on disk space */
	__u64 dqb_curspace;	/* current space usage */
	__u64 dqb_ihardlimit;	/* absolute limit on inode count */
	__u64 dqb_isoftlimit;	/* preferred limit on inode count */
	__u64 dqb_curinodes;	/* current inode usage */
	__u64 dqb_btime;	/* grace-period expiry for space soft limit */
	__u64 dqb_itime;	/* grace-period expiry for inode soft limit */
	__u32 dqb_valid;	/* QIF_* bitmask of valid fields above */
};
/* Per-quota-type info exchanged via quotactl(Q_GETINFO / Q_SETINFO). */
struct if_dqinfo {
	__u64 dqi_bgrace;	/* space soft-limit grace period (seconds) */
	__u64 dqi_igrace;	/* inode soft-limit grace period (seconds) */
	__u32 dqi_flags;	/* DQF_* flags */
	__u32 dqi_valid;	/* IIF_* bitmask of valid fields */
};
/* Generic-netlink command ids for the quota over-limit warning protocol. */
enum {
	QUOTA_NL_C_UNSPEC,
	QUOTA_NL_C_WARNING,
	__QUOTA_NL_C_MAX,
};
/* Netlink attributes carried by a QUOTA_NL_C_WARNING message. */
enum {
	QUOTA_NL_A_UNSPEC,
	QUOTA_NL_A_QTYPE,
	QUOTA_NL_A_EXCESS_ID,
	QUOTA_NL_A_WARNING,
	QUOTA_NL_A_DEV_MAJOR,
	QUOTA_NL_A_DEV_MINOR,
	QUOTA_NL_A_CAUSED_ID,
	__QUOTA_NL_A_MAX,
};
/*
 * XFS-style ("XQM") disk quota record, the ABI for the Q_XGETQUOTA /
 * Q_XSETQLIM quotactl commands.  Layout and padding are fixed on disk /
 * across the user-kernel boundary — do not reorder.
 */
typedef struct fs_disk_quota {
	__s8 d_version;		/* version of this structure */
	__s8 d_flags;		/* FS_{USER,GROUP,PROJ}_QUOTA */
	__u16 d_fieldmask;	/* FS_DQ_* bitmask of fields to change (set) */
	__u32 d_id;		/* user/group/project id the record applies to */
	__u64 d_blk_hardlimit;
	__u64 d_blk_softlimit;
	__u64 d_ino_hardlimit;
	__u64 d_ino_softlimit;
	__u64 d_bcount;		/* current block usage */
	__u64 d_icount;		/* current inode usage */
	__s32 d_itimer;		/* inode grace timer */
	__s32 d_btimer;		/* block grace timer */
	__u16 d_iwarns;		/* warnings issued for inodes */
	__u16 d_bwarns;		/* warnings issued for blocks */
	__s32 d_padding2;
	__u64 d_rtb_hardlimit;	/* realtime-block limits/usage follow */
	__u64 d_rtb_softlimit;
	__u64 d_rtbcount;
	__s32 d_rtbtimer;
	__u16 d_rtbwarns;
	__s16 d_padding3;
	char d_padding4[8];
} fs_disk_quota_t;
/* Location/size of one on-disk quota file, part of fs_quota_stat below. */
typedef struct fs_qfilestat {
	__u64 qfs_ino;		/* inode number of the quota file */
	__u64 qfs_nblks;	/* number of blocks it occupies */
	__u32 qfs_nextents;	/* number of extents */
} fs_qfilestat_t;
/* Filesystem-wide quota state returned by quotactl(Q_XGETQSTAT). */
typedef struct fs_quota_stat {
	__s8 qs_version;	/* version of this structure */
	__u16 qs_flags;		/* FS_QUOTA_*_ENFD / _ACCT flags */
	__s8 qs_pad;
	fs_qfilestat_t qs_uquota;	/* user quota file info */
	fs_qfilestat_t qs_gquota;	/* group quota file info */
	__u32 qs_incoredqs;	/* number of in-core dquots */
	__s32 qs_btimelimit;	/* default block grace period */
	__s32 qs_itimelimit;	/* default inode grace period */
	__s32 qs_rtbtimelimit;	/* default realtime-block grace period */
	__u16 qs_bwarnlimit;	/* max block warnings before enforcement */
	__u16 qs_iwarnlimit;	/* max inode warnings before enforcement */
} fs_quota_stat_t;
17756 | struct dquot; | |
/* Format-specific (de)serialisation hooks for radix-tree quota files. */
struct qtree_fmt_operations {
	void (*mem2disk_dqblk)(void *disk, struct dquot *dquot);	/* serialise */
	void (*disk2mem_dqblk)(struct dquot *dquot, void *disk);	/* deserialise */
	int (*is_id)(void *disk, struct dquot *dquot);	/* does this on-disk entry belong to dquot's id? */
};
/* In-memory description of one radix-tree ("qtree") formatted quota file. */
struct qtree_mem_dqinfo {
	struct super_block *dqi_sb;	/* superblock the file lives on */
	int dqi_type;			/* quota type (user/group) */
	unsigned int dqi_blocks;	/* blocks allocated in the file */
	unsigned int dqi_free_blk;	/* head of the free-block list */
	unsigned int dqi_free_entry;	/* first block with a free entry slot */
	unsigned int dqi_blocksize_bits;
	unsigned int dqi_entry_size;	/* size of one on-disk dquot entry */
	unsigned int dqi_usable_bs;	/* usable bytes per tree block */
	unsigned int dqi_qtree_depth;	/* cached depth, 0 = compute via qtree_depth() */
	struct qtree_fmt_operations *dqi_ops;
};
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk);
/*
 * Number of tree levels needed so the tree can address every 32-bit quota id.
 * Each internal node holds one 4-byte block reference per 4 bytes of usable
 * block space.  NOTE(review): assumes dqi_usable_bs > 4 (at least two
 * references per block); epb <= 1 would loop forever.
 */
static inline __attribute__((always_inline)) int qtree_depth(struct qtree_mem_dqinfo *info)
{
	unsigned int entries_per_block = info->dqi_usable_bs >> 2;
	unsigned long long capacity = entries_per_block;
	int depth = 1;

	while (capacity < (1ULL << 32)) {
		capacity *= entries_per_block;
		depth++;
	}
	return depth;
}
17788 | typedef __kernel_uid32_t qid_t; | |
17789 | typedef long long qsize_t; | |
17790 | extern spinlock_t dq_data_lock; | |
/* In-core copy of one quota record's usage, limits and grace deadlines. */
struct mem_dqblk {
	qsize_t dqb_bhardlimit;	/* absolute space limit */
	qsize_t dqb_bsoftlimit;	/* preferred space limit */
	qsize_t dqb_curspace;	/* bytes currently used */
	qsize_t dqb_rsvspace;	/* bytes reserved for delayed allocation */
	qsize_t dqb_ihardlimit;	/* absolute inode limit */
	qsize_t dqb_isoftlimit;	/* preferred inode limit */
	qsize_t dqb_curinodes;	/* inodes currently used */
	time_t dqb_btime;	/* when the space soft limit was exceeded */
	time_t dqb_itime;	/* when the inode soft limit was exceeded */
};
struct quota_format_type;
/* Per-superblock, per-quota-type information about one enabled quota. */
struct mem_dqinfo {
	struct quota_format_type *dqi_format;
	int dqi_fmt_id;		/* id of the format, for re-enabling after remount */
	struct list_head dqi_dirty_list;	/* dquots with unwritten changes */
	unsigned long dqi_flags;
	unsigned int dqi_bgrace;	/* space soft-limit grace period */
	unsigned int dqi_igrace;	/* inode soft-limit grace period */
	qsize_t dqi_maxblimit;	/* largest block limit the format can store */
	qsize_t dqi_maxilimit;	/* largest inode limit the format can store */
	void *dqi_priv;		/* format-private data */
};
17814 | struct super_block; | |
17815 | extern void mark_info_dirty(struct super_block *sb, int type); | |
/*
 * Does the in-core quota info need writing back?  Tests bit 16 of
 * dqi_flags (presumably the "info dirty" flag — confirm against the
 * DQF_* definitions, which are outside this chunk).  The expression is a
 * pre-expanded test_bit(): the constant/variable split lets gcc pick the
 * cheaper sequence when the bit number is a compile-time constant.
 */
static inline __attribute__((always_inline)) int info_dirty(struct mem_dqinfo *info)
{
	return (__builtin_constant_p((16)) ? constant_test_bit((16), (&info->dqi_flags)) : variable_test_bit((16), (&info->dqi_flags)));
}
/* Indices into the quota statistics counters below. */
enum {
	DQST_LOOKUPS,
	DQST_DROPS,
	DQST_READS,
	DQST_WRITES,
	DQST_CACHE_HITS,
	DQST_ALLOC_DQUOTS,
	DQST_FREE_DQUOTS,
	DQST_SYNCS,
	_DQST_DQSTAT_LAST	/* number of counters, keep last */
};
/*
 * Quota cache statistics.  NOTE(review): the plain int array appears to be
 * a legacy mirror of the percpu counters (for /proc export) — confirm.
 */
struct dqstats {
	int stat[_DQST_DQSTAT_LAST];
	struct percpu_counter counter[_DQST_DQSTAT_LAST];
};
17835 | extern struct dqstats *dqstats_pcpu; | |
17836 | extern struct dqstats dqstats; | |
/* Bump one global quota statistic via its scalable percpu_counter. */
static inline __attribute__((always_inline)) void dqstats_inc(unsigned int type)
{
	percpu_counter_inc(&dqstats.counter[type]);
}
/* Decrement one global quota statistic. */
static inline __attribute__((always_inline)) void dqstats_dec(unsigned int type)
{
	percpu_counter_dec(&dqstats.counter[type]);
}
/*
 * In-core representation of one quota record, identified by
 * (dq_sb, dq_type, dq_id).  NOTE(review): locking summary inferred from
 * field names — dq_lock serializes entry users, dq_count is the reference
 * count; confirm against fs/quota/dquot.c.
 */
struct dquot {
	struct hlist_node dq_hash;	/* link in the dquot hash table */
	struct list_head dq_inuse;	/* link in the list of all dquots */
	struct list_head dq_free;	/* link in the free (unused) list */
	struct list_head dq_dirty;	/* link in the per-sb dirty list */
	struct mutex dq_lock;
	atomic_t dq_count;		/* reference count */
	wait_queue_head_t dq_wait_unused;	/* waiters for dq_count to drop */
	struct super_block *dq_sb;
	unsigned int dq_id;		/* uid/gid this record applies to */
	loff_t dq_off;			/* offset of the record in the quota file */
	unsigned long dq_flags;		/* DQ_* state bits */
	short dq_type;			/* quota type (user/group) */
	struct mem_dqblk dq_dqb;	/* usage and limits */
};
/* Operations implemented by one on-disk quota file format driver. */
struct quota_format_ops {
	int (*check_quota_file)(struct super_block *sb, int type);	/* is the file in this format? */
	int (*read_file_info)(struct super_block *sb, int type);	/* read header into mem_dqinfo */
	int (*write_file_info)(struct super_block *sb, int type);	/* write header back */
	int (*free_file_info)(struct super_block *sb, int type);	/* release format-private info */
	int (*read_dqblk)(struct dquot *dquot);		/* read one record into dq_dqb */
	int (*commit_dqblk)(struct dquot *dquot);	/* write one record back */
	int (*release_dqblk)(struct dquot *dquot);	/* drop the on-disk record */
};
/* Hooks a filesystem provides so generic quota code can manage dquots. */
struct dquot_operations {
	int (*write_dquot) (struct dquot *);
	struct dquot *(*alloc_dquot)(struct super_block *, int);
	void (*destroy_dquot)(struct dquot *);
	int (*acquire_dquot) (struct dquot *);
	int (*release_dquot) (struct dquot *);
	int (*mark_dirty) (struct dquot *);
	int (*write_info) (struct super_block *, int);
	/* non-NULL when the fs reserves space ahead of allocation */
	qsize_t *(*get_reserved_space) (struct inode *);
};
struct path;
/* Per-superblock handlers behind the quotactl(2) syscall. */
struct quotactl_ops {
	int (*quota_on)(struct super_block *, int, int, struct path *);
	int (*quota_on_meta)(struct super_block *, int, int);
	int (*quota_off)(struct super_block *, int);
	int (*quota_sync)(struct super_block *, int, int);
	int (*get_info)(struct super_block *, int, struct if_dqinfo *);
	int (*set_info)(struct super_block *, int, struct if_dqinfo *);
	int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
	int (*set_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
	int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
	int (*set_xstate)(struct super_block *, unsigned int, int);
};
/* Registered quota format: id, ops, owning module; singly linked list. */
struct quota_format_type {
	int qf_fmt_id;
	const struct quota_format_ops *qf_ops;
	struct module *qf_owner;
	struct quota_format_type *qf_next;
};
/*
 * Per-quota-type state bits.  Each quota type owns its own contiguous
 * group of _DQUOT_STATE_FLAGS bits inside the superblock's quota flags.
 */
enum {
	_DQUOT_USAGE_ENABLED = 0,
	_DQUOT_LIMITS_ENABLED,
	_DQUOT_SUSPENDED,
	_DQUOT_STATE_FLAGS
};
/* Shift generic (type-0) state bits into the group belonging to @type. */
static inline __attribute__((always_inline)) unsigned int dquot_state_flag(unsigned int flags, int type)
{
	int shift = _DQUOT_STATE_FLAGS * type;

	return flags << shift;
}
/* Extract @type's state bits back down to generic (type-0) position. */
static inline __attribute__((always_inline)) unsigned int dquot_generic_flag(unsigned int flags, int type)
{
	unsigned int mask = (1 << _DQUOT_USAGE_ENABLED) |
			    (1 << _DQUOT_LIMITS_ENABLED) |
			    (1 << _DQUOT_SUSPENDED);

	return (flags >> _DQUOT_STATE_FLAGS * type) & mask;
}
/*
 * No-op stub: quota netlink warning support is compiled out in this
 * configuration, so over-limit warnings are silently dropped.
 */
static inline __attribute__((always_inline)) void quota_send_warning(short type, unsigned int id, dev_t dev,
				const char warntype)
{
	return;
}
/*
 * Per-superblock quota state: one quota file, in-core info and format ops
 * slot per quota type (index 0 = user, 1 = group per quotactl convention).
 */
struct quota_info {
	unsigned int flags;		/* DQUOT_* state flags for all types */
	struct mutex dqio_mutex;	/* serializes quota file I/O */
	struct mutex dqonoff_mutex;	/* serializes quota on/off transitions */
	struct rw_semaphore dqptr_sem;	/* protects inode->i_dquot pointers */
	struct inode *files[2];		/* the quota files themselves */
	struct mem_dqinfo info[2];
	const struct quota_format_ops *ops[2];
};
17926 | int register_quota_format(struct quota_format_type *fmt); | |
17927 | void unregister_quota_format(struct quota_format_type *fmt); | |
/* Maps a quota format id to the module name implementing it (for request_module). */
struct quota_module_name {
	int qm_fmt_id;
	char *qm_mod_name;
};
/*
 * Positive return codes from address_space operations; chosen high enough
 * not to collide with byte counts or -errno values.
 */
enum positive_aop_returns {
	AOP_WRITEPAGE_ACTIVATE	= 0x80000,	/* page could not be written, re-activate it */
	AOP_TRUNCATED_PAGE	= 0x80001,	/* page was truncated, caller should retry */
};
17936 | struct page; | |
17937 | struct address_space; | |
17938 | struct writeback_control; | |
/*
 * Iterator over a user-supplied iovec array, tracking progress both inside
 * the current segment (iov_offset) and overall (count = bytes remaining).
 */
struct iov_iter {
	const struct iovec *iov;
	unsigned long nr_segs;
	size_t iov_offset;
	size_t count;
};
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
size_t iov_iter_copy_from_user(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(struct iov_iter *i);
/*
 * Initialise @i over @iov.  @written bytes are treated as already consumed:
 * they are added to the budget and then immediately skipped, leaving
 * exactly @count bytes remaining.
 */
static inline __attribute__((always_inline)) void iov_iter_init(struct iov_iter *i,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count, size_t written)
{
	i->iov_offset = 0;
	i->iov = iov;
	i->nr_segs = nr_segs;
	i->count = written + count;

	/* Step past what the caller already transferred. */
	iov_iter_advance(i, written);
}
/* Bytes still to be transferred through this iterator. */
static inline __attribute__((always_inline)) size_t iov_iter_count(struct iov_iter *i)
{
	size_t remaining = i->count;

	return remaining;
}
/*
 * State threaded through a paged read: bytes copied so far, bytes still
 * wanted, destination (user buffer or opaque data) and first error hit.
 */
typedef struct {
	size_t written;		/* bytes copied out so far */
	size_t count;		/* bytes remaining to satisfy the read */
	union {
		char *buf;	/* user buffer destination */
		void *data;	/* or actor-private cookie */
	} arg;
	int error;		/* first error encountered, 0 if none */
} read_descriptor_t;
/* Per-page callback invoked for each page of a paged read. */
typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
		unsigned long, unsigned long);
/*
 * Operations a filesystem provides for manipulating the pages of one
 * address_space: reading, writing, dirtying, direct I/O, migration and
 * reclaim.  Unimplemented hooks are left NULL.
 */
struct address_space_operations {
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	int (*readpage)(struct file *, struct page *);
	int (*writepages)(struct address_space *, struct writeback_control *);
	int (*set_page_dirty)(struct page *page);
	int (*readpages)(struct file *filp, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages);
	/* write_begin/write_end bracket one buffered-write copy into a page */
	int (*write_begin)(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata);
	int (*write_end)(struct file *, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata);
	sector_t (*bmap)(struct address_space *, sector_t);	/* file block -> device block */
	void (*invalidatepage) (struct page *, unsigned long);
	int (*releasepage) (struct page *, gfp_t);
	void (*freepage)(struct page *);
	ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
			loff_t offset, unsigned long nr_segs);
	int (*get_xip_mem)(struct address_space *, unsigned long, int,
						void **, unsigned long *);
	int (*migratepage) (struct address_space *,
			struct page *, struct page *);
	int (*launder_page) (struct page *);
	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
					unsigned long);
	int (*error_remove_page)(struct address_space *, struct page *);
};
18005 | extern const struct address_space_operations empty_aops; | |
18006 | int pagecache_write_begin(struct file *, struct address_space *mapping, | |
18007 | loff_t pos, unsigned len, unsigned flags, | |
18008 | struct page **pagep, void **fsdata); | |
18009 | int pagecache_write_end(struct file *, struct address_space *mapping, | |
18010 | loff_t pos, unsigned len, unsigned copied, | |
18011 | struct page *page, void *fsdata); | |
18012 | struct backing_dev_info; | |
/*
 * Page-cache and mmap bookkeeping for one inode; inode->i_mapping points
 * here (usually at the inode's embedded i_data).
 */
struct address_space {
	struct inode		*host;		/* owning inode (or block device) */
	struct radix_tree_root	page_tree;	/* all cached pages, by index */
	spinlock_t		tree_lock;	/* protects page_tree */
	unsigned int		i_mmap_writable;/* count of VM_SHARED mappings */
	struct prio_tree_root	i_mmap;		/* tree of linear private/shared mappings */
	struct list_head	i_mmap_nonlinear;/* list of nonlinear mappings */
	struct mutex		i_mmap_mutex;	/* protects both i_mmap structures */
	unsigned long		nrpages;	/* number of pages cached */
	unsigned long		writeback_index;/* where writeback resumes */
	const struct address_space_operations *a_ops;
	unsigned long		flags;		/* gfp mask and AS_* error bits */
	struct backing_dev_info *backing_dev_info;
	spinlock_t		private_lock;	/* protects private_list */
	struct list_head	private_list;	/* fs-private (e.g. metadata buffers) */
	struct address_space	*assoc_mapping;	/* mapping the private list belongs to */
} __attribute__((aligned(sizeof(long))));
/*
 * In-core state for one block device or partition (for a partition,
 * bd_contains points at the whole-disk device and bd_part at the slice).
 */
struct block_device {
	dev_t			bd_dev;		/* device number, also the search key */
	int			bd_openers;	/* how many times currently opened */
	struct inode *		bd_inode;	/* backing bdev-filesystem inode */
	struct super_block *	bd_super;	/* fs mounted on this device, if any */
	struct mutex		bd_mutex;	/* open/close mutex */
	struct list_head	bd_inodes;
	void *			bd_claiming;	/* holder attempting an exclusive claim */
	void *			bd_holder;	/* current exclusive holder cookie */
	int			bd_holders;
	bool			bd_write_holder;
	struct list_head	bd_holder_disks;
	struct block_device *	bd_contains;	/* whole-disk device (self if not a partition) */
	unsigned		bd_block_size;
	struct hd_struct *	bd_part;	/* partition descriptor */
	unsigned		bd_part_count;	/* open partitions counter */
	int			bd_invalidated;	/* set when partition table should be re-read */
	struct gendisk *	bd_disk;
	struct list_head	bd_list;
	unsigned long		bd_private;	/* claimer-private cookie */
	int			bd_fsfreeze_count;	/* freeze nesting depth */
	struct mutex		bd_fsfreeze_mutex;
};
18053 | int mapping_tagged(struct address_space *mapping, int tag); | |
18054 | static inline __attribute__((always_inline)) int mapping_mapped(struct address_space *mapping) | |
18055 | { | |
18056 | return !prio_tree_empty(&mapping->i_mmap) || | |
18057 | !list_empty(&mapping->i_mmap_nonlinear); | |
18058 | } | |
18059 | static inline __attribute__((always_inline)) int mapping_writably_mapped(struct address_space *mapping) | |
18060 | { | |
18061 | return mapping->i_mmap_writable != 0; | |
18062 | } | |
18063 | struct posix_acl; | |
/*
 * In-core inode.  Fields group roughly as: identity/permissions
 * (i_mode/i_uid/i_gid), VFS plumbing (i_op/i_sb/i_fop/i_mapping), cache
 * linkage (i_hash/i_wb_list/i_lru/i_sb_list), object data (i_size,
 * timestamps, i_blocks) and a per-file-type union (pipe/blockdev/chardev).
 */
struct inode {
	umode_t			i_mode;		/* file type and permission bits */
	uid_t			i_uid;
	gid_t			i_gid;
	const struct inode_operations	*i_op;
	struct super_block	*i_sb;
	spinlock_t		i_lock;		/* protects i_state and friends */
	unsigned int		i_flags;	/* S_* inode flags */
	unsigned long		i_state;	/* I_* state bits (dirty, new, freeing, ...) */
	void			*i_security;	/* LSM blob */
	struct mutex		i_mutex;
	unsigned long		dirtied_when;	/* jiffies of first dirtying */
	struct hlist_node	i_hash;		/* inode hash table linkage */
	struct list_head	i_wb_list;	/* writeback list linkage */
	struct list_head	i_lru;		/* unused-inode LRU linkage */
	struct list_head	i_sb_list;	/* per-superblock inode list */
	union {
		struct list_head	i_dentry;	/* dentries aliasing this inode */
		struct rcu_head		i_rcu;		/* used once the inode is being freed */
	};
	unsigned long		i_ino;
	atomic_t		i_count;	/* reference count */
	unsigned int		i_nlink;	/* hard link count */
	dev_t			i_rdev;		/* device number, for device nodes */
	unsigned int		i_blkbits;
	u64			i_version;
	loff_t			i_size;
	/* guards 64-bit i_size reads against torn updates; see i_size_read() */
	seqcount_t		i_size_seqcount;
	struct timespec		i_atime;
	struct timespec		i_mtime;
	struct timespec		i_ctime;
	blkcnt_t		i_blocks;	/* blocks allocated, in 512-byte units */
	unsigned short          i_bytes;	/* bytes used in the last block */
	struct rw_semaphore	i_alloc_sem;
	const struct file_operations	*i_fop;	/* copied into file->f_op at open */
	struct file_lock	*i_flock;	/* head of the lock/lease list */
	struct address_space	*i_mapping;	/* usually points at i_data below */
	struct address_space	i_data;		/* this inode's own page cache */
	struct dquot		*i_dquot[2];	/* user/group quota records */
	struct list_head	i_devices;
	union {
		struct pipe_inode_info	*i_pipe;
		struct block_device	*i_bdev;
		struct cdev		*i_cdev;
	};
	__u32			i_generation;
	__u32			i_fsnotify_mask;	/* events this inode cares about */
	struct hlist_head	i_fsnotify_marks;
	atomic_t		i_writecount;	/* writers (negative: VM_DENYWRITE) */
	struct posix_acl	*i_acl;
	struct posix_acl	*i_default_acl;
	void			*i_private;	/* fs or device private pointer */
};
/* True when the inode is not (or no longer) linked into the inode hash table. */
static inline __attribute__((always_inline)) int inode_unhashed(struct inode *inode)
{
	return hlist_unhashed(&inode->i_hash);
}
/*
 * Lockdep subclasses for inode->i_mutex, so legitimately nested acquisitions
 * (e.g. parent directory then child) can be annotated as distinct classes
 * instead of being reported as deadlocks.
 */
enum inode_i_mutex_lock_class
{
	I_MUTEX_NORMAL,
	I_MUTEX_PARENT,
	I_MUTEX_CHILD,
	I_MUTEX_XATTR,
	I_MUTEX_QUOTA
};
/*
 * Read i_size safely against a concurrent i_size_write().  loff_t is
 * 64-bit; this build evidently cannot load it atomically (the seqcount
 * retry form is generated only for such configurations), so re-read until
 * no writer ran in between — never returns a torn value.
 */
static inline __attribute__((always_inline)) loff_t i_size_read(const struct inode *inode)
{
	loff_t i_size;
	unsigned int seq;
	do {
		seq = read_seqcount_begin(&inode->i_size_seqcount);
		i_size = inode->i_size;
	} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
	return i_size;
}
/*
 * Update i_size under the seqcount so readers retry instead of observing a
 * torn 64-bit value.  The seqcount only excludes readers; concurrent
 * writers must be serialized by the caller (e.g. via i_mutex).
 */
static inline __attribute__((always_inline)) void i_size_write(struct inode *inode, loff_t i_size)
{
	write_seqcount_begin(&inode->i_size_seqcount);
	inode->i_size = i_size;
	write_seqcount_end(&inode->i_size_seqcount);
}
18145 | static inline __attribute__((always_inline)) unsigned iminor(const struct inode *inode) | |
18146 | { | |
18147 | return ((unsigned int) ((inode->i_rdev) & ((1U << 20) - 1))); | |
18148 | } | |
18149 | static inline __attribute__((always_inline)) unsigned imajor(const struct inode *inode) | |
18150 | { | |
18151 | return ((unsigned int) ((inode->i_rdev) >> 20)); | |
18152 | } | |
18153 | extern struct block_device *I_BDEV(struct inode *inode); | |
/* Who receives SIGIO/SIGURG for this file (set via fcntl F_SETOWN). */
struct fown_struct {
	rwlock_t lock;		/* protects pid/uid/euid */
	struct pid *pid;	/* pid or process group to signal */
	enum pid_type pid_type;	/* PIDTYPE_PID or PIDTYPE_PGID */
	uid_t uid, euid;	/* credentials of the setter, for permission checks */
	int signum;		/* signal to deliver (0 = default SIGIO) */
};
18161 | struct file_ra_state { | |
18162 | unsigned long start; | |
18163 | unsigned int size; | |
18164 | unsigned int async_size; | |
18165 | unsigned int ra_pages; | |
18166 | unsigned int mmap_miss; | |
18167 | loff_t prev_pos; | |
18168 | }; | |
18169 | static inline __attribute__((always_inline)) int ra_has_index(struct file_ra_state *ra, unsigned long index) | |
18170 | { | |
18171 | return (index >= ra->start && | |
18172 | index < ra->start + ra->size); | |
18173 | } | |
/*
 * One open file description — what a file descriptor ultimately refers to.
 * Reference-counted via f_count; multiple descriptors (dup, fork) may share
 * one struct file.
 */
struct file {
	union {
		struct list_head	fu_list;	/* per-sb open-files list */
		struct rcu_head 	fu_rcuhead;	/* for RCU-deferred freeing */
	} f_u;
	struct path		f_path;		/* dentry + vfsmount of the opened object */
	const struct file_operations	*f_op;
	spinlock_t		f_lock;		/* protects f_ep_links, f_flags */
	int			f_sb_list_cpu;	/* which per-cpu sb list fu_list is on */
	atomic_long_t		f_count;	/* reference count */
	unsigned int 		f_flags;	/* O_* flags as of last fcntl/open */
	fmode_t			f_mode;		/* FMODE_READ/WRITE/... */
	loff_t			f_pos;		/* current file position */
	struct fown_struct	f_owner;	/* SIGIO ownership */
	const struct cred	*f_cred;	/* credentials captured at open time */
	struct file_ra_state	f_ra;		/* readahead state */
	u64			f_version;
	void			*f_security;	/* LSM blob */
	void			*private_data;	/* driver/filesystem private */
	struct list_head	f_ep_links;	/* epoll watches on this file */
	struct address_space	*f_mapping;	/* page cache backing this file */
};
/*
 * Opaque handle used by name_to_handle_at()/open_by_handle_at():
 * handle_bytes valid bytes of filesystem-specific data follow in f_handle[]
 * (zero-length-array idiom for trailing variable data).
 */
struct file_handle {
	__u32 handle_bytes;
	int handle_type;
	unsigned char f_handle[0];
};
/*
 * Write-access accounting debug hooks — no-ops in this configuration
 * (presumably the write-count debug option is off); file_check_writeable()
 * therefore always reports success.
 */
static inline __attribute__((always_inline)) void file_take_write(struct file *filp) {}
static inline __attribute__((always_inline)) void file_release_write(struct file *filp) {}
static inline __attribute__((always_inline)) void file_reset_write(struct file *filp) {}
static inline __attribute__((always_inline)) void file_check_state(struct file *filp) {}
static inline __attribute__((always_inline)) int file_check_writeable(struct file *filp)
{
	return 0;
}
18209 | typedef struct files_struct *fl_owner_t; | |
/* Callbacks for copying/releasing private state attached to a file_lock. */
struct file_lock_operations {
	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
	void (*fl_release_private)(struct file_lock *);
};
/* Hooks a lock manager (e.g. lockd/NFS) plugs into the generic lock code. */
struct lock_manager_operations {
	int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
	void (*fl_notify)(struct file_lock *);		/* a blocked lock became grantable */
	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
	void (*fl_release_private)(struct file_lock *);
	void (*fl_break)(struct file_lock *);		/* a lease must be broken */
	int (*fl_change)(struct file_lock **, int);	/* modify/remove a lease */
};
/* Registration record for grace-period tracking (see locks_in_grace()). */
struct lock_manager {
	struct list_head list;
};
18225 | void locks_start_grace(struct lock_manager *); | |
18226 | void locks_end_grace(struct lock_manager *); | |
18227 | int locks_in_grace(void); | |
18228 | enum nfs_stat { | |
18229 | NFS_OK = 0, | |
18230 | NFSERR_PERM = 1, | |
18231 | NFSERR_NOENT = 2, | |
18232 | NFSERR_IO = 5, | |
18233 | NFSERR_NXIO = 6, | |
18234 | NFSERR_EAGAIN = 11, | |
18235 | NFSERR_ACCES = 13, | |
18236 | NFSERR_EXIST = 17, | |
18237 | NFSERR_XDEV = 18, | |
18238 | NFSERR_NODEV = 19, | |
18239 | NFSERR_NOTDIR = 20, | |
18240 | NFSERR_ISDIR = 21, | |
18241 | NFSERR_INVAL = 22, | |
18242 | NFSERR_FBIG = 27, | |
18243 | NFSERR_NOSPC = 28, | |
18244 | NFSERR_ROFS = 30, | |
18245 | NFSERR_MLINK = 31, | |
18246 | NFSERR_OPNOTSUPP = 45, | |
18247 | NFSERR_NAMETOOLONG = 63, | |
18248 | NFSERR_NOTEMPTY = 66, | |
18249 | NFSERR_DQUOT = 69, | |
18250 | NFSERR_STALE = 70, | |
18251 | NFSERR_REMOTE = 71, | |
18252 | NFSERR_WFLUSH = 99, | |
18253 | NFSERR_BADHANDLE = 10001, | |
18254 | NFSERR_NOT_SYNC = 10002, | |
18255 | NFSERR_BAD_COOKIE = 10003, | |
18256 | NFSERR_NOTSUPP = 10004, | |
18257 | NFSERR_TOOSMALL = 10005, | |
18258 | NFSERR_SERVERFAULT = 10006, | |
18259 | NFSERR_BADTYPE = 10007, | |
18260 | NFSERR_JUKEBOX = 10008, | |
18261 | NFSERR_SAME = 10009, | |
18262 | NFSERR_DENIED = 10010, | |
18263 | NFSERR_EXPIRED = 10011, | |
18264 | NFSERR_LOCKED = 10012, | |
18265 | NFSERR_GRACE = 10013, | |
18266 | NFSERR_FHEXPIRED = 10014, | |
18267 | NFSERR_SHARE_DENIED = 10015, | |
18268 | NFSERR_WRONGSEC = 10016, | |
18269 | NFSERR_CLID_INUSE = 10017, | |
18270 | NFSERR_RESOURCE = 10018, | |
18271 | NFSERR_MOVED = 10019, | |
18272 | NFSERR_NOFILEHANDLE = 10020, | |
18273 | NFSERR_MINOR_VERS_MISMATCH = 10021, | |
18274 | NFSERR_STALE_CLIENTID = 10022, | |
18275 | NFSERR_STALE_STATEID = 10023, | |
18276 | NFSERR_OLD_STATEID = 10024, | |
18277 | NFSERR_BAD_STATEID = 10025, | |
18278 | NFSERR_BAD_SEQID = 10026, | |
18279 | NFSERR_NOT_SAME = 10027, | |
18280 | NFSERR_LOCK_RANGE = 10028, | |
18281 | NFSERR_SYMLINK = 10029, | |
18282 | NFSERR_RESTOREFH = 10030, | |
18283 | NFSERR_LEASE_MOVED = 10031, | |
18284 | NFSERR_ATTRNOTSUPP = 10032, | |
18285 | NFSERR_NO_GRACE = 10033, | |
18286 | NFSERR_RECLAIM_BAD = 10034, | |
18287 | NFSERR_RECLAIM_CONFLICT = 10035, | |
18288 | NFSERR_BAD_XDR = 10036, | |
18289 | NFSERR_LOCKS_HELD = 10037, | |
18290 | NFSERR_OPENMODE = 10038, | |
18291 | NFSERR_BADOWNER = 10039, | |
18292 | NFSERR_BADCHAR = 10040, | |
18293 | NFSERR_BADNAME = 10041, | |
18294 | NFSERR_BAD_RANGE = 10042, | |
18295 | NFSERR_LOCK_NOTSUPP = 10043, | |
18296 | NFSERR_OP_ILLEGAL = 10044, | |
18297 | NFSERR_DEADLOCK = 10045, | |
18298 | NFSERR_FILE_OPEN = 10046, | |
18299 | NFSERR_ADMIN_REVOKED = 10047, | |
18300 | NFSERR_CB_PATH_DOWN = 10048, | |
18301 | }; | |
18302 | enum nfs_ftype { | |
18303 | NFNON = 0, | |
18304 | NFREG = 1, | |
18305 | NFDIR = 2, | |
18306 | NFBLK = 3, | |
18307 | NFCHR = 4, | |
18308 | NFLNK = 5, | |
18309 | NFSOCK = 6, | |
18310 | NFBAD = 7, | |
18311 | NFFIFO = 8 | |
18312 | }; | |
18313 | typedef u32 rpc_authflavor_t; | |
18314 | enum rpc_auth_flavors { | |
18315 | RPC_AUTH_NULL = 0, | |
18316 | RPC_AUTH_UNIX = 1, | |
18317 | RPC_AUTH_SHORT = 2, | |
18318 | RPC_AUTH_DES = 3, | |
18319 | RPC_AUTH_KRB = 4, | |
18320 | RPC_AUTH_GSS = 6, | |
18321 | RPC_AUTH_MAXFLAVOR = 8, | |
18322 | RPC_AUTH_GSS_KRB5 = 390003, | |
18323 | RPC_AUTH_GSS_KRB5I = 390004, | |
18324 | RPC_AUTH_GSS_KRB5P = 390005, | |
18325 | RPC_AUTH_GSS_LKEY = 390006, | |
18326 | RPC_AUTH_GSS_LKEYI = 390007, | |
18327 | RPC_AUTH_GSS_LKEYP = 390008, | |
18328 | RPC_AUTH_GSS_SPKM = 390009, | |
18329 | RPC_AUTH_GSS_SPKMI = 390010, | |
18330 | RPC_AUTH_GSS_SPKMP = 390011, | |
18331 | }; | |
18332 | enum rpc_msg_type { | |
18333 | RPC_CALL = 0, | |
18334 | RPC_REPLY = 1 | |
18335 | }; | |
18336 | enum rpc_reply_stat { | |
18337 | RPC_MSG_ACCEPTED = 0, | |
18338 | RPC_MSG_DENIED = 1 | |
18339 | }; | |
18340 | enum rpc_accept_stat { | |
18341 | RPC_SUCCESS = 0, | |
18342 | RPC_PROG_UNAVAIL = 1, | |
18343 | RPC_PROG_MISMATCH = 2, | |
18344 | RPC_PROC_UNAVAIL = 3, | |
18345 | RPC_GARBAGE_ARGS = 4, | |
18346 | RPC_SYSTEM_ERR = 5, | |
18347 | RPC_DROP_REPLY = 60000, | |
18348 | }; | |
18349 | enum rpc_reject_stat { | |
18350 | RPC_MISMATCH = 0, | |
18351 | RPC_AUTH_ERROR = 1 | |
18352 | }; | |
18353 | enum rpc_auth_stat { | |
18354 | RPC_AUTH_OK = 0, | |
18355 | RPC_AUTH_BADCRED = 1, | |
18356 | RPC_AUTH_REJECTEDCRED = 2, | |
18357 | RPC_AUTH_BADVERF = 3, | |
18358 | RPC_AUTH_REJECTEDVERF = 4, | |
18359 | RPC_AUTH_TOOWEAK = 5, | |
18360 | RPCSEC_GSS_CREDPROBLEM = 13, | |
18361 | RPCSEC_GSS_CTXPROBLEM = 14 | |
18362 | }; | |
18363 | typedef __be32 rpc_fraghdr; | |
18364 | extern __be32 in_aton(const char *str); | |
18365 | extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end); | |
18366 | extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end); | |
/*
 * Opaque NFS file handle: @size valid bytes of @data.  The 128-byte buffer
 * is sized for the largest (NFSv4) handle.
 */
struct nfs_fh {
	unsigned short		size;
	unsigned char		data[128];
};
/*
 * Compare two file handles.  Returns 0 when they are equal and non-zero
 * otherwise (memcmp-style, usable directly in boolean context).
 */
static inline __attribute__((always_inline)) int nfs_compare_fh(const struct nfs_fh *a, const struct nfs_fh *b)
{
	if (a->size != b->size)
		return 1;
	return __builtin_memcmp(a->data, b->data, a->size) != 0;
}
/* Duplicate @source into @target; only the valid @size bytes are copied. */
static inline __attribute__((always_inline)) void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source)
{
	unsigned short len = source->size;

	target->size = len;
	__builtin_memcpy(target->data, source->data, len);
}
/* NFSv3 WRITE stability levels (how durable the server must make the data). */
enum nfs3_stable_how {
	NFS_UNSTABLE = 0,	/* server may cache; needs a later COMMIT */
	NFS_DATA_SYNC = 1,	/* data on stable storage, metadata may lag */
	NFS_FILE_SYNC = 2	/* data and metadata both committed */
};
struct nlm_lockowner;
/* NLM (NFSv2/v3 lock manager) private state carried in file_lock.fl_u. */
struct nfs_lock_info {
	u32		state;
	struct nlm_lockowner *owner;
	struct list_head list;
};
struct nfs4_lock_state;
/* NFSv4 private lock state carried in file_lock.fl_u. */
struct nfs4_lock_info {
	struct nfs4_lock_state *owner;
};
/*
 * One POSIX record lock, BSD flock or lease on a file.  fl_start/fl_end
 * delimit the locked byte range; the fl_u union carries lock-manager
 * private state for NFS/AFS.
 */
struct file_lock {
	struct file_lock *fl_next;	/* next lock on this inode's list */
	struct list_head fl_link;	/* global lock-list linkage */
	struct list_head fl_block;	/* locks blocked waiting on this one */
	fl_owner_t fl_owner;		/* owning files_struct (POSIX semantics) */
	unsigned char fl_flags;		/* FL_POSIX / FL_FLOCK / FL_LEASE ... */
	unsigned char fl_type;		/* F_RDLCK / F_WRLCK / F_UNLCK */
	unsigned int fl_pid;		/* pid of the lock owner */
	struct pid *fl_nspid;		/* pid namespace aware owner pid */
	wait_queue_head_t fl_wait;	/* blocked waiters sleep here */
	struct file *fl_file;
	loff_t fl_start;		/* first byte of the locked range */
	loff_t fl_end;			/* last byte of the locked range */
	struct fasync_struct *	fl_fasync;	/* for lease-break notifications */
	unsigned long fl_break_time;	/* deadline for a broken lease */
	const struct file_lock_operations *fl_ops;	/* copy/release callbacks */
	const struct lock_manager_operations *fl_lmops;	/* lock-manager callbacks */
	union {
		struct nfs_lock_info	nfs_fl;
		struct nfs4_lock_info	nfs4_fl;
		struct {
			struct list_head link;
			int state;
		} afs;
	} fl_u;
};
/* Owner descriptor for fcntl(F_SETOWN_EX / F_GETOWN_EX). */
struct f_owner_ex {
	int	type;		/* F_OWNER_TID / F_OWNER_PID / F_OWNER_PGRP */
	__kernel_pid_t	pid;
};
/* Advisory record-lock description for fcntl(F_GETLK/F_SETLK/F_SETLKW). */
struct flock {
	short	l_type;		/* F_RDLCK / F_WRLCK / F_UNLCK */
	short	l_whence;	/* SEEK_SET / SEEK_CUR / SEEK_END base for l_start */
	__kernel_off_t	l_start;	/* offset of the range start */
	__kernel_off_t	l_len;		/* length; 0 means "to end of file" */
	__kernel_pid_t	l_pid;		/* pid of the blocking lock's owner (F_GETLK) */
};
/* Same as struct flock but with 64-bit offsets (F_GETLK64 and friends). */
struct flock64 {
	short	l_type;
	short	l_whence;
	__kernel_loff_t	l_start;
	__kernel_loff_t	l_len;
	__kernel_pid_t	l_pid;
};
/* File locking and lease API declarations (POSIX fcntl locks, BSD flock, leases). */
extern void send_sigio(struct fown_struct *fown, int fd, int band);
extern int fcntl_getlk(struct file *, struct flock *);
extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
			struct flock *);
extern int fcntl_getlk64(struct file *, struct flock64 *);
extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
			struct flock64 *);
extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
extern int fcntl_getlease(struct file *filp);
/* file_lock object lifetime: alloc/init/copy/free and teardown on file close. */
void locks_free_lock(struct file_lock *fl);
extern void locks_init_lock(struct file_lock *);
extern struct file_lock * locks_alloc_lock(void);
extern void locks_copy_lock(struct file_lock *, struct file_lock *);
extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_flock(struct file *);
extern void locks_release_private(struct file_lock *);
/* POSIX record locks. */
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
extern int posix_unblock_lock(struct file *, struct file_lock *);
/* VFS-level lock entry points (dispatch to filesystem or generic code). */
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
/* Leases. */
extern int __break_lease(struct inode *inode, unsigned int flags);
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **);
extern int vfs_setlease(struct file *, long, struct file_lock **);
extern int lease_modify(struct file_lock **, int);
extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
extern void lock_flocks(void);
extern void unlock_flocks(void);
/*
 * One entry on a file's asynchronous-notification (SIGIO/FASYNC) list.
 * Entries form a singly linked list via fa_next; fa_rcu allows RCU-deferred
 * freeing (see fasync_free below).
 */
struct fasync_struct {
	spinlock_t fa_lock;		/* protects this entry's fields */
	int magic;			/* sanity-check value */
	int fa_fd;			/* fd the owner registered with */
	struct fasync_struct *fa_next;	/* next entry on the list */
	struct file *fa_file;		/* file to signal; NULL-able on removal */
	struct rcu_head fa_rcu;		/* for RCU-delayed free */
};
/* FASYNC list management and SIGIO/SIGURG delivery. */
extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
extern int fasync_remove_entry(struct file *, struct fasync_struct **);
extern struct fasync_struct *fasync_alloc(void);
extern void fasync_free(struct fasync_struct *);
extern void kill_fasync(struct fasync_struct **, int, int);
/* F_SETOWN/F_GETOWN bookkeeping on struct file. */
extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
extern int f_setown(struct file *filp, unsigned long arg, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
/* Global list of mounted superblocks, protected by sb_lock. */
extern struct list_head super_blocks;
extern spinlock_t sb_lock;
/*
 * Per-mounted-filesystem state.  One instance per superblock; linked on the
 * global super_blocks list (s_list) under sb_lock.
 */
struct super_block {
	struct list_head s_list;		/* entry on super_blocks */
	dev_t s_dev;				/* backing device identifier */
	unsigned char s_dirt;			/* set by sb_mark_dirty() below */
	unsigned char s_blocksize_bits;		/* log2(s_blocksize) */
	unsigned long s_blocksize;
	loff_t s_maxbytes;			/* max file size on this fs */
	struct file_system_type *s_type;
	const struct super_operations *s_op;
	const struct dquot_operations *dq_op;	/* disk-quota operations */
	const struct quotactl_ops *s_qcop;
	const struct export_operations *s_export_op; /* NFS export hooks */
	unsigned long s_flags;			/* mount flags (MS_*) */
	unsigned long s_magic;			/* fs magic number */
	struct dentry *s_root;			/* root of the mounted tree */
	struct rw_semaphore s_umount;
	struct mutex s_lock;			/* taken by lock_super() */
	int s_count;				/* reference count */
	atomic_t s_active;			/* active references */
	void *s_security;			/* LSM private data */
	const struct xattr_handler **s_xattr;
	struct list_head s_inodes;		/* all inodes of this sb */
	struct hlist_bl_head s_anon;		/* anonymous dentries (NFS) */
	struct list_head *s_files;
	struct list_head s_dentry_lru;		/* unused-dentry LRU */
	int s_nr_dentry_unused;			/* length of the LRU above */
	struct block_device *s_bdev;
	struct backing_dev_info *s_bdi;
	struct mtd_info *s_mtd;			/* for MTD-backed filesystems */
	struct list_head s_instances;
	struct quota_info s_dquot;		/* quota-specific options */
	int s_frozen;				/* one of the SB_* freeze states */
	wait_queue_head_t s_wait_unfrozen;	/* waiters for thaw */
	char s_id[32];				/* informational name */
	u8 s_uuid[16];				/* filesystem UUID */
	void *s_fs_info;			/* filesystem private info */
	fmode_t s_mode;				/* mode the bdev was opened with */
	u32 s_time_gran;			/* timestamp granularity, ns */
	struct mutex s_vfs_rename_mutex;	/* serializes cross-dir renames */
	char *s_subtype;			/* e.g. fuse subtype */
	char *s_options;			/* saved mount options */
	const struct dentry_operations *s_d_op;	/* default d_op for dentries */
	int cleancache_poolid;			/* cleancache pool, -1 if none */
};
/* Current time truncated to this superblock's timestamp granularity. */
extern struct timespec current_fs_time(struct super_block *sb);
/* Freeze states stored in super_block.s_frozen. */
enum {
	SB_UNFROZEN = 0,
	SB_FREEZE_WRITE = 1,
	SB_FREEZE_TRANS = 2,
};
extern struct user_namespace init_user_ns;
extern bool inode_owner_or_capable(const struct inode *inode);
extern void lock_super(struct super_block *);
extern void unlock_super(struct super_block *);
/* VFS namespace-modifying primitives (create/link/unlink/rename & friends). */
extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
extern int vfs_mkdir(struct inode *, struct dentry *, int);
extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
extern int vfs_symlink(struct inode *, struct dentry *, const char *);
extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
extern int vfs_rmdir(struct inode *, struct dentry *);
extern int vfs_unlink(struct inode *, struct dentry *);
extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
extern void dentry_unhash(struct dentry *dentry);
extern int file_permission(struct file *, int);
/* Initialize uid/gid/mode of a new inode from its parent directory. */
extern void inode_init_owner(struct inode *inode, const struct inode *dir,
			mode_t mode);
/* State passed to ->fiemap() while filling in an extent map for userspace. */
struct fiemap_extent_info {
	unsigned int fi_flags;			/* flags as passed in by the caller */
	unsigned int fi_extents_mapped;		/* extents mapped so far */
	unsigned int fi_extents_max;		/* capacity of the array below */
	struct fiemap_extent *fi_extents_start;	/* start of the extent array */
};
/* Append one extent to the fiemap result; helpers for ->fiemap() implementations. */
int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
			u64 phys, u64 len, u32 flags);
int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
/* Callback invoked once per directory entry by ->readdir(). */
typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
struct block_device_operations;
/*
 * Operations a filesystem or driver provides on an open file.
 * Any member may be NULL, in which case the VFS applies its default
 * behaviour (or returns an error) for that operation.
 */
struct file_operations {
	struct module *owner;	/* pins the implementing module while in use */
	loff_t (*llseek) (struct file *, loff_t, int);
	ssize_t (*read) (struct file *, char *, size_t, loff_t *);
	ssize_t (*write) (struct file *, const char *, size_t, loff_t *);
	ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
	ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
	int (*readdir) (struct file *, void *, filldir_t);
	unsigned int (*poll) (struct file *, struct poll_table_struct *);
	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);	/* 32-bit ioctl on 64-bit kernel */
	int (*mmap) (struct file *, struct vm_area_struct *);
	int (*open) (struct inode *, struct file *);
	int (*flush) (struct file *, fl_owner_t id);	/* called on every close() */
	int (*release) (struct inode *, struct file *);	/* called when last reference drops */
	int (*fsync) (struct file *, int datasync);
	int (*aio_fsync) (struct kiocb *, int datasync);
	int (*fasync) (int, struct file *, int);
	int (*lock) (struct file *, int, struct file_lock *);
	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
	int (*check_flags)(int);	/* validate flags for fcntl(F_SETFL) */
	int (*flock) (struct file *, int, struct file_lock *);
	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
	int (*setlease)(struct file *, long, struct file_lock **);
	long (*fallocate)(struct file *file, int mode, loff_t offset,
			loff_t len);
};
/*
 * Operations on inodes (directory lookup, attribute and xattr handling,
 * namespace changes).  Cache-line aligned (1 << 6 == 64 bytes).
 */
struct inode_operations {
	struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
	void * (*follow_link) (struct dentry *, struct nameidata *);
	int (*permission) (struct inode *, int, unsigned int);
	int (*check_acl)(struct inode *, int, unsigned int);
	int (*readlink) (struct dentry *, char *,int);
	void (*put_link) (struct dentry *, struct nameidata *, void *);	/* undo follow_link */
	int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
	int (*link) (struct dentry *,struct inode *,struct dentry *);
	int (*unlink) (struct inode *,struct dentry *);
	int (*symlink) (struct inode *,struct dentry *,const char *);
	int (*mkdir) (struct inode *,struct dentry *,int);
	int (*rmdir) (struct inode *,struct dentry *);
	int (*mknod) (struct inode *,struct dentry *,int,dev_t);
	int (*rename) (struct inode *, struct dentry *,
			struct inode *, struct dentry *);
	void (*truncate) (struct inode *);
	int (*setattr) (struct dentry *, struct iattr *);
	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
	/* extended attributes */
	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
	ssize_t (*listxattr) (struct dentry *, char *, size_t);
	int (*removexattr) (struct dentry *, const char *);
	void (*truncate_range)(struct inode *, loff_t, loff_t);
	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
			u64 len);
} __attribute__((__aligned__((1 << (6)))));
struct seq_file;
/* Validate and copy a userspace iovec array for readv/writev-style calls. */
ssize_t rw_copy_check_uvector(int type, const struct iovec * uvector,
			unsigned long nr_segs, unsigned long fast_segs,
			struct iovec *fast_pointer,
			struct iovec **ret_pointer);
/* In-kernel read/write entry points (single buffer and vectored). */
extern ssize_t vfs_read(struct file *, char *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec *,
			unsigned long, loff_t *);
extern ssize_t vfs_writev(struct file *, const struct iovec *,
			unsigned long, loff_t *);
/* Superblock-level operations a filesystem provides; members may be NULL. */
struct super_operations {
	struct inode *(*alloc_inode)(struct super_block *sb);	/* allocate fs-specific inode */
	void (*destroy_inode)(struct inode *);
	void (*dirty_inode) (struct inode *, int flags);	/* inode was marked dirty */
	int (*write_inode) (struct inode *, struct writeback_control *wbc);
	int (*drop_inode) (struct inode *);	/* last reference dropped */
	void (*evict_inode) (struct inode *);
	void (*put_super) (struct super_block *);
	void (*write_super) (struct super_block *);
	int (*sync_fs)(struct super_block *sb, int wait);
	int (*freeze_fs) (struct super_block *);	/* quiesce for snapshot */
	int (*unfreeze_fs) (struct super_block *);
	int (*statfs) (struct dentry *, struct kstatfs *);
	int (*remount_fs) (struct super_block *, int *, char *);
	void (*umount_begin) (struct super_block *);
	/* /proc/mounts style introspection */
	int (*show_options)(struct seq_file *, struct vfsmount *);
	int (*show_devname)(struct seq_file *, struct vfsmount *);
	int (*show_path)(struct seq_file *, struct vfsmount *);
	int (*show_stats)(struct seq_file *, struct vfsmount *);
	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
	int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
};
/* Mark an inode dirty with the given I_DIRTY-style flag bits. */
extern void __mark_inode_dirty(struct inode *, int);
static inline __attribute__((always_inline)) void mark_inode_dirty(struct inode *inode)
{
	/*
	 * Flag the inode fully dirty: bits 0, 1 and 2 of the dirty mask
	 * (presumably I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES —
	 * confirm against the unexpanded fs.h).
	 */
	const int all_dirty_bits = (1 << 0) | (1 << 1) | (1 << 2);

	__mark_inode_dirty(inode, all_dirty_bits);
}
static inline __attribute__((always_inline)) void mark_inode_dirty_sync(struct inode *inode)
{
	/*
	 * Flag only bit 0 of the dirty mask (presumably I_DIRTY_SYNC —
	 * confirm against the unexpanded fs.h): metadata dirty, no data
	 * writeback implied.
	 */
	const int sync_dirty_bit = 1 << 0;

	__mark_inode_dirty(inode, sync_dirty_bit);
}
/* Bump the inode's hard-link count without marking it dirty. */
static inline __attribute__((always_inline)) void inc_nlink(struct inode *inode)
{
	inode->i_nlink++;
}
/* Bump the link count and mark the inode dirty so the change is written back. */
static inline __attribute__((always_inline)) void inode_inc_link_count(struct inode *inode)
{
	inc_nlink(inode);
	mark_inode_dirty(inode);
}
/* Decrement the inode's hard-link count without marking it dirty. */
static inline __attribute__((always_inline)) void drop_nlink(struct inode *inode)
{
	inode->i_nlink--;
}
/* Force the link count to zero (inode is being deleted). */
static inline __attribute__((always_inline)) void clear_nlink(struct inode *inode)
{
	inode->i_nlink = 0;
}
/* Decrement the link count and mark the inode dirty for writeback. */
static inline __attribute__((always_inline)) void inode_dec_link_count(struct inode *inode)
{
	drop_nlink(inode);
	mark_inode_dirty(inode);
}
/* Bump i_version under i_lock so concurrent readers see a consistent value. */
static inline __attribute__((always_inline)) void inode_inc_iversion(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	inode->i_version++;
	spin_unlock(&inode->i_lock);
}
extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
/*
 * Update the file's atime on access unless flag 01000000 (presumably
 * O_NOATIME — confirm) is set.  The unreadable condition below is the
 * preprocessor expansion of the branch-profiling if() macro: it records
 * taken/not-taken counts in a struct ftrace_branch_data placed in the
 * "_ftrace_branch" section, then evaluates the original test.
 */
static inline __attribute__((always_inline)) void file_accessed(struct file *file)
{
	if (__builtin_constant_p(((!(file->f_flags & 01000000)))) ? !!((!(file->f_flags & 01000000))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 1795, }; ______r = !!((!(file->f_flags & 01000000))); ______f.miss_hit[______r]++; ______r; }))
		touch_atime(file->f_path.mnt, file->f_path.dentry);
}
int sync_inode(struct inode *inode, struct writeback_control *wbc);
int sync_inode_metadata(struct inode *inode, int wait);
/*
 * One registered filesystem type (ext4, nfs, ...).  Instances are linked
 * through 'next' in the global list maintained by register_filesystem().
 */
struct file_system_type {
	const char *name;			/* e.g. "ext4" */
	int fs_flags;
	struct dentry *(*mount) (struct file_system_type *, int,
			const char *, void *);	/* mount entry point */
	void (*kill_sb) (struct super_block *);	/* tear down a superblock */
	struct module *owner;
	struct file_system_type * next;		/* next registered type */
	struct list_head fs_supers;		/* superblocks of this type */
	/* lockdep class keys for per-superblock locks */
	struct lock_class_key s_lock_key;
	struct lock_class_key s_umount_key;
	struct lock_class_key s_vfs_rename_key;
	struct lock_class_key i_lock_key;
	struct lock_class_key i_mutex_key;
	struct lock_class_key i_mutex_dir_key;
	struct lock_class_key i_alloc_sem_key;
};
/* Generic mount helpers filesystems use from their ->mount() implementations. */
extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
	void *data, int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int));
/* Superblock teardown and reference management. */
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
/* Find an existing superblock matching 'test' or create one via 'set'. */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			void *data);
extern struct dentry *mount_pseudo(struct file_system_type *, char *,
	const struct super_operations *ops,
	const struct dentry_operations *dops,
	unsigned long);
/* Mark the superblock as needing write_super() (sets s_dirt). */
static inline __attribute__((always_inline)) void sb_mark_dirty(struct super_block *sb)
{
	sb->s_dirt = 1;
}
/* Clear the dirty flag after the superblock has been written. */
static inline __attribute__((always_inline)) void sb_mark_clean(struct super_block *sb)
{
	sb->s_dirt = 0;
}
/* Non-zero if the superblock has pending changes. */
static inline __attribute__((always_inline)) int sb_is_dirty(struct super_block *sb)
{
	return sb->s_dirt;
}
/* Filesystem type registration and mount-tree manipulation. */
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
extern long do_mount(char *, char *, char *, unsigned long, void *);
extern struct vfsmount *collect_mounts(struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
			struct vfsmount *);
/* statfs variants keyed by path, name, fd or dentry. */
extern int vfs_statfs(struct path *, struct kstatfs *);
extern int user_statfs(const char *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
extern int current_umask(void);
extern struct kobject *fs_kobj;
extern int rw_verify_area(int, struct file *, loff_t *, size_t);
extern int locks_mandatory_locked(struct inode *);
extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
/*
 * Mandatory-locking candidate test: setgid bit (0002000) set while
 * group-execute (00010) is clear — the traditional encoding that marks a
 * file for mandatory locking.
 */
static inline __attribute__((always_inline)) int __mandatory_lock(struct inode *ino)
{
	return (ino->i_mode & (0002000 | 00010)) == 0002000;
}
/*
 * Mandatory locking applies only if the filesystem is mounted with flag 64
 * (presumably MS_MANDLOCK — confirm) AND the mode bits above are set.
 */
static inline __attribute__((always_inline)) int mandatory_lock(struct inode *ino)
{
	return ((ino)->i_sb->s_flags & (64)) && __mandatory_lock(ino);
}
/*
 * If mandatory locking applies, check whether the inode is currently locked.
 * The opaque conditions below are branch-profiling expansions of the likely/
 * unlikely if() macro (see ftrace_branch_data); they evaluate the original
 * test and count hits/misses as a side effect.
 */
static inline __attribute__((always_inline)) int locks_verify_locked(struct inode *inode)
{
	if (__builtin_constant_p(((mandatory_lock(inode)))) ? !!((mandatory_lock(inode))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 1923, }; ______r = !!((mandatory_lock(inode))); ______f.miss_hit[______r]++; ______r; }))
		return locks_mandatory_locked(inode);
	return 0;
}
/*
 * Before truncating to 'size', verify no mandatory lock covers the region
 * between the old and new file size (direction 2 == write check).
 */
static inline __attribute__((always_inline)) int locks_verify_truncate(struct inode *inode,
			struct file *filp,
			loff_t size)
{
	if (__builtin_constant_p(((inode->i_flock && mandatory_lock(inode)))) ? !!((inode->i_flock && mandatory_lock(inode))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 1932, }; ______r = !!((inode->i_flock && mandatory_lock(inode))); ______f.miss_hit[______r]++; ______r; }))
		return locks_mandatory_area(
			2, inode, filp,
			size < inode->i_size ? size : inode->i_size,
			(size < inode->i_size ? inode->i_size - size
			: size - inode->i_size)
		);
	return 0;
}
/* Break any lease on the inode before an open/truncate that conflicts. */
static inline __attribute__((always_inline)) int break_lease(struct inode *inode, unsigned int mode)
{
	if (__builtin_constant_p(((inode->i_flock))) ? !!((inode->i_flock)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 1944, }; ______r = !!((inode->i_flock)); ______f.miss_hit[______r]++; ______r; }))
		return __break_lease(inode, mode);
	return 0;
}
/* Open/close/truncate entry points. */
extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
			struct file *filp);
extern int do_fallocate(struct file *file, int mode, loff_t offset,
			loff_t len);
extern long do_sys_open(int dfd, const char *filename, int flags,
			int mode);
extern struct file *filp_open(const char *, int, int);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
			const char *, int);
extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
			const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
/* Pathname copy-in helpers; getname() result must be released with putname(). */
extern char * getname(const char *);
extern int ioctl_preallocate(struct file *filp, void *argp);
/* Boot-time VFS cache initialization (__init / cold section attributes expanded). */
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init_early(void);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init(unsigned long);
extern struct kmem_cache *names_cachep;
extern void putname(const char *name);
/* Block device management. */
extern int register_blkdev(unsigned int, const char *);
extern void unregister_blkdev(unsigned int, const char *);
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern void invalidate_bdev(struct block_device *);
extern int sync_blockdev(struct block_device *bdev);
extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
extern int sync_filesystem(struct super_block *);
/* Default file_operations for special file types. */
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
extern const struct file_operations bad_sock_fops;
extern const struct file_operations def_fifo_fops;
extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
			void *holder);
extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
			void *holder);
extern int blkdev_put(struct block_device *bdev, fmode_t mode);
extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
extern void bd_unlink_disk_holder(struct block_device *bdev,
			struct gendisk *disk);
/* Character device number registration. */
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
extern int __register_chrdev(unsigned int major, unsigned int baseminor,
			unsigned int count, const char *name,
			const struct file_operations *fops);
extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
			unsigned int count, const char *name);
extern void unregister_chrdev_region(dev_t, unsigned);
extern void chrdev_show(struct seq_file *,off_t);
/*
 * Legacy char-device registration: claims all 256 minors of 'major'
 * in one call by delegating to __register_chrdev().
 */
static inline __attribute__((always_inline)) int register_chrdev(unsigned int major, const char *name,
			const struct file_operations *fops)
{
	const unsigned int first_minor = 0;
	const unsigned int minor_count = 256;

	return __register_chrdev(major, first_minor, minor_count, name, fops);
}
/* Release the full 256-minor range claimed by register_chrdev(). */
static inline __attribute__((always_inline)) void unregister_chrdev(unsigned int major, const char *name)
{
	const unsigned int first_minor = 0;
	const unsigned int minor_count = 256;

	__unregister_chrdev(major, first_minor, minor_count, name);
}
/* Device naming and special-inode setup. */
extern const char *__bdevname(dev_t, char *buffer);
extern const char *bdevname(struct block_device *bdev, char *buffer);
extern struct block_device *lookup_bdev(const char *);
extern void blkdev_show(struct seq_file *,off_t);
extern void init_special_inode(struct inode *, umode_t, dev_t);
extern void make_bad_inode(struct inode *);
extern int is_bad_inode(struct inode *);
/* Default file_operations for pipe/FIFO endpoints. */
extern const struct file_operations read_pipefifo_fops;
extern const struct file_operations write_pipefifo_fops;
extern const struct file_operations rdwr_pipefifo_fops;
extern int fs_may_remount_ro(struct super_block *);
/* Disk revalidation and page-cache invalidation. */
extern void check_disk_size_change(struct gendisk *disk,
			struct block_device *bdev);
extern int revalidate_disk(struct gendisk *);
extern int check_disk_change(struct block_device *);
extern int __invalidate_device(struct block_device *, bool);
extern int invalidate_partition(struct gendisk *, int);
unsigned long invalidate_mapping_pages(struct address_space *mapping,
			unsigned long start, unsigned long end);
/*
 * Drop all clean page-cache pages of a remote (network-fs) inode when the
 * server says the file changed.  The opaque condition is the branch-profiling
 * expansion of an S_ISREG/S_ISDIR/S_ISLNK mode test: 00170000 is the file
 * type mask; 0100000 regular file, 0040000 directory, 0120000 symlink.
 */
static inline __attribute__((always_inline)) void invalidate_remote_inode(struct inode *inode)
{
	if (__builtin_constant_p((((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) || (((inode->i_mode) & 00170000) == 0120000)))) ? !!(((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) || (((inode->i_mode) & 00170000) == 0120000))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
	"include/linux/fs.h"
	, .line =
	2159
	, }; ______r = !!(((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) || (((inode->i_mode) & 00170000) == 0120000))); ______f.miss_hit[______r]++; ______r; }))
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
/* Page-cache invalidation and writeback control. */
extern int invalidate_inode_pages2(struct address_space *mapping);
extern int invalidate_inode_pages2_range(struct address_space *mapping,
			unsigned long start, unsigned long end);
extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
extern int filemap_fdatawait(struct address_space *);
extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
			loff_t lend);
extern int filemap_write_and_wait(struct address_space *mapping);
extern int filemap_write_and_wait_range(struct address_space *mapping,
			loff_t lstart, loff_t lend);
extern int __filemap_fdatawrite_range(struct address_space *mapping,
			loff_t start, loff_t end, int sync_mode);
extern int filemap_fdatawrite_range(struct address_space *mapping,
			loff_t start, loff_t end);
/* fsync family. */
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
			int datasync);
extern int vfs_fsync(struct file *file, int datasync);
extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
extern void sync_supers(void);
extern void emergency_sync(void);
extern void emergency_remount(void);
extern sector_t bmap(struct inode *, sector_t);
extern int notify_change(struct dentry *, struct iattr *);
/* Permission checking. */
extern int inode_permission(struct inode *, int);
extern int generic_permission(struct inode *, int, unsigned int,
			int (*check_acl)(struct inode *, int, unsigned int));
18937 | static inline __attribute__((always_inline)) bool execute_ok(struct inode *inode) | |
18938 | { | |
18939 | return (inode->i_mode & (00100|00010|00001)) || (((inode->i_mode) & 00170000) == 0040000); | |
18940 | } | |
/* i_writecount management: writers vs. deny-write (e.g. running executables). */
extern int get_write_access(struct inode *);
extern int deny_write_access(struct file *);
/* Release a write reference taken with get_write_access(). */
static inline __attribute__((always_inline)) void put_write_access(struct inode * inode)
{
	atomic_dec(&inode->i_writecount);
}
/*
 * Undo deny_write_access() for a (possibly NULL) file.  The opaque condition
 * is the branch-profiling expansion of a plain NULL check on 'file'.
 */
static inline __attribute__((always_inline)) void allow_write_access(struct file *file)
{
	if (__builtin_constant_p(((file))) ? !!((file)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 2207, }; ______r = !!((file)); ______f.miss_hit[______r]++; ______r; }))
		atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
}
/*
 * i_readcount accounting stubs: no-ops in this configuration (the feature
 * appears compiled out — likely a disabled CONFIG option; confirm against
 * the unexpanded fs.h).  The inode argument is intentionally unused.
 */
static inline __attribute__((always_inline)) void i_readcount_dec(struct inode *inode)
{
}
static inline __attribute__((always_inline)) void i_readcount_inc(struct inode *inode)
{
}
/* Pipe creation helpers. */
extern int do_pipe_flags(int *, int);
extern struct file *create_read_pipe(struct file *f, int flags);
extern struct file *create_write_pipe(int flags);
extern void free_write_pipe(struct file *);
extern int kernel_read(struct file *, loff_t, char *, unsigned long);
extern struct file * open_exec(const char *);
/* Path relationship queries. */
extern int is_subdir(struct dentry *, struct dentry *);
extern int path_is_under(struct path *, struct path *);
extern ino_t find_inode_number(struct dentry *, struct qstr *);
/* llseek helpers. */
extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
/* Inode lifecycle: allocation, hashing, lookup and reference counting. */
extern int inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
extern void address_space_init_once(struct address_space *mapping);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
extern int inode_needs_sync(struct inode *inode);
extern int generic_delete_inode(struct inode *inode);
extern int generic_drop_inode(struct inode *inode);
/* Hash lookups; the *5* variants take a caller-supplied test/set pair. */
extern struct inode *ilookup5_nowait(struct super_block *sb,
			unsigned long hashval, int (*test)(struct inode *, void *),
			void *data);
extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
			int (*test)(struct inode *, void *), void *data);
extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
extern int insert_inode_locked(struct inode *);
extern void unlock_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
extern void end_writeback(struct inode *);
extern void __destroy_inode(struct inode *);
extern struct inode *new_inode(struct super_block *);
extern void free_inode_nonrcu(struct inode *inode);
/* setuid/setgid stripping on write. */
extern int should_remove_suid(struct dentry *);
extern int file_remove_suid(struct file *);
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
extern void remove_inode_hash(struct inode *);
/* Hash the inode by its own inode number. */
static inline __attribute__((always_inline)) void insert_inode_hash(struct inode *inode)
{
	__insert_inode_hash(inode, inode->i_ino);
}
19007 | extern void inode_sb_list_add(struct inode *inode); | |
19008 | extern void submit_bio(int, struct bio *); | |
19009 | extern int bdev_read_only(struct block_device *); | |
19010 | extern int set_blocksize(struct block_device *, int); | |
19011 | extern int sb_set_blocksize(struct super_block *, int); | |
19012 | extern int sb_min_blocksize(struct super_block *, int); | |
19013 | extern int generic_file_mmap(struct file *, struct vm_area_struct *); | |
19014 | extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); | |
19015 | extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size); | |
19016 | int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); | |
19017 | extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t); | |
19018 | extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, | |
19019 | loff_t *); | |
19020 | extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t); | |
19021 | extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *, | |
19022 | unsigned long *, loff_t, loff_t *, size_t, size_t); | |
19023 | extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *, | |
19024 | unsigned long, loff_t, loff_t *, size_t, ssize_t); | |
19025 | extern ssize_t do_sync_read(struct file *filp, char *buf, size_t len, loff_t *ppos); | |
19026 | extern ssize_t do_sync_write(struct file *filp, const char *buf, size_t len, loff_t *ppos); | |
19027 | extern int generic_segment_checks(const struct iovec *iov, | |
19028 | unsigned long *nr_segs, size_t *count, int access_flags); | |
19029 | extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, | |
19030 | unsigned long nr_segs, loff_t pos); | |
19031 | extern int blkdev_fsync(struct file *filp, int datasync); | |
19032 | extern ssize_t generic_file_splice_read(struct file *, loff_t *, | |
19033 | struct pipe_inode_info *, size_t, unsigned int); | |
19034 | extern ssize_t default_file_splice_read(struct file *, loff_t *, | |
19035 | struct pipe_inode_info *, size_t, unsigned int); | |
19036 | extern ssize_t generic_file_splice_write(struct pipe_inode_info *, | |
19037 | struct file *, loff_t *, size_t, unsigned int); | |
19038 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, | |
19039 | struct file *out, loff_t *, size_t len, unsigned int flags); | |
19040 | extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, | |
19041 | size_t len, unsigned int flags); | |
19042 | extern void | |
19043 | file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); | |
19044 | extern loff_t noop_llseek(struct file *file, loff_t offset, int origin); | |
19045 | extern loff_t no_llseek(struct file *file, loff_t offset, int origin); | |
19046 | extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); | |
19047 | extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset, | |
19048 | int origin); | |
19049 | extern int generic_file_open(struct inode * inode, struct file * filp); | |
19050 | extern int nonseekable_open(struct inode * inode, struct file * filp); | |
19051 | static inline __attribute__((always_inline)) int xip_truncate_page(struct address_space *mapping, loff_t from) | |
19052 | { | |
19053 | return 0; | |
19054 | } | |
19055 | typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, | |
19056 | loff_t file_offset); | |
19057 | enum { | |
19058 | DIO_LOCKING = 0x01, | |
19059 | DIO_SKIP_HOLES = 0x02, | |
19060 | }; | |
19061 | void dio_end_io(struct bio *bio, int error); | |
19062 | ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |
19063 | struct block_device *bdev, const struct iovec *iov, loff_t offset, | |
19064 | unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, | |
19065 | dio_submit_t submit_io, int flags); | |
19066 | static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, | |
19067 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | |
19068 | loff_t offset, unsigned long nr_segs, get_block_t get_block, | |
19069 | dio_iodone_t end_io) | |
19070 | { | |
19071 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | |
19072 | nr_segs, get_block, end_io, ((void *)0), | |
19073 | DIO_LOCKING | DIO_SKIP_HOLES); | |
19074 | } | |
19075 | extern const struct file_operations generic_ro_fops; | |
19076 | extern int vfs_readlink(struct dentry *, char *, int, const char *); | |
19077 | extern int vfs_follow_link(struct nameidata *, const char *); | |
19078 | extern int page_readlink(struct dentry *, char *, int); | |
19079 | extern void *page_follow_link_light(struct dentry *, struct nameidata *); | |
19080 | extern void page_put_link(struct dentry *, struct nameidata *, void *); | |
19081 | extern int __page_symlink(struct inode *inode, const char *symname, int len, | |
19082 | int nofs); | |
19083 | extern int page_symlink(struct inode *inode, const char *symname, int len); | |
19084 | extern const struct inode_operations page_symlink_inode_operations; | |
19085 | extern int generic_readlink(struct dentry *, char *, int); | |
19086 | extern void generic_fillattr(struct inode *, struct kstat *); | |
19087 | extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); | |
19088 | void __inode_add_bytes(struct inode *inode, loff_t bytes); | |
19089 | void inode_add_bytes(struct inode *inode, loff_t bytes); | |
19090 | void inode_sub_bytes(struct inode *inode, loff_t bytes); | |
19091 | loff_t inode_get_bytes(struct inode *inode); | |
19092 | void inode_set_bytes(struct inode *inode, loff_t bytes); | |
19093 | extern int vfs_readdir(struct file *, filldir_t, void *); | |
19094 | extern int vfs_stat(const char *, struct kstat *); | |
19095 | extern int vfs_lstat(const char *, struct kstat *); | |
19096 | extern int vfs_fstat(unsigned int, struct kstat *); | |
19097 | extern int vfs_fstatat(int , const char *, struct kstat *, int); | |
19098 | extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, | |
19099 | unsigned long arg); | |
19100 | extern int __generic_block_fiemap(struct inode *inode, | |
19101 | struct fiemap_extent_info *fieinfo, | |
19102 | loff_t start, loff_t len, | |
19103 | get_block_t *get_block); | |
19104 | extern int generic_block_fiemap(struct inode *inode, | |
19105 | struct fiemap_extent_info *fieinfo, u64 start, | |
19106 | u64 len, get_block_t *get_block); | |
19107 | extern void get_filesystem(struct file_system_type *fs); | |
19108 | extern void put_filesystem(struct file_system_type *fs); | |
19109 | extern struct file_system_type *get_fs_type(const char *name); | |
19110 | extern struct super_block *get_super(struct block_device *); | |
19111 | extern struct super_block *get_active_super(struct block_device *bdev); | |
19112 | extern struct super_block *user_get_super(dev_t); | |
19113 | extern void drop_super(struct super_block *sb); | |
19114 | extern void iterate_supers(void (*)(struct super_block *, void *), void *); | |
19115 | extern int dcache_dir_open(struct inode *, struct file *); | |
19116 | extern int dcache_dir_close(struct inode *, struct file *); | |
19117 | extern loff_t dcache_dir_lseek(struct file *, loff_t, int); | |
19118 | extern int dcache_readdir(struct file *, void *, filldir_t); | |
19119 | extern int simple_setattr(struct dentry *, struct iattr *); | |
19120 | extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); | |
19121 | extern int simple_statfs(struct dentry *, struct kstatfs *); | |
19122 | extern int simple_link(struct dentry *, struct inode *, struct dentry *); | |
19123 | extern int simple_unlink(struct inode *, struct dentry *); | |
19124 | extern int simple_rmdir(struct inode *, struct dentry *); | |
19125 | extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); | |
19126 | extern int noop_fsync(struct file *, int); | |
19127 | extern int simple_empty(struct dentry *); | |
19128 | extern int simple_readpage(struct file *file, struct page *page); | |
19129 | extern int simple_write_begin(struct file *file, struct address_space *mapping, | |
19130 | loff_t pos, unsigned len, unsigned flags, | |
19131 | struct page **pagep, void **fsdata); | |
19132 | extern int simple_write_end(struct file *file, struct address_space *mapping, | |
19133 | loff_t pos, unsigned len, unsigned copied, | |
19134 | struct page *page, void *fsdata); | |
19135 | extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *); | |
19136 | extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *); | |
19137 | extern const struct file_operations simple_dir_operations; | |
19138 | extern const struct inode_operations simple_dir_inode_operations; | |
19139 | struct tree_descr { char *name; const struct file_operations *ops; int mode; }; | |
19140 | struct dentry *d_alloc_name(struct dentry *, const char *); | |
19141 | extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *); | |
19142 | extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); | |
19143 | extern void simple_release_fs(struct vfsmount **mount, int *count); | |
19144 | extern ssize_t simple_read_from_buffer(void *to, size_t count, | |
19145 | loff_t *ppos, const void *from, size_t available); | |
19146 | extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, | |
19147 | const void *from, size_t count); | |
19148 | extern int generic_file_fsync(struct file *, int); | |
19149 | extern int generic_check_addressable(unsigned, u64); | |
19150 | extern int buffer_migrate_page(struct address_space *, | |
19151 | struct page *, struct page *); | |
19152 | extern int inode_change_ok(const struct inode *, struct iattr *); | |
19153 | extern int inode_newsize_ok(const struct inode *, loff_t offset); | |
19154 | extern void setattr_copy(struct inode *inode, const struct iattr *attr); | |
19155 | extern void file_update_time(struct file *file); | |
19156 | extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt); | |
19157 | extern void save_mount_options(struct super_block *sb, char *options); | |
19158 | extern void replace_mount_options(struct super_block *sb, char *options); | |
19159 | static inline __attribute__((always_inline)) ino_t parent_ino(struct dentry *dentry) | |
19160 | { | |
19161 | ino_t res; | |
19162 | spin_lock(&dentry->d_lock); | |
19163 | res = dentry->d_parent->d_inode->i_ino; | |
19164 | spin_unlock(&dentry->d_lock); | |
19165 | return res; | |
19166 | } | |
19167 | struct simple_transaction_argresp { | |
19168 | ssize_t size; | |
19169 | char data[0]; | |
19170 | }; | |
19171 | char *simple_transaction_get(struct file *file, const char *buf, | |
19172 | size_t size); | |
19173 | ssize_t simple_transaction_read(struct file *file, char *buf, | |
19174 | size_t size, loff_t *pos); | |
19175 | int simple_transaction_release(struct inode *inode, struct file *file); | |
19176 | void simple_transaction_set(struct file *file, size_t n); | |
19177 | static inline __attribute__((always_inline)) void __attribute__((format(printf, 1, 2))) | |
19178 | __simple_attr_check_format(const char *fmt, ...) | |
19179 | { | |
19180 | } | |
19181 | int simple_attr_open(struct inode *inode, struct file *file, | |
19182 | int (*get)(void *, u64 *), int (*set)(void *, u64), | |
19183 | const char *fmt); | |
19184 | int simple_attr_release(struct inode *inode, struct file *file); | |
19185 | ssize_t simple_attr_read(struct file *file, char *buf, | |
19186 | size_t len, loff_t *ppos); | |
19187 | ssize_t simple_attr_write(struct file *file, const char *buf, | |
19188 | size_t len, loff_t *ppos); | |
19189 | struct ctl_table; | |
19190 | int proc_nr_files(struct ctl_table *table, int write, | |
19191 | void *buffer, size_t *lenp, loff_t *ppos); | |
19192 | int proc_nr_dentry(struct ctl_table *table, int write, | |
19193 | void *buffer, size_t *lenp, loff_t *ppos); | |
19194 | int proc_nr_inodes(struct ctl_table *table, int write, | |
19195 | void *buffer, size_t *lenp, loff_t *ppos); | |
19196 | int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) get_filesystem_list(char *buf); | |
19197 | static inline __attribute__((always_inline)) int is_sxid(mode_t mode) | |
19198 | { | |
19199 | return (mode & 0004000) || ((mode & 0002000) && (mode & 00010)); | |
19200 | } | |
19201 | static inline __attribute__((always_inline)) void inode_has_no_xattr(struct inode *inode) | |
19202 | { | |
19203 | if (__builtin_constant_p(((!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & (1<<28))))) ? !!((!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & (1<<28)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 2597, }; ______r = !!((!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & (1<<28)))); ______f.miss_hit[______r]++; ______r; })) | |
19204 | inode->i_flags |= 4096; | |
19205 | } | |
19206 | struct range { | |
19207 | u64 start; | |
19208 | u64 end; | |
19209 | }; | |
19210 | int add_range(struct range *range, int az, int nr_range, | |
19211 | u64 start, u64 end); | |
19212 | int add_range_with_merge(struct range *range, int az, int nr_range, | |
19213 | u64 start, u64 end); | |
19214 | void subtract_range(struct range *range, int az, u64 start, u64 end); | |
19215 | int clean_sort_range(struct range *range, int az); | |
19216 | void sort_range(struct range *range, int nr_range); | |
19217 | static inline __attribute__((always_inline)) resource_size_t cap_resource(u64 val) | |
19218 | { | |
19219 | if (__builtin_constant_p(((val > ((resource_size_t)~0)))) ? !!((val > ((resource_size_t)~0))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/range.h", .line = 25, }; ______r = !!((val > ((resource_size_t)~0))); ______f.miss_hit[______r]++; ______r; })) | |
19220 | return ((resource_size_t)~0); | |
19221 | return val; | |
19222 | } | |
19223 | struct mempolicy; | |
19224 | struct anon_vma; | |
19225 | struct file_ra_state; | |
19226 | struct user_struct; | |
19227 | struct writeback_control; | |
19228 | extern unsigned long max_mapnr; | |
19229 | extern unsigned long num_physpages; | |
19230 | extern unsigned long totalram_pages; | |
19231 | extern void * high_memory; | |
19232 | extern int page_cluster; | |
19233 | extern int sysctl_legacy_va_layout; | |
19234 | extern unsigned long empty_zero_page[((1UL) << 12) / sizeof(unsigned long)]; | |
19235 | extern spinlock_t pgd_lock; | |
19236 | extern struct list_head pgd_list; | |
19237 | extern struct mm_struct *pgd_page_get_mm(struct page *page); | |
19238 | static inline __attribute__((always_inline)) int pte_dirty(pte_t pte) | |
19239 | { | |
19240 | return pte_flags(pte) & (((pteval_t)(1)) << 6); | |
19241 | } | |
19242 | static inline __attribute__((always_inline)) int pte_young(pte_t pte) | |
19243 | { | |
19244 | return pte_flags(pte) & (((pteval_t)(1)) << 5); | |
19245 | } | |
19246 | static inline __attribute__((always_inline)) int pmd_young(pmd_t pmd) | |
19247 | { | |
19248 | return pmd_flags(pmd) & (((pteval_t)(1)) << 5); | |
19249 | } | |
19250 | static inline __attribute__((always_inline)) int pte_write(pte_t pte) | |
19251 | { | |
19252 | return pte_flags(pte) & (((pteval_t)(1)) << 1); | |
19253 | } | |
19254 | static inline __attribute__((always_inline)) int pte_file(pte_t pte) | |
19255 | { | |
19256 | return pte_flags(pte) & (((pteval_t)(1)) << 6); | |
19257 | } | |
19258 | static inline __attribute__((always_inline)) int pte_huge(pte_t pte) | |
19259 | { | |
19260 | return pte_flags(pte) & (((pteval_t)(1)) << 7); | |
19261 | } | |
19262 | static inline __attribute__((always_inline)) int pte_global(pte_t pte) | |
19263 | { | |
19264 | return pte_flags(pte) & (((pteval_t)(1)) << 8); | |
19265 | } | |
19266 | static inline __attribute__((always_inline)) int pte_exec(pte_t pte) | |
19267 | { | |
19268 | return !(pte_flags(pte) & (((pteval_t)(1)) << 63)); | |
19269 | } | |
19270 | static inline __attribute__((always_inline)) int pte_special(pte_t pte) | |
19271 | { | |
19272 | return pte_flags(pte) & (((pteval_t)(1)) << 9); | |
19273 | } | |
19274 | static inline __attribute__((always_inline)) unsigned long pte_pfn(pte_t pte) | |
19275 | { | |
19276 | return (pte_val(pte) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1))))) >> 12; | |
19277 | } | |
19278 | static inline __attribute__((always_inline)) unsigned long pmd_pfn(pmd_t pmd) | |
19279 | { | |
19280 | return (pmd_val(pmd) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1))))) >> 12; | |
19281 | } | |
19282 | static inline __attribute__((always_inline)) int pmd_large(pmd_t pte) | |
19283 | { | |
19284 | return (pmd_flags(pte) & ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0))) == | |
19285 | ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0)); | |
19286 | } | |
19287 | static inline __attribute__((always_inline)) int pmd_trans_splitting(pmd_t pmd) | |
19288 | { | |
19289 | return pmd_val(pmd) & (((pteval_t)(1)) << 9); | |
19290 | } | |
19291 | static inline __attribute__((always_inline)) int pmd_trans_huge(pmd_t pmd) | |
19292 | { | |
19293 | return pmd_val(pmd) & (((pteval_t)(1)) << 7); | |
19294 | } | |
19295 | static inline __attribute__((always_inline)) int has_transparent_hugepage(void) | |
19296 | { | |
19297 | return (__builtin_constant_p((0*32+ 3)) && ( ((((0*32+ 3))>>5)==0 && (1UL<<(((0*32+ 3))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+ 3))>>5)==1 && (1UL<<(((0*32+ 3))&31) & (0|0))) || ((((0*32+ 3))>>5)==2 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==3 && (1UL<<(((0*32+ 3))&31) & (0))) || ((((0*32+ 3))>>5)==4 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==5 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==6 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==7 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==8 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==9 && (1UL<<(((0*32+ 3))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+ 3))) ? constant_test_bit(((0*32+ 3)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+ 3)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))); | |
19298 | } | |
19299 | static inline __attribute__((always_inline)) pte_t pte_set_flags(pte_t pte, pteval_t set) | |
19300 | { | |
19301 | pteval_t v = native_pte_val(pte); | |
19302 | return native_make_pte(v | set); | |
19303 | } | |
19304 | static inline __attribute__((always_inline)) pte_t pte_clear_flags(pte_t pte, pteval_t clear) | |
19305 | { | |
19306 | pteval_t v = native_pte_val(pte); | |
19307 | return native_make_pte(v & ~clear); | |
19308 | } | |
19309 | static inline __attribute__((always_inline)) pte_t pte_mkclean(pte_t pte) | |
19310 | { | |
19311 | return pte_clear_flags(pte, (((pteval_t)(1)) << 6)); | |
19312 | } | |
19313 | static inline __attribute__((always_inline)) pte_t pte_mkold(pte_t pte) | |
19314 | { | |
19315 | return pte_clear_flags(pte, (((pteval_t)(1)) << 5)); | |
19316 | } | |
19317 | static inline __attribute__((always_inline)) pte_t pte_wrprotect(pte_t pte) | |
19318 | { | |
19319 | return pte_clear_flags(pte, (((pteval_t)(1)) << 1)); | |
19320 | } | |
19321 | static inline __attribute__((always_inline)) pte_t pte_mkexec(pte_t pte) | |
19322 | { | |
19323 | return pte_clear_flags(pte, (((pteval_t)(1)) << 63)); | |
19324 | } | |
19325 | static inline __attribute__((always_inline)) pte_t pte_mkdirty(pte_t pte) | |
19326 | { | |
19327 | return pte_set_flags(pte, (((pteval_t)(1)) << 6)); | |
19328 | } | |
19329 | static inline __attribute__((always_inline)) pte_t pte_mkyoung(pte_t pte) | |
19330 | { | |
19331 | return pte_set_flags(pte, (((pteval_t)(1)) << 5)); | |
19332 | } | |
19333 | static inline __attribute__((always_inline)) pte_t pte_mkwrite(pte_t pte) | |
19334 | { | |
19335 | return pte_set_flags(pte, (((pteval_t)(1)) << 1)); | |
19336 | } | |
19337 | static inline __attribute__((always_inline)) pte_t pte_mkhuge(pte_t pte) | |
19338 | { | |
19339 | return pte_set_flags(pte, (((pteval_t)(1)) << 7)); | |
19340 | } | |
19341 | static inline __attribute__((always_inline)) pte_t pte_clrhuge(pte_t pte) | |
19342 | { | |
19343 | return pte_clear_flags(pte, (((pteval_t)(1)) << 7)); | |
19344 | } | |
19345 | static inline __attribute__((always_inline)) pte_t pte_mkglobal(pte_t pte) | |
19346 | { | |
19347 | return pte_set_flags(pte, (((pteval_t)(1)) << 8)); | |
19348 | } | |
19349 | static inline __attribute__((always_inline)) pte_t pte_clrglobal(pte_t pte) | |
19350 | { | |
19351 | return pte_clear_flags(pte, (((pteval_t)(1)) << 8)); | |
19352 | } | |
19353 | static inline __attribute__((always_inline)) pte_t pte_mkspecial(pte_t pte) | |
19354 | { | |
19355 | return pte_set_flags(pte, (((pteval_t)(1)) << 9)); | |
19356 | } | |
19357 | static inline __attribute__((always_inline)) pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) | |
19358 | { | |
19359 | pmdval_t v = native_pmd_val(pmd); | |
19360 | return __pmd(v | set); | |
19361 | } | |
19362 | static inline __attribute__((always_inline)) pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear) | |
19363 | { | |
19364 | pmdval_t v = native_pmd_val(pmd); | |
19365 | return __pmd(v & ~clear); | |
19366 | } | |
19367 | static inline __attribute__((always_inline)) pmd_t pmd_mkold(pmd_t pmd) | |
19368 | { | |
19369 | return pmd_clear_flags(pmd, (((pteval_t)(1)) << 5)); | |
19370 | } | |
19371 | static inline __attribute__((always_inline)) pmd_t pmd_wrprotect(pmd_t pmd) | |
19372 | { | |
19373 | return pmd_clear_flags(pmd, (((pteval_t)(1)) << 1)); | |
19374 | } | |
19375 | static inline __attribute__((always_inline)) pmd_t pmd_mkdirty(pmd_t pmd) | |
19376 | { | |
19377 | return pmd_set_flags(pmd, (((pteval_t)(1)) << 6)); | |
19378 | } | |
19379 | static inline __attribute__((always_inline)) pmd_t pmd_mkhuge(pmd_t pmd) | |
19380 | { | |
19381 | return pmd_set_flags(pmd, (((pteval_t)(1)) << 7)); | |
19382 | } | |
19383 | static inline __attribute__((always_inline)) pmd_t pmd_mkyoung(pmd_t pmd) | |
19384 | { | |
19385 | return pmd_set_flags(pmd, (((pteval_t)(1)) << 5)); | |
19386 | } | |
19387 | static inline __attribute__((always_inline)) pmd_t pmd_mkwrite(pmd_t pmd) | |
19388 | { | |
19389 | return pmd_set_flags(pmd, (((pteval_t)(1)) << 1)); | |
19390 | } | |
19391 | static inline __attribute__((always_inline)) pmd_t pmd_mknotpresent(pmd_t pmd) | |
19392 | { | |
19393 | return pmd_clear_flags(pmd, (((pteval_t)(1)) << 0)); | |
19394 | } | |
19395 | static inline __attribute__((always_inline)) pgprotval_t massage_pgprot(pgprot_t pgprot) | |
19396 | { | |
19397 | pgprotval_t protval = ((pgprot).pgprot); | |
19398 | if (__builtin_constant_p(((protval & (((pteval_t)(1)) << 0)))) ? !!((protval & (((pteval_t)(1)) << 0))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/pgtable.h", .line = 301, }; ______r = !!((protval & (((pteval_t)(1)) << 0))); ______f.miss_hit[______r]++; ______r; })) | |
19399 | protval &= __supported_pte_mask; | |
19400 | return protval; | |
19401 | } | |
19402 | static inline __attribute__((always_inline)) pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) | |
19403 | { | |
19404 | return __pte(((phys_addr_t)page_nr << 12) | | |
19405 | massage_pgprot(pgprot)); | |
19406 | } | |
19407 | static inline __attribute__((always_inline)) pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) | |
19408 | { | |
19409 | return __pmd(((phys_addr_t)page_nr << 12) | | |
19410 | massage_pgprot(pgprot)); | |
19411 | } | |
19412 | static inline __attribute__((always_inline)) pte_t pte_modify(pte_t pte, pgprot_t newprot) | |
19413 | { | |
19414 | pteval_t val = pte_val(pte); | |
19415 | val &= (((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6)); | |
19416 | val |= massage_pgprot(newprot) & ~(((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6)); | |
19417 | return __pte(val); | |
19418 | } | |
19419 | static inline __attribute__((always_inline)) pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) | |
19420 | { | |
19421 | pmdval_t val = pmd_val(pmd); | |
19422 | val &= ((((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6)) | (((pteval_t)(1)) << 7)); | |
19423 | val |= massage_pgprot(newprot) & ~((((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6)) | (((pteval_t)(1)) << 7)); | |
19424 | return __pmd(val); | |
19425 | } | |
19426 | static inline __attribute__((always_inline)) pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) | |
19427 | { | |
19428 | pgprotval_t preservebits = ((oldprot).pgprot) & (((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6)); | |
19429 | pgprotval_t addbits = ((newprot).pgprot); | |
19430 | return ((pgprot_t) { (preservebits | addbits) } ); | |
19431 | } | |
19432 | static inline __attribute__((always_inline)) int is_new_memtype_allowed(u64 paddr, unsigned long size, | |
19433 | unsigned long flags, | |
19434 | unsigned long new_flags) | |
19435 | { | |
19436 | if (__builtin_constant_p(((x86_platform.is_untracked_pat_range(paddr, paddr + size)))) ? !!((x86_platform.is_untracked_pat_range(paddr, paddr + size))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/pgtable.h", .line = 363, }; ______r = !!((x86_platform.is_untracked_pat_range(paddr, paddr + size))); ______f.miss_hit[______r]++; ______r; })) | |
19437 | return 1; | |
19438 | if (__builtin_constant_p((((flags == ((((pteval_t)(1)) << 4)) && new_flags == (0)) || (flags == ((((pteval_t)(1)) << 3)) && new_flags == (0))))) ? !!(((flags == ((((pteval_t)(1)) << 4)) && new_flags == (0)) || (flags == ((((pteval_t)(1)) << 3)) && new_flags == (0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = | |
19439 | "/data/exp/linux-3.0.4/arch/x86/include/asm/pgtable.h" | |
19440 | , .line = | |
19441 | 375 | |
19442 | , }; ______r = !!(((flags == ((((pteval_t)(1)) << 4)) && new_flags == (0)) || (flags == ((((pteval_t)(1)) << 3)) && new_flags == (0)))); ______f.miss_hit[______r]++; ______r; })) | |
19443 | { | |
19444 | return 0; | |
19445 | } | |
19446 | return 1; | |
19447 | } | |
19448 | pmd_t *populate_extra_pmd(unsigned long vaddr); | |
19449 | pte_t *populate_extra_pte(unsigned long vaddr); | |
19450 | struct mm_struct; | |
19451 | struct vm_area_struct; | |
19452 | extern pgd_t swapper_pg_dir[1024]; | |
19453 | extern pgd_t initial_page_table[1024]; | |
19454 | static inline __attribute__((always_inline)) void pgtable_cache_init(void) { } | |
19455 | static inline __attribute__((always_inline)) void check_pgt_cache(void) { } | |
19456 | void paging_init(void); | |
19457 | extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); | |
19458 | static inline __attribute__((always_inline)) void native_set_pte(pte_t *ptep, pte_t pte) | |
19459 | { | |
19460 | ptep->pte_high = pte.pte_high; | |
19461 | __asm__ __volatile__("": : :"memory"); | |
19462 | ptep->pte_low = pte.pte_low; | |
19463 | } | |
19464 | static inline __attribute__((always_inline)) void native_set_pte_atomic(pte_t *ptep, pte_t pte) | |
19465 | { | |
19466 | set_64bit((unsigned long long *)(ptep), native_pte_val(pte)); | |
19467 | } | |
19468 | static inline __attribute__((always_inline)) void native_set_pmd(pmd_t *pmdp, pmd_t pmd) | |
19469 | { | |
19470 | set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); | |
19471 | } | |
19472 | static inline __attribute__((always_inline)) void native_set_pud(pud_t *pudp, pud_t pud) | |
19473 | { | |
19474 | set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); | |
19475 | } | |
19476 | static inline __attribute__((always_inline)) void native_pte_clear(struct mm_struct *mm, unsigned long addr, | |
19477 | pte_t *ptep) | |
19478 | { | |
19479 | ptep->pte_low = 0; | |
19480 | __asm__ __volatile__("": : :"memory"); | |
19481 | ptep->pte_high = 0; | |
19482 | } | |
19483 | static inline __attribute__((always_inline)) void native_pmd_clear(pmd_t *pmd) | |
19484 | { | |
19485 | u32 *tmp = (u32 *)pmd; | |
19486 | *tmp = 0; | |
19487 | __asm__ __volatile__("": : :"memory"); | |
19488 | *(tmp + 1) = 0; | |
19489 | } | |
19490 | static inline __attribute__((always_inline)) void pud_clear(pud_t *pudp) | |
19491 | { | |
19492 | set_pud(pudp, ((pud_t) { __pgd(0) } )); | |
19493 | } | |
19494 | static inline __attribute__((always_inline)) pte_t native_ptep_get_and_clear(pte_t *ptep) | |
19495 | { | |
19496 | pte_t res; | |
19497 | res.pte_low = ({ __typeof(*((&ptep->pte_low))) __x = ((0)); switch (sizeof(*&ptep->pte_low)) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&ptep->pte_low)); asm volatile("xchgb %0,%1" : "=q" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&ptep->pte_low)); asm volatile("xchgw %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&ptep->pte_low)); asm volatile("xchgl %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } default: __xchg_wrong_size(); } __x; }); | |
19498 | res.pte_high = ptep->pte_high; | |
19499 | ptep->pte_high = 0; | |
19500 | return res; | |
19501 | } | |
19502 | union split_pmd { | |
19503 | struct { | |
19504 | u32 pmd_low; | |
19505 | u32 pmd_high; | |
19506 | }; | |
19507 | pmd_t pmd; | |
19508 | }; | |
/*
 * native_pmdp_get_and_clear() - read and zero a split (PAE) PMD.
 *
 * Same two-step dance as native_ptep_get_and_clear(): the low word is
 * swapped to zero atomically (inlined xchg below), then the high word
 * is read and cleared non-atomically via the split_pmd overlay.
 */
static inline __attribute__((always_inline)) pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
        union split_pmd res, *orig = (union split_pmd *)pmdp;
        /* Inlined xchg(): atomically swap 0 into pmd_low, return old value. */
        res.pmd_low = ({ __typeof(*((&orig->pmd_low))) __x = ((0)); switch (sizeof(*&orig->pmd_low)) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&orig->pmd_low)); asm volatile("xchgb %0,%1" : "=q" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&orig->pmd_low)); asm volatile("xchgw %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&orig->pmd_low)); asm volatile("xchgl %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } default: __xchg_wrong_size(); } __x; });
        res.pmd_high = orig->pmd_high;
        orig->pmd_high = 0;
        return res.pmd;
}
19517 | static inline __attribute__((always_inline)) int pte_none(pte_t pte) | |
19518 | { | |
19519 | return !pte.pte; | |
19520 | } | |
19521 | static inline __attribute__((always_inline)) int pte_same(pte_t a, pte_t b) | |
19522 | { | |
19523 | return a.pte == b.pte; | |
19524 | } | |
19525 | static inline __attribute__((always_inline)) int pte_present(pte_t a) | |
19526 | { | |
19527 | return pte_flags(a) & ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 8)); | |
19528 | } | |
/*
 * pte_hidden() - test the "hidden" page flag.
 *
 * The flag mask expands to 0 in this configuration, so this always
 * returns 0 - the feature behind it is evidently compiled out.  The
 * expression is kept as written so it matches the unexpanded source.
 */
static inline __attribute__((always_inline)) int pte_hidden(pte_t pte)
{
        return pte_flags(pte) & (((pteval_t)(0)));
}
19533 | static inline __attribute__((always_inline)) int pmd_present(pmd_t pmd) | |
19534 | { | |
19535 | return pmd_flags(pmd) & (((pteval_t)(1)) << 0); | |
19536 | } | |
19537 | static inline __attribute__((always_inline)) int pmd_none(pmd_t pmd) | |
19538 | { | |
19539 | return (unsigned long)native_pmd_val(pmd) == 0; | |
19540 | } | |
/*
 * pmd_page_vaddr() - kernel virtual address of the page table this PMD
 * entry points to.
 *
 * Masks the entry down to its physical frame - bits 12..43, i.e. the
 * expanded PTE_PFN_MASK (~(PAGE_SIZE-1) & ((1ULL << 44) - 1)) - and
 * converts phys to virt by adding PAGE_OFFSET (0xC0000000), which is
 * the expanded __va().
 */
static inline __attribute__((always_inline)) unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)((void *)((unsigned long)(pmd_val(pmd) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))))+((unsigned long)(0xC0000000UL))));
}
/*
 * pmd_index() - slot of @address within its page middle directory.
 * Bits 21..29 of the address select one of the 512 PMD entries
 * (each entry covers 2 MiB).
 */
static inline __attribute__((always_inline)) unsigned long pmd_index(unsigned long address)
{
        return (address >> 21) % 512;
}
/*
 * pte_index() - slot of @address within its page table.
 * Bits 12..20 of the address select one of the 512 PTE entries
 * (each entry covers one 4 KiB page).
 */
static inline __attribute__((always_inline)) unsigned long pte_index(unsigned long address)
{
        return (address >> 12) % 512;
}
/*
 * pte_offset_kernel() - pointer to the PTE covering @address.
 *
 * Resolves the page-table base through the kernel linear mapping
 * (pmd_page_vaddr) and indexes it with pte_index().
 */
static inline __attribute__((always_inline)) pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
/*
 * pmd_bad() - sanity-check a table-pointing PMD entry.
 *
 * After masking off bit 2 (User/Supervisor), a well-formed entry must
 * carry exactly bits 0, 1, 5 and 6 - Present, Writable, Accessed,
 * Dirty per the standard x86 layout.  Anything else is "bad".
 * NOTE(review): bit names inferred from the x86 PTE format; confirm
 * against the unexpanded _KERNPG_TABLE definition.
 */
static inline __attribute__((always_inline)) int pmd_bad(pmd_t pmd)
{
        return (pmd_flags(pmd) & ~(((pteval_t)(1)) << 2)) != ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 1) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6));
}
/* Convert a count of 4 KiB pages to whole megabytes (truncating). */
static inline __attribute__((always_inline)) unsigned long pages_to_mb(unsigned long npg)
{
        return npg / 256;       /* 256 pages of 4 KiB per MiB */
}
19565 | static inline __attribute__((always_inline)) int pud_none(pud_t pud) | |
19566 | { | |
19567 | return native_pud_val(pud) == 0; | |
19568 | } | |
19569 | static inline __attribute__((always_inline)) int pud_present(pud_t pud) | |
19570 | { | |
19571 | return pud_flags(pud) & (((pteval_t)(1)) << 0); | |
19572 | } | |
/*
 * pud_page_vaddr() - kernel virtual address of the PMD table this PUD
 * entry points to.
 *
 * A pud_t wraps a pgd_t here, hence the pgd_val((pud).pgd) access.
 * The value is masked to the physical frame (bits 12..43) and moved
 * into the linear mapping by adding PAGE_OFFSET (0xC0000000).
 */
static inline __attribute__((always_inline)) unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)((void *)((unsigned long)((unsigned long)(pgd_val((pud).pgd)) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))))+((unsigned long)(0xC0000000UL))));
}
19577 | static inline __attribute__((always_inline)) pmd_t *pmd_offset(pud_t *pud, unsigned long address) | |
19578 | { | |
19579 | return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); | |
19580 | } | |
/*
 * pud_large() - does this PUD map a huge page directly?
 *
 * True only when both bit 7 (PSE/page-size) and bit 0 (Present) are
 * set; masking with the pair and comparing against the pair requires
 * both at once.
 */
static inline __attribute__((always_inline)) int pud_large(pud_t pud)
{
        return ((pgd_val((pud).pgd)) & ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0))) ==
                ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0));
}
/*
 * pud_bad() - sanity-check a table-pointing PUD entry.
 *
 * Any flag bit outside the allowed set {0,1,5,6} (Present, Writable,
 * Accessed, Dirty) plus bit 2 (User) marks the entry as bad.
 */
static inline __attribute__((always_inline)) int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 1) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6)) | (((pteval_t)(1)) << 2))) != 0;
}
19590 | extern int direct_gbpages; | |
/*
 * native_local_ptep_get_and_clear() - non-atomic fetch-and-clear.
 *
 * Reads the whole PTE, then clears it via native_pte_clear() - the two
 * steps are not atomic.  NOTE(review): the "_local_" name suggests it
 * is only used where no concurrent update is possible; confirm callers
 * hold the appropriate locks.
 */
static inline __attribute__((always_inline)) pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;
        native_pte_clear(((void *)0), 0, ptep);
        return res;
}
/*
 * native_local_pmdp_get_and_clear() - non-atomic PMD fetch-and-clear,
 * the PMD counterpart of native_local_ptep_get_and_clear(); same
 * no-concurrent-update caveat applies.
 */
static inline __attribute__((always_inline)) pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
        pmd_t res = *pmdp;
        native_pmd_clear(pmdp);
        return res;
}
19603 | static inline __attribute__((always_inline)) void native_set_pte_at(struct mm_struct *mm, unsigned long addr, | |
19604 | pte_t *ptep , pte_t pte) | |
19605 | { | |
19606 | native_set_pte(ptep, pte); | |
19607 | } | |
19608 | static inline __attribute__((always_inline)) void native_set_pmd_at(struct mm_struct *mm, unsigned long addr, | |
19609 | pmd_t *pmdp , pmd_t pmd) | |
19610 | { | |
19611 | native_set_pmd(pmdp, pmd); | |
19612 | } | |
19613 | struct vm_area_struct; | |
19614 | extern int ptep_set_access_flags(struct vm_area_struct *vma, | |
19615 | unsigned long address, pte_t *ptep, | |
19616 | pte_t entry, int dirty); | |
19617 | extern int ptep_test_and_clear_young(struct vm_area_struct *vma, | |
19618 | unsigned long addr, pte_t *ptep); | |
19619 | extern int ptep_clear_flush_young(struct vm_area_struct *vma, | |
19620 | unsigned long address, pte_t *ptep); | |
/*
 * ptep_get_and_clear() - atomically fetch and zero the PTE at @ptep,
 * then run the pte_update() hook for (@mm, @addr).
 * NOTE(review): pte_update()'s body is not visible in this expansion -
 * presumably an arch/paravirt notification; confirm at its definition.
 */
static inline __attribute__((always_inline)) pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}
/*
 * ptep_get_and_clear_full() - PTE teardown helper.
 *
 * When @full is set (the whole address space is being torn down) the
 * cheaper non-atomic local clear suffices; otherwise the atomic
 * ptep_get_and_clear() path runs.  The opaque conditional below is an
 * ftrace branch-profiling expansion of a plain `if (full)`: the static
 * ______f record only counts taken/not-taken and must be left intact.
 */
static inline __attribute__((always_inline)) pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep,
                                       int full)
{
        pte_t pte;
        if (__builtin_constant_p(((full))) ? !!((full)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/pgtable.h", .line = 686, }; ______r = !!((full)); ______f.miss_hit[______r]++; ______r; })) {
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}
/*
 * ptep_set_wrprotect() - make the PTE read-only.
 *
 * Atomically clears bit 1 (the R/W bit) of the entry with clear_bit(),
 * then runs the pte_update() hook.  clear_bit() acts on the low word,
 * which is a single-word atomic even on split (PAE) entries.
 */
static inline __attribute__((always_inline)) void ptep_set_wrprotect(struct mm_struct *mm,
                                unsigned long addr, pte_t *ptep)
{
        clear_bit(1, (unsigned long *)&ptep->pte);
        pte_update(mm, addr, ptep);
}
19646 | extern int pmdp_set_access_flags(struct vm_area_struct *vma, | |
19647 | unsigned long address, pmd_t *pmdp, | |
19648 | pmd_t entry, int dirty); | |
19649 | extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, | |
19650 | unsigned long addr, pmd_t *pmdp); | |
19651 | extern int pmdp_clear_flush_young(struct vm_area_struct *vma, | |
19652 | unsigned long address, pmd_t *pmdp); | |
19653 | extern void pmdp_splitting_flush(struct vm_area_struct *vma, | |
19654 | unsigned long addr, pmd_t *pmdp); | |
19655 | static inline __attribute__((always_inline)) int pmd_write(pmd_t pmd) | |
19656 | { | |
19657 | return pmd_flags(pmd) & (((pteval_t)(1)) << 1); | |
19658 | } | |
/*
 * pmdp_get_and_clear() - atomically fetch and zero the PMD at @pmdp,
 * then run the pmd_update() hook for (@mm, @addr); the PMD counterpart
 * of ptep_get_and_clear().
 */
static inline __attribute__((always_inline)) pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                  pmd_t *pmdp)
{
        pmd_t pmd = native_pmdp_get_and_clear(pmdp);
        pmd_update(mm, addr, pmdp);
        return pmd;
}
/*
 * pmdp_set_wrprotect() - make the PMD read-only.
 *
 * Atomically clears bit 1 (the R/W bit) of the entry's low word, then
 * runs the pmd_update() hook; mirrors ptep_set_wrprotect().
 */
static inline __attribute__((always_inline)) void pmdp_set_wrprotect(struct mm_struct *mm,
                                unsigned long addr, pmd_t *pmdp)
{
        clear_bit(1, (unsigned long *)pmdp);
        pmd_update(mm, addr, pmdp);
}
19672 | static inline __attribute__((always_inline)) void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |
19673 | { | |
19674 | __builtin_memcpy(dst, src, count * sizeof(pgd_t)); | |
19675 | } | |
19676 | static inline __attribute__((always_inline)) void pte_clear_not_present_full(struct mm_struct *mm, | |
19677 | unsigned long address, | |
19678 | pte_t *ptep, | |
19679 | int full) | |
19680 | { | |
19681 | pte_clear(mm, address, ptep); | |
19682 | } | |
19683 | extern pte_t ptep_clear_flush(struct vm_area_struct *vma, | |
19684 | unsigned long address, | |
19685 | pte_t *ptep); | |
19686 | extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, | |
19687 | unsigned long address, | |
19688 | pmd_t *pmdp); | |
19689 | static inline __attribute__((always_inline)) int pmd_same(pmd_t pmd_a, pmd_t pmd_b) | |
19690 | { | |
19691 | return pmd_val(pmd_a) == pmd_val(pmd_b); | |
19692 | } | |
19693 | void pgd_clear_bad(pgd_t *); | |
19694 | void pud_clear_bad(pud_t *); | |
19695 | void pmd_clear_bad(pmd_t *); | |
/*
 * pgd_none_or_clear_bad() - page-table-walker helper.
 *
 * Returns 1 when this PGD should be skipped: either it is empty, or it
 * fails pgd_bad() - in which case pgd_clear_bad() reports and clears
 * it first.  Returns 0 for a usable entry.  The opaque conditionals
 * are ftrace branch-profiling expansions of `if (pgd_none(*pgd))` and
 * a branch-annotated `if (pgd_bad(*pgd))`; the static ______f records
 * only count branch outcomes and must be left intact.
 */
static inline __attribute__((always_inline)) int pgd_none_or_clear_bad(pgd_t *pgd)
{
        if (__builtin_constant_p(((pgd_none(*pgd)))) ? !!((pgd_none(*pgd))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 257, }; ______r = !!((pgd_none(*pgd))); ______f.miss_hit[______r]++; ______r; }))
                return 1;
        if (__builtin_constant_p((((__builtin_constant_p(pgd_bad(*pgd)) ? !!(pgd_bad(*pgd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 259, }; ______r = __builtin_expect(!!(pgd_bad(*pgd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(pgd_bad(*pgd)) ? !!(pgd_bad(*pgd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 259, }; ______r = __builtin_expect(!!(pgd_bad(*pgd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 259, }; ______r = !!(((__builtin_constant_p(pgd_bad(*pgd)) ? !!(pgd_bad(*pgd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 259, }; ______r = __builtin_expect(!!(pgd_bad(*pgd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
                pgd_clear_bad(pgd);
                return 1;
        }
        return 0;
}
/*
 * pud_none_or_clear_bad() - page-table-walker helper, PUD level.
 *
 * Same contract as pgd_none_or_clear_bad(): returns 1 to skip an empty
 * or bad (cleared via pud_clear_bad()) entry, 0 for a usable one.  The
 * conditionals are ftrace branch-profiling expansions; leave intact.
 */
static inline __attribute__((always_inline)) int pud_none_or_clear_bad(pud_t *pud)
{
        if (__builtin_constant_p(((pud_none(*pud)))) ? !!((pud_none(*pud))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 268, }; ______r = !!((pud_none(*pud))); ______f.miss_hit[______r]++; ______r; }))
                return 1;
        if (__builtin_constant_p((((__builtin_constant_p(pud_bad(*pud)) ? !!(pud_bad(*pud)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 270, }; ______r = __builtin_expect(!!(pud_bad(*pud)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(pud_bad(*pud)) ? !!(pud_bad(*pud)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 270, }; ______r = __builtin_expect(!!(pud_bad(*pud)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 270, }; ______r = !!(((__builtin_constant_p(pud_bad(*pud)) ? !!(pud_bad(*pud)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 270, }; ______r = __builtin_expect(!!(pud_bad(*pud)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
                pud_clear_bad(pud);
                return 1;
        }
        return 0;
}
/*
 * pmd_none_or_clear_bad() - page-table-walker helper, PMD level.
 *
 * Same contract as pgd_none_or_clear_bad(): returns 1 to skip an empty
 * or bad (cleared via pmd_clear_bad()) entry, 0 for a usable one.  The
 * conditionals are ftrace branch-profiling expansions; leave intact.
 */
static inline __attribute__((always_inline)) int pmd_none_or_clear_bad(pmd_t *pmd)
{
        if (__builtin_constant_p(((pmd_none(*pmd)))) ? !!((pmd_none(*pmd))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 279, }; ______r = !!((pmd_none(*pmd))); ______f.miss_hit[______r]++; ______r; }))
                return 1;
        if (__builtin_constant_p((((__builtin_constant_p(pmd_bad(*pmd)) ? !!(pmd_bad(*pmd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 281, }; ______r = __builtin_expect(!!(pmd_bad(*pmd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(pmd_bad(*pmd)) ? !!(pmd_bad(*pmd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 281, }; ______r = __builtin_expect(!!(pmd_bad(*pmd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 281, }; ______r = !!(((__builtin_constant_p(pmd_bad(*pmd)) ? !!(pmd_bad(*pmd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 281, }; ______r = __builtin_expect(!!(pmd_bad(*pmd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
                pmd_clear_bad(pmd);
                return 1;
        }
        return 0;
}
/*
 * __ptep_modify_prot_start() - begin a read-modify-write of a PTE.
 *
 * Atomically fetches the current entry and leaves the slot cleared
 * while the caller computes a new value - presumably so concurrent
 * hardware A/D-bit updates cannot be lost in the window; paired with
 * __ptep_modify_prot_commit().
 */
static inline __attribute__((always_inline)) pte_t __ptep_modify_prot_start(struct mm_struct *mm,
                                        unsigned long addr,
                                        pte_t *ptep)
{
        return ptep_get_and_clear(mm, addr, ptep);
}
/*
 * __ptep_modify_prot_commit() - finish a protection update started by
 * __ptep_modify_prot_start() by installing the new @pte via set_pte_at().
 */
static inline __attribute__((always_inline)) void __ptep_modify_prot_commit(struct mm_struct *mm,
                                        unsigned long addr,
                                        pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, addr, ptep, pte);
}
19738 | extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, | |
19739 | unsigned long pfn, unsigned long size); | |
19740 | extern int track_pfn_vma_copy(struct vm_area_struct *vma); | |
19741 | extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, | |
19742 | unsigned long size); | |
19743 | extern struct kmem_cache *vm_area_cachep; | |
19744 | extern pgprot_t protection_map[16]; | |
/*
 * is_linear_pfn_mapping() - test vm_flags bit 0x40000000 on the VMA.
 * NOTE(review): presumably VM_PFN_AT_MMAP (whole-VMA linear PFN map) -
 * the symbolic flag name is lost in this expansion; confirm against
 * the kernel's vm_flags definitions.
 */
static inline __attribute__((always_inline)) int is_linear_pfn_mapping(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & 0x40000000);
}
/*
 * is_pfn_mapping() - test vm_flags bit 0x00000400 on the VMA.
 * NOTE(review): presumably VM_PFNMAP (raw PFN mapping, no struct page
 * backing) - symbolic name lost in this expansion; confirm.
 */
static inline __attribute__((always_inline)) int is_pfn_mapping(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & 0x00000400);
}
/*
 * struct vm_fault - fault context passed to a VMA's ->fault handler.
 * NOTE(review): field semantics follow standard Linux mm conventions
 * and are not derivable from this expansion alone; confirm upstream.
 */
struct vm_fault {
        unsigned int flags;             /* fault disposition flags */
        unsigned long pgoff;            /* logical page offset within the mapping */
        void *virtual_address;          /* faulting address */
        struct page *page;              /* filled in by the handler */
};
/*
 * struct vm_operations_struct - per-VMA callback table.
 * NOTE(review): callback semantics per standard Linux mm conventions;
 * confirm against the unexpanded header's documentation.
 */
struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);     /* VMA created/duplicated */
        void (*close)(struct vm_area_struct * area);    /* VMA being torn down */
        /* Demand-fault a page for the VMA; details exchanged via vmf. */
        int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
        /* A read-only page is about to be made writable. */
        int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
        /* Read or write (@write) @len bytes at @addr through @buf. */
        int (*access)(struct vm_area_struct *vma, unsigned long addr,
                      void *buf, int len, int write);
};
19767 | struct mmu_gather; | |
19768 | struct inode; | |
/*
 * enum pageflags - bit numbers within struct page->flags.
 *
 * PG_locked .. PG_compound_lock are distinct physical bits;
 * __NR_PAGEFLAGS counts them.  The names after __NR_PAGEFLAGS are
 * aliases that reuse an existing bit for a context-specific meaning
 * (visible from their "= PG_xxx" initializers).
 */
enum pageflags {
        PG_locked,
        PG_error,
        PG_referenced,
        PG_uptodate,
        PG_dirty,
        PG_lru,
        PG_active,
        PG_slab,
        PG_owner_priv_1,
        PG_arch_1,
        PG_reserved,
        PG_private,
        PG_private_2,
        PG_writeback,
        PG_head,
        PG_tail,
        PG_swapcache,
        PG_mappedtodisk,
        PG_reclaim,
        PG_swapbacked,
        PG_unevictable,
        PG_mlocked,
        PG_uncached,
        PG_compound_lock,
        __NR_PAGEFLAGS,                 /* number of real flag bits */
        /* Aliases: context-specific reuse of the bits above. */
        PG_checked = PG_owner_priv_1,
        PG_fscache = PG_private_2,
        PG_pinned = PG_owner_priv_1,
        PG_savepinned = PG_dirty,
        PG_slob_free = PG_private,
        PG_slub_frozen = PG_active,
};
19802 | struct page; | |
/*
 * Macro-generated page-flag accessors (expansions of the page-flags
 * helper macros), one source line per enum pageflags bit:
 *   PageFoo()            - test the bit
 *   SetPageFoo()/ClearPageFoo()   - atomic set/clear
 *   __SetPageFoo()/__ClearPageFoo() - non-atomic variants
 *   TestSetPageFoo()/TestClearPageFoo() - update + return old value
 * Noteworthy cases visible below: PageReadahead reuses the PG_reclaim
 * bit, and PageHWPoison is a constant-0 stub (feature compiled out).
 */
static inline __attribute__((always_inline)) int PageLocked(struct page *page) { return (__builtin_constant_p((PG_locked)) ? constant_test_bit((PG_locked), (&page->flags)) : variable_test_bit((PG_locked), (&page->flags))); }
static inline __attribute__((always_inline)) int PageError(struct page *page) { return (__builtin_constant_p((PG_error)) ? constant_test_bit((PG_error), (&page->flags)) : variable_test_bit((PG_error), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageError(struct page *page) { set_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) void ClearPageError(struct page *page) { clear_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageError(struct page *page) { return test_and_clear_bit(PG_error, &page->flags); }
static inline __attribute__((always_inline)) int PageReferenced(struct page *page) { return (__builtin_constant_p((PG_referenced)) ? constant_test_bit((PG_referenced), (&page->flags)) : variable_test_bit((PG_referenced), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageReferenced(struct page *page) { set_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReferenced(struct page *page) { clear_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReferenced(struct page *page) { return test_and_clear_bit(PG_referenced, &page->flags); }
static inline __attribute__((always_inline)) int PageDirty(struct page *page) { return (__builtin_constant_p((PG_dirty)) ? constant_test_bit((PG_dirty), (&page->flags)) : variable_test_bit((PG_dirty), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageDirty(struct page *page) { set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void ClearPageDirty(struct page *page) { clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageDirty(struct page *page) { return test_and_set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageDirty(struct page *page) { return test_and_clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageDirty(struct page *page) { __clear_bit(PG_dirty, &page->flags); }
static inline __attribute__((always_inline)) int PageLRU(struct page *page) { return (__builtin_constant_p((PG_lru)) ? constant_test_bit((PG_lru), (&page->flags)) : variable_test_bit((PG_lru), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageLRU(struct page *page) { set_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void ClearPageLRU(struct page *page) { clear_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageLRU(struct page *page) { __clear_bit(PG_lru, &page->flags); }
static inline __attribute__((always_inline)) int PageActive(struct page *page) { return (__builtin_constant_p((PG_active)) ? constant_test_bit((PG_active), (&page->flags)) : variable_test_bit((PG_active), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageActive(struct page *page) { set_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void ClearPageActive(struct page *page) { clear_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageActive(struct page *page) { __clear_bit(PG_active, &page->flags); }
static inline __attribute__((always_inline)) int TestClearPageActive(struct page *page) { return test_and_clear_bit(PG_active, &page->flags); }
static inline __attribute__((always_inline)) int PageSlab(struct page *page) { return (__builtin_constant_p((PG_slab)) ? constant_test_bit((PG_slab), (&page->flags)) : variable_test_bit((PG_slab), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageSlab(struct page *page) { __set_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlab(struct page *page) { __clear_bit(PG_slab, &page->flags); }
static inline __attribute__((always_inline)) int PageChecked(struct page *page) { return (__builtin_constant_p((PG_checked)) ? constant_test_bit((PG_checked), (&page->flags)) : variable_test_bit((PG_checked), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageChecked(struct page *page) { set_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageChecked(struct page *page) { clear_bit(PG_checked, &page->flags); }
static inline __attribute__((always_inline)) int PagePinned(struct page *page) { return (__builtin_constant_p((PG_pinned)) ? constant_test_bit((PG_pinned), (&page->flags)) : variable_test_bit((PG_pinned), (&page->flags))); } static inline __attribute__((always_inline)) void SetPagePinned(struct page *page) { set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePinned(struct page *page) { clear_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestSetPagePinned(struct page *page) { return test_and_set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestClearPagePinned(struct page *page) { return test_and_clear_bit(PG_pinned, &page->flags); }
static inline __attribute__((always_inline)) int PageSavePinned(struct page *page) { return (__builtin_constant_p((PG_savepinned)) ? constant_test_bit((PG_savepinned), (&page->flags)) : variable_test_bit((PG_savepinned), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageSavePinned(struct page *page) { set_bit(PG_savepinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSavePinned(struct page *page) { clear_bit(PG_savepinned, &page->flags); };
static inline __attribute__((always_inline)) int PageReserved(struct page *page) { return (__builtin_constant_p((PG_reserved)) ? constant_test_bit((PG_reserved), (&page->flags)) : variable_test_bit((PG_reserved), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageReserved(struct page *page) { set_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReserved(struct page *page) { clear_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageReserved(struct page *page) { __clear_bit(PG_reserved, &page->flags); }
static inline __attribute__((always_inline)) int PageSwapBacked(struct page *page) { return (__builtin_constant_p((PG_swapbacked)) ? constant_test_bit((PG_swapbacked), (&page->flags)) : variable_test_bit((PG_swapbacked), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageSwapBacked(struct page *page) { set_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSwapBacked(struct page *page) { clear_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSwapBacked(struct page *page) { __clear_bit(PG_swapbacked, &page->flags); }
static inline __attribute__((always_inline)) int PageSlobFree(struct page *page) { return (__builtin_constant_p((PG_slob_free)) ? constant_test_bit((PG_slob_free), (&page->flags)) : variable_test_bit((PG_slob_free), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageSlobFree(struct page *page) { __set_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlobFree(struct page *page) { __clear_bit(PG_slob_free, &page->flags); }
static inline __attribute__((always_inline)) int PageSlubFrozen(struct page *page) { return (__builtin_constant_p((PG_slub_frozen)) ? constant_test_bit((PG_slub_frozen), (&page->flags)) : variable_test_bit((PG_slub_frozen), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageSlubFrozen(struct page *page) { __set_bit(PG_slub_frozen, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlubFrozen(struct page *page) { __clear_bit(PG_slub_frozen, &page->flags); }
static inline __attribute__((always_inline)) int PagePrivate(struct page *page) { return (__builtin_constant_p((PG_private)) ? constant_test_bit((PG_private), (&page->flags)) : variable_test_bit((PG_private), (&page->flags))); } static inline __attribute__((always_inline)) void SetPagePrivate(struct page *page) { set_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate(struct page *page) { clear_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void __SetPagePrivate(struct page *page) { __set_bit(PG_private, &page->flags); }
static inline __attribute__((always_inline)) void __ClearPagePrivate(struct page *page) { __clear_bit(PG_private, &page->flags); }
static inline __attribute__((always_inline)) int PagePrivate2(struct page *page) { return (__builtin_constant_p((PG_private_2)) ? constant_test_bit((PG_private_2), (&page->flags)) : variable_test_bit((PG_private_2), (&page->flags))); } static inline __attribute__((always_inline)) void SetPagePrivate2(struct page *page) { set_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate2(struct page *page) { clear_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) int TestSetPagePrivate2(struct page *page) { return test_and_set_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) int TestClearPagePrivate2(struct page *page) { return test_and_clear_bit(PG_private_2, &page->flags); }
static inline __attribute__((always_inline)) int PageOwnerPriv1(struct page *page) { return (__builtin_constant_p((PG_owner_priv_1)) ? constant_test_bit((PG_owner_priv_1), (&page->flags)) : variable_test_bit((PG_owner_priv_1), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageOwnerPriv1(struct page *page) { set_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) void ClearPageOwnerPriv1(struct page *page) { clear_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageOwnerPriv1(struct page *page) { return test_and_clear_bit(PG_owner_priv_1, &page->flags); }
static inline __attribute__((always_inline)) int PageWriteback(struct page *page) { return (__builtin_constant_p((PG_writeback)) ? constant_test_bit((PG_writeback), (&page->flags)) : variable_test_bit((PG_writeback), (&page->flags))); } static inline __attribute__((always_inline)) int TestSetPageWriteback(struct page *page) { return test_and_set_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageWriteback(struct page *page) { return test_and_clear_bit(PG_writeback, &page->flags); }
static inline __attribute__((always_inline)) int PageMappedToDisk(struct page *page) { return (__builtin_constant_p((PG_mappedtodisk)) ? constant_test_bit((PG_mappedtodisk), (&page->flags)) : variable_test_bit((PG_mappedtodisk), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageMappedToDisk(struct page *page) { set_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) void ClearPageMappedToDisk(struct page *page) { clear_bit(PG_mappedtodisk, &page->flags); }
static inline __attribute__((always_inline)) int PageReclaim(struct page *page) { return (__builtin_constant_p((PG_reclaim)) ? constant_test_bit((PG_reclaim), (&page->flags)) : variable_test_bit((PG_reclaim), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageReclaim(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReclaim(struct page *page) { clear_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReclaim(struct page *page) { return test_and_clear_bit(PG_reclaim, &page->flags); }
/* Readahead state shares the PG_reclaim bit. */
static inline __attribute__((always_inline)) int PageReadahead(struct page *page) { return (__builtin_constant_p((PG_reclaim)) ? constant_test_bit((PG_reclaim), (&page->flags)) : variable_test_bit((PG_reclaim), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageReadahead(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReadahead(struct page *page) { clear_bit(PG_reclaim, &page->flags); }
static inline __attribute__((always_inline)) int PageSwapCache(struct page *page) { return (__builtin_constant_p((PG_swapcache)) ? constant_test_bit((PG_swapcache), (&page->flags)) : variable_test_bit((PG_swapcache), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageSwapCache(struct page *page) { set_bit(PG_swapcache, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSwapCache(struct page *page) { clear_bit(PG_swapcache, &page->flags); }
static inline __attribute__((always_inline)) int PageUnevictable(struct page *page) { return (__builtin_constant_p((PG_unevictable)) ? constant_test_bit((PG_unevictable), (&page->flags)) : variable_test_bit((PG_unevictable), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageUnevictable(struct page *page) { set_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void ClearPageUnevictable(struct page *page) { clear_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageUnevictable(struct page *page) { __clear_bit(PG_unevictable, &page->flags); }
static inline __attribute__((always_inline)) int TestClearPageUnevictable(struct page *page) { return test_and_clear_bit(PG_unevictable, &page->flags); }
static inline __attribute__((always_inline)) int PageMlocked(struct page *page) { return (__builtin_constant_p((PG_mlocked)) ? constant_test_bit((PG_mlocked), (&page->flags)) : variable_test_bit((PG_mlocked), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageMlocked(struct page *page) { set_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageMlocked(struct page *page) { clear_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageMlocked(struct page *page) { __clear_bit(PG_mlocked, &page->flags); }
static inline __attribute__((always_inline)) int TestSetPageMlocked(struct page *page) { return test_and_set_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageMlocked(struct page *page) { return test_and_clear_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) int __TestClearPageMlocked(struct page *page) { return __test_and_clear_bit(PG_mlocked, &page->flags); }
static inline __attribute__((always_inline)) int PageUncached(struct page *page) { return (__builtin_constant_p((PG_uncached)) ? constant_test_bit((PG_uncached), (&page->flags)) : variable_test_bit((PG_uncached), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageUncached(struct page *page) { set_bit(PG_uncached, &page->flags); } static inline __attribute__((always_inline)) void ClearPageUncached(struct page *page) { clear_bit(PG_uncached, &page->flags); }
/* HWPoison support compiled out: constant-0 stub. */
static inline __attribute__((always_inline)) int PageHWPoison(struct page *page) { return 0; }
19833 | u64 stable_page_flags(struct page *page); | |
19834 | static inline __attribute__((always_inline)) int PageUptodate(struct page *page) | |
19835 | { | |
19836 | int ret = (__builtin_constant_p((PG_uptodate)) ? constant_test_bit((PG_uptodate), (&(page)->flags)) : variable_test_bit((PG_uptodate), (&(page)->flags))); | |
19837 | if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 295, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; })) | |
19838 | __asm__ __volatile__("": : :"memory"); | |
19839 | return ret; | |
19840 | } | |
19841 | static inline __attribute__((always_inline)) void __SetPageUptodate(struct page *page) | |
19842 | { | |
19843 | __asm__ __volatile__("": : :"memory"); | |
19844 | __set_bit(PG_uptodate, &(page)->flags); | |
19845 | } | |
19846 | static inline __attribute__((always_inline)) void SetPageUptodate(struct page *page) | |
19847 | { | |
19848 | __asm__ __volatile__("": : :"memory"); | |
19849 | set_bit(PG_uptodate, &(page)->flags); | |
19850 | } | |
19851 | static inline __attribute__((always_inline)) void ClearPageUptodate(struct page *page) { clear_bit(PG_uptodate, &page->flags); } | |
19852 | extern void cancel_dirty_page(struct page *page, unsigned int account_size); | |
19853 | int test_clear_page_writeback(struct page *page); | |
19854 | int test_set_page_writeback(struct page *page); | |
19855 | static inline __attribute__((always_inline)) void set_page_writeback(struct page *page) | |
19856 | { | |
19857 | test_set_page_writeback(page); | |
19858 | } | |
19859 | static inline __attribute__((always_inline)) int PageHead(struct page *page) { return (__builtin_constant_p((PG_head)) ? constant_test_bit((PG_head), (&page->flags)) : variable_test_bit((PG_head), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageHead(struct page *page) { __set_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageHead(struct page *page) { __clear_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void ClearPageHead(struct page *page) { clear_bit(PG_head, &page->flags); } | |
19860 | static inline __attribute__((always_inline)) int PageTail(struct page *page) { return (__builtin_constant_p((PG_tail)) ? constant_test_bit((PG_tail), (&page->flags)) : variable_test_bit((PG_tail), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageTail(struct page *page) { __set_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageTail(struct page *page) { __clear_bit(PG_tail, &page->flags); } | |
19861 | static inline __attribute__((always_inline)) int PageCompound(struct page *page) | |
19862 | { | |
19863 | return page->flags & ((1L << PG_head) | (1L << PG_tail)); | |
19864 | } | |
19865 | static inline __attribute__((always_inline)) void ClearPageCompound(struct page *page) | |
19866 | { | |
19867 | do { if (__builtin_constant_p((((__builtin_constant_p(!PageHead(page)) ? !!(!PageHead(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 356, }; ______r = __builtin_expect(!!(!PageHead(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!PageHead(page)) ? !!(!PageHead(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 356, }; ______r = __builtin_expect(!!(!PageHead(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 356, }; ______r = !!(((__builtin_constant_p(!PageHead(page)) ? !!(!PageHead(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 356, }; ______r = __builtin_expect(!!(!PageHead(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (356), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0); | |
19868 | ClearPageHead(page); | |
19869 | } | |
19870 | static inline __attribute__((always_inline)) int PageTransHuge(struct page *page) | |
19871 | { | |
19872 | do { (void)(PageTail(page)); } while (0); | |
19873 | return PageHead(page); | |
19874 | } | |
19875 | static inline __attribute__((always_inline)) int PageTransCompound(struct page *page) | |
19876 | { | |
19877 | return PageCompound(page); | |
19878 | } | |
19879 | static inline __attribute__((always_inline)) int page_has_private(struct page *page) | |
19880 | { | |
19881 | return !!(page->flags & (1 << PG_private | 1 << PG_private_2)); | |
19882 | } | |
19883 | extern int do_huge_pmd_anonymous_page(struct mm_struct *mm, | |
19884 | struct vm_area_struct *vma, | |
19885 | unsigned long address, pmd_t *pmd, | |
19886 | unsigned int flags); | |
19887 | extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |
19888 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, | |
19889 | struct vm_area_struct *vma); | |
19890 | extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |
19891 | unsigned long address, pmd_t *pmd, | |
19892 | pmd_t orig_pmd); | |
19893 | extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm); | |
19894 | extern struct page *follow_trans_huge_pmd(struct mm_struct *mm, | |
19895 | unsigned long addr, | |
19896 | pmd_t *pmd, | |
19897 | unsigned int flags); | |
19898 | extern int zap_huge_pmd(struct mmu_gather *tlb, | |
19899 | struct vm_area_struct *vma, | |
19900 | pmd_t *pmd); | |
19901 | extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |
19902 | unsigned long addr, unsigned long end, | |
19903 | unsigned char *vec); | |
19904 | extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |
19905 | unsigned long addr, pgprot_t newprot); | |
19906 | enum transparent_hugepage_flag { | |
19907 | TRANSPARENT_HUGEPAGE_FLAG, | |
19908 | TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, | |
19909 | TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, | |
19910 | TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, | |
19911 | TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, | |
19912 | }; | |
19913 | enum page_check_address_pmd_flag { | |
19914 | PAGE_CHECK_ADDRESS_PMD_FLAG, | |
19915 | PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, | |
19916 | PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, | |
19917 | }; | |
19918 | extern pmd_t *page_check_address_pmd(struct page *page, | |
19919 | struct mm_struct *mm, | |
19920 | unsigned long address, | |
19921 | enum page_check_address_pmd_flag flag); | |
19922 | extern unsigned long transparent_hugepage_flags; | |
19923 | extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |
19924 | pmd_t *dst_pmd, pmd_t *src_pmd, | |
19925 | struct vm_area_struct *vma, | |
19926 | unsigned long addr, unsigned long end); | |
19927 | extern int handle_pte_fault(struct mm_struct *mm, | |
19928 | struct vm_area_struct *vma, unsigned long address, | |
19929 | pte_t *pte, pmd_t *pmd, unsigned int flags); | |
19930 | extern int split_huge_page(struct page *page); | |
19931 | extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd); | |
19932 | extern int hugepage_madvise(struct vm_area_struct *vma, | |
19933 | unsigned long *vm_flags, int advice); | |
19934 | extern void __vma_adjust_trans_huge(struct vm_area_struct *vma, | |
19935 | unsigned long start, | |
19936 | unsigned long end, | |
19937 | long adjust_next); | |
19938 | static inline __attribute__((always_inline)) void vma_adjust_trans_huge(struct vm_area_struct *vma, | |
19939 | unsigned long start, | |
19940 | unsigned long end, | |
19941 | long adjust_next) | |
19942 | { | |
19943 | if (__builtin_constant_p(((!vma->anon_vma || vma->vm_ops))) ? !!((!vma->anon_vma || vma->vm_ops)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 116, }; ______r = !!((!vma->anon_vma || vma->vm_ops)); ______f.miss_hit[______r]++; ______r; })) | |
19944 | return; | |
19945 | __vma_adjust_trans_huge(vma, start, end, adjust_next); | |
19946 | } | |
19947 | static inline __attribute__((always_inline)) int hpage_nr_pages(struct page *page) | |
19948 | { | |
19949 | if (__builtin_constant_p((((__builtin_constant_p(PageTransHuge(page)) ? !!(PageTransHuge(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 122, }; ______r = __builtin_expect(!!(PageTransHuge(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageTransHuge(page)) ? !!(PageTransHuge(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 122, }; ______r = __builtin_expect(!!(PageTransHuge(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 122, }; ______r = !!(((__builtin_constant_p(PageTransHuge(page)) ? !!(PageTransHuge(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 122, }; ______r = __builtin_expect(!!(PageTransHuge(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
19950 | return (1<<(21 -12)); | |
19951 | return 1; | |
19952 | } | |
19953 | static inline __attribute__((always_inline)) struct page *compound_trans_head(struct page *page) | |
19954 | { | |
19955 | if (__builtin_constant_p(((PageTail(page)))) ? !!((PageTail(page))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 128, }; ______r = !!((PageTail(page))); ______f.miss_hit[______r]++; ______r; })) { | |
19956 | struct page *head; | |
19957 | head = page->first_page; | |
19958 | __asm__ __volatile__("": : :"memory"); | |
19959 | if (__builtin_constant_p(((PageTail(page)))) ? !!((PageTail(page))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 138, }; ______r = !!((PageTail(page))); ______f.miss_hit[______r]++; ______r; })) | |
19960 | return head; | |
19961 | } | |
19962 | return page; | |
19963 | } | |
19964 | static inline __attribute__((always_inline)) int put_page_testzero(struct page *page) | |
19965 | { | |
19966 | do { (void)(atomic_read(&page->_count) == 0); } while (0); | |
19967 | return atomic_dec_and_test(&page->_count); | |
19968 | } | |
19969 | static inline __attribute__((always_inline)) int get_page_unless_zero(struct page *page) | |
19970 | { | |
19971 | return atomic_add_unless((&page->_count), 1, 0); | |
19972 | } | |
19973 | extern int page_is_ram(unsigned long pfn); | |
19974 | struct page *vmalloc_to_page(const void *addr); | |
19975 | unsigned long vmalloc_to_pfn(const void *addr); | |
19976 | static inline __attribute__((always_inline)) int is_vmalloc_addr(const void *x) | |
19977 | { | |
19978 | unsigned long addr = (unsigned long)x; | |
19979 | return addr >= ((unsigned long)high_memory + (8 * 1024 * 1024)) && addr < ((((((unsigned long)__FIXADDR_TOP) - (__end_of_fixed_addresses << 12)) - ((1UL) << 12) * (512 + 1)) & (~((1UL << 21) - 1))) - 2 * ((1UL) << 12)); | |
19980 | } | |
19981 | extern int is_vmalloc_or_module_addr(const void *x); | |
19982 | static inline __attribute__((always_inline)) void compound_lock(struct page *page) | |
19983 | { | |
19984 | bit_spin_lock(PG_compound_lock, &page->flags); | |
19985 | } | |
19986 | static inline __attribute__((always_inline)) void compound_unlock(struct page *page) | |
19987 | { | |
19988 | bit_spin_unlock(PG_compound_lock, &page->flags); | |
19989 | } | |
19990 | static inline __attribute__((always_inline)) unsigned long compound_lock_irqsave(struct page *page) | |
19991 | { | |
19992 | unsigned long flags = flags; | |
19993 | do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0); | |
19994 | compound_lock(page); | |
19995 | return flags; | |
19996 | } | |
19997 | static inline __attribute__((always_inline)) void compound_unlock_irqrestore(struct page *page, | |
19998 | unsigned long flags) | |
19999 | { | |
20000 | compound_unlock(page); | |
20001 | do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 347, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); | |
20002 | } | |
20003 | static inline __attribute__((always_inline)) struct page *compound_head(struct page *page) | |
20004 | { | |
20005 | if (__builtin_constant_p((((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 353, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 353, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 353, }; ______r = !!(((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 353, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
20006 | return page->first_page; | |
20007 | return page; | |
20008 | } | |
20009 | static inline __attribute__((always_inline)) int page_count(struct page *page) | |
20010 | { | |
20011 | return atomic_read(&compound_head(page)->_count); | |
20012 | } | |
20013 | static inline __attribute__((always_inline)) void get_page(struct page *page) | |
20014 | { | |
20015 | do { (void)(atomic_read(&page->_count) < !PageTail(page)); } while (0); | |
20016 | atomic_inc(&page->_count); | |
20017 | if (__builtin_constant_p((((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 379, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 379, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 379, }; ______r = !!(((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 379, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) { | |
20018 | do { (void)(atomic_read(&page->first_page->_count) <= 0); } while (0); | |
20019 | atomic_inc(&page->first_page->_count); | |
20020 | } | |
20021 | } | |
20022 | static inline __attribute__((always_inline)) struct page *virt_to_head_page(const void *x) | |
20023 | { | |
20024 | struct page *page = (mem_map + (((((unsigned long)(x)) - ((unsigned long)(0xC0000000UL))) >> 12) - (0UL))); | |
20025 | return compound_head(page); | |
20026 | } | |
20027 | static inline __attribute__((always_inline)) void init_page_count(struct page *page) | |
20028 | { | |
20029 | atomic_set(&page->_count, 1); | |
20030 | } | |
20031 | static inline __attribute__((always_inline)) int PageBuddy(struct page *page) | |
20032 | { | |
20033 | return atomic_read(&page->_mapcount) == (-128); | |
20034 | } | |
20035 | static inline __attribute__((always_inline)) void __SetPageBuddy(struct page *page) | |
20036 | { | |
20037 | do { (void)(atomic_read(&page->_mapcount) != -1); } while (0); | |
20038 | atomic_set(&page->_mapcount, (-128)); | |
20039 | } | |
20040 | static inline __attribute__((always_inline)) void __ClearPageBuddy(struct page *page) | |
20041 | { | |
20042 | do { (void)(!PageBuddy(page)); } while (0); | |
20043 | atomic_set(&page->_mapcount, -1); | |
20044 | } | |
20045 | void put_page(struct page *page); | |
20046 | void put_pages_list(struct list_head *pages); | |
20047 | void split_page(struct page *page, unsigned int order); | |
20048 | int split_free_page(struct page *page); | |
20049 | typedef void compound_page_dtor(struct page *); | |
20050 | static inline __attribute__((always_inline)) void set_compound_page_dtor(struct page *page, | |
20051 | compound_page_dtor *dtor) | |
20052 | { | |
20053 | page[1].lru.next = (void *)dtor; | |
20054 | } | |
20055 | static inline __attribute__((always_inline)) compound_page_dtor *get_compound_page_dtor(struct page *page) | |
20056 | { | |
20057 | return (compound_page_dtor *)page[1].lru.next; | |
20058 | } | |
20059 | static inline __attribute__((always_inline)) int compound_order(struct page *page) | |
20060 | { | |
20061 | if (__builtin_constant_p(((!PageHead(page)))) ? !!((!PageHead(page))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 459, }; ______r = !!((!PageHead(page))); ______f.miss_hit[______r]++; ______r; })) | |
20062 | return 0; | |
20063 | return (unsigned long)page[1].lru.prev; | |
20064 | } | |
20065 | static inline __attribute__((always_inline)) int compound_trans_order(struct page *page) | |
20066 | { | |
20067 | int order; | |
20068 | unsigned long flags; | |
20069 | if (__builtin_constant_p(((!PageHead(page)))) ? !!((!PageHead(page))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 469, }; ______r = !!((!PageHead(page))); ______f.miss_hit[______r]++; ______r; })) | |
20070 | return 0; | |
20071 | flags = compound_lock_irqsave(page); | |
20072 | order = compound_order(page); | |
20073 | compound_unlock_irqrestore(page, flags); | |
20074 | return order; | |
20075 | } | |
20076 | static inline __attribute__((always_inline)) void set_compound_order(struct page *page, unsigned long order) | |
20077 | { | |
20078 | page[1].lru.prev = (void *)order; | |
20079 | } | |
20080 | static inline __attribute__((always_inline)) pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) | |
20081 | { | |
20082 | if (__builtin_constant_p((((__builtin_constant_p(vma->vm_flags & 0x00000002) ? !!(vma->vm_flags & 0x00000002) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 492, }; ______r = __builtin_expect(!!(vma->vm_flags & 0x00000002), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(vma->vm_flags & 0x00000002) ? !!(vma->vm_flags & 0x00000002) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 492, }; ______r = __builtin_expect(!!(vma->vm_flags & 0x00000002), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 492, }; ______r = !!(((__builtin_constant_p(vma->vm_flags & 0x00000002) ? !!(vma->vm_flags & 0x00000002) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 492, }; ______r = __builtin_expect(!!(vma->vm_flags & 0x00000002), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
20083 | pte = pte_mkwrite(pte); | |
20084 | return pte; | |
20085 | } | |
20086 | static inline __attribute__((always_inline)) enum zone_type page_zonenum(struct page *page) | |
20087 | { | |
20088 | return (page->flags >> (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0))) & ((1UL << 2) - 1); | |
20089 | } | |
20090 | static inline __attribute__((always_inline)) int page_zone_id(struct page *page) | |
20091 | { | |
20092 | return (page->flags >> ((((((sizeof(unsigned long)*8) - 0) - 0) < ((((sizeof(unsigned long)*8) - 0) - 0) - 2))? (((sizeof(unsigned long)*8) - 0) - 0) : ((((sizeof(unsigned long)*8) - 0) - 0) - 2)) * ((0 + 2) != 0))) & ((1UL << (0 + 2)) - 1); | |
20093 | } | |
20094 | static inline __attribute__((always_inline)) int zone_to_nid(struct zone *zone) | |
20095 | { | |
20096 | return 0; | |
20097 | } | |
20098 | static inline __attribute__((always_inline)) int page_to_nid(struct page *page) | |
20099 | { | |
20100 | return (page->flags >> ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0))) & ((1UL << 0) - 1); | |
20101 | } | |
20102 | static inline __attribute__((always_inline)) struct zone *page_zone(struct page *page) | |
20103 | { | |
20104 | return &(&contig_page_data)->node_zones[page_zonenum(page)]; | |
20105 | } | |
20106 | static inline __attribute__((always_inline)) void set_page_zone(struct page *page, enum zone_type zone) | |
20107 | { | |
20108 | page->flags &= ~(((1UL << 2) - 1) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0))); | |
20109 | page->flags |= (zone & ((1UL << 2) - 1)) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0)); | |
20110 | } | |
20111 | static inline __attribute__((always_inline)) void set_page_node(struct page *page, unsigned long node) | |
20112 | { | |
20113 | page->flags &= ~(((1UL << 0) - 1) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0))); | |
20114 | page->flags |= (node & ((1UL << 0) - 1)) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0)); | |
20115 | } | |
20116 | static inline __attribute__((always_inline)) void set_page_links(struct page *page, enum zone_type zone, | |
20117 | unsigned long node, unsigned long pfn) | |
20118 | { | |
20119 | set_page_zone(page, zone); | |
20120 | set_page_node(page, node); | |
20121 | } | |
20122 | enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |
20123 | PGALLOC_DMA, PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE, | |
20124 | PGFREE, PGACTIVATE, PGDEACTIVATE, | |
20125 | PGFAULT, PGMAJFAULT, | |
20126 | PGREFILL_DMA, PGREFILL_NORMAL , PGREFILL_HIGH , PGREFILL_MOVABLE, | |
20127 | PGSTEAL_DMA, PGSTEAL_NORMAL , PGSTEAL_HIGH , PGSTEAL_MOVABLE, | |
20128 | PGSCAN_KSWAPD_DMA, PGSCAN_KSWAPD_NORMAL , PGSCAN_KSWAPD_HIGH , PGSCAN_KSWAPD_MOVABLE, | |
20129 | PGSCAN_DIRECT_DMA, PGSCAN_DIRECT_NORMAL , PGSCAN_DIRECT_HIGH , PGSCAN_DIRECT_MOVABLE, | |
20130 | PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, | |
20131 | KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, | |
20132 | KSWAPD_SKIP_CONGESTION_WAIT, | |
20133 | PAGEOUTRUN, ALLOCSTALL, PGROTATED, | |
20134 | COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED, | |
20135 | COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS, | |
20136 | UNEVICTABLE_PGCULLED, | |
20137 | UNEVICTABLE_PGSCANNED, | |
20138 | UNEVICTABLE_PGRESCUED, | |
20139 | UNEVICTABLE_PGMLOCKED, | |
20140 | UNEVICTABLE_PGMUNLOCKED, | |
20141 | UNEVICTABLE_PGCLEARED, | |
20142 | UNEVICTABLE_PGSTRANDED, | |
20143 | UNEVICTABLE_MLOCKFREED, | |
20144 | THP_FAULT_ALLOC, | |
20145 | THP_FAULT_FALLBACK, | |
20146 | THP_COLLAPSE_ALLOC, | |
20147 | THP_COLLAPSE_ALLOC_FAILED, | |
20148 | THP_SPLIT, | |
20149 | NR_VM_EVENT_ITEMS | |
20150 | }; | |
20151 | extern int sysctl_stat_interval; | |
20152 | struct vm_event_state { | |
20153 | unsigned long event[NR_VM_EVENT_ITEMS]; | |
20154 | }; | |
20155 | extern __attribute__((section(".data..percpu" ""))) __typeof__(struct vm_event_state) vm_event_states; | |
20156 | static inline __attribute__((always_inline)) void __count_vm_event(enum vm_event_item item) | |
20157 | { | |
20158 | do { do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((vm_event_states.event[item])))) { case 1: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((vm_event_states.event[item])))))); (typeof(*(&((((vm_event_states.event[item])))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); | |
20159 | } | |
20160 | static inline __attribute__((always_inline)) void count_vm_event(enum vm_event_item item) | |
20161 | { | |
20162 | do { do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((vm_event_states.event[item])))) { case 1: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((vm_event_states.event[item])))))); (typeof(*(&((((vm_event_states.event[item])))))) *)tcp_ptr__; }) += ((1)); do { do { __asm__ 
__volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); | |
20163 | } | |
20164 | static inline __attribute__((always_inline)) void __count_vm_events(enum vm_event_item item, long delta) | |
20165 | { | |
20166 | do { do { const void *__vpp_verify = (typeof(&((vm_event_states.event[item]))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((vm_event_states.event[item]))) { case 1: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&(((vm_event_states.event[item]))))); (typeof(*(&(((vm_event_states.event[item]))))) *)tcp_ptr__; }) += ((delta)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); | |
20167 | } | |
20168 | static inline __attribute__((always_inline)) void count_vm_events(enum vm_event_item item, long delta) | |
20169 | { | |
20170 | do { do { const void *__vpp_verify = (typeof(&((vm_event_states.event[item]))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((vm_event_states.event[item]))) { case 1: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&(((vm_event_states.event[item]))))); (typeof(*(&(((vm_event_states.event[item]))))) *)tcp_ptr__; }) += ((delta)); do { do { __asm__ 
__volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); | |
20171 | } | |
20172 | extern void all_vm_events(unsigned long *); | |
20173 | extern void vm_events_fold_cpu(int cpu); | |
20174 | extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; | |
20175 | static inline __attribute__((always_inline)) void zone_page_state_add(long x, struct zone *zone, | |
20176 | enum zone_stat_item item) | |
20177 | { | |
20178 | atomic_long_add(x, &zone->vm_stat[item]); | |
20179 | atomic_long_add(x, &vm_stat[item]); | |
20180 | } | |
20181 | static inline __attribute__((always_inline)) unsigned long global_page_state(enum zone_stat_item item) | |
20182 | { | |
20183 | long x = atomic_long_read(&vm_stat[item]); | |
20184 | if (__builtin_constant_p(((x < 0))) ? !!((x < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 103, }; ______r = !!((x < 0)); ______f.miss_hit[______r]++; ______r; })) | |
20185 | x = 0; | |
20186 | return x; | |
20187 | } | |
20188 | static inline __attribute__((always_inline)) unsigned long zone_page_state(struct zone *zone, | |
20189 | enum zone_stat_item item) | |
20190 | { | |
20191 | long x = atomic_long_read(&zone->vm_stat[item]); | |
20192 | if (__builtin_constant_p(((x < 0))) ? !!((x < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 114, }; ______r = !!((x < 0)); ______f.miss_hit[______r]++; ______r; })) | |
20193 | x = 0; | |
20194 | return x; | |
20195 | } | |
20196 | static inline __attribute__((always_inline)) unsigned long zone_page_state_snapshot(struct zone *zone, | |
20197 | enum zone_stat_item item) | |
20198 | { | |
20199 | long x = atomic_long_read(&zone->vm_stat[item]); | |
20200 | int cpu; | |
20201 | for (((cpu)) = -1; ((cpu)) = cpumask_next(((cpu)), (cpu_online_mask)), ((cpu)) < nr_cpu_ids;) | |
20202 | x += ({ do { const void *__vpp_verify = (typeof(((zone->pageset))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((zone->pageset))) *)((zone->pageset)))); (typeof((typeof(*((zone->pageset))) *)((zone->pageset)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->vm_stat_diff[item]; | |
20203 | if (__builtin_constant_p(((x < 0))) ? !!((x < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 136, }; ______r = !!((x < 0)); ______f.miss_hit[______r]++; ______r; })) | |
20204 | x = 0; | |
20205 | return x; | |
20206 | } | |
20207 | extern unsigned long global_reclaimable_pages(void); | |
20208 | extern unsigned long zone_reclaimable_pages(struct zone *zone); | |
20209 | static inline __attribute__((always_inline)) void zap_zone_vm_stats(struct zone *zone) | |
20210 | { | |
20211 | __builtin_memset(zone->vm_stat, 0, sizeof(zone->vm_stat)); | |
20212 | } | |
20213 | extern void inc_zone_state(struct zone *, enum zone_stat_item); | |
20214 | void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int); | |
20215 | void __inc_zone_page_state(struct page *, enum zone_stat_item); | |
20216 | void __dec_zone_page_state(struct page *, enum zone_stat_item); | |
20217 | void mod_zone_page_state(struct zone *, enum zone_stat_item, int); | |
20218 | void inc_zone_page_state(struct page *, enum zone_stat_item); | |
20219 | void dec_zone_page_state(struct page *, enum zone_stat_item); | |
20220 | extern void inc_zone_state(struct zone *, enum zone_stat_item); | |
20221 | extern void __inc_zone_state(struct zone *, enum zone_stat_item); | |
20222 | extern void dec_zone_state(struct zone *, enum zone_stat_item); | |
20223 | extern void __dec_zone_state(struct zone *, enum zone_stat_item); | |
20224 | void refresh_cpu_vm_stats(int); | |
20225 | void refresh_zone_stat_thresholds(void); | |
20226 | int calculate_pressure_threshold(struct zone *zone); | |
20227 | int calculate_normal_threshold(struct zone *zone); | |
20228 | void set_pgdat_percpu_threshold(pg_data_t *pgdat, | |
20229 | int (*calculate_pressure)(struct zone *)); | |
20230 | extern const char * const vmstat_text[]; | |
20231 | static inline __attribute__((always_inline)) __attribute__((always_inline)) void *lowmem_page_address(struct page *page) | |
20232 | { | |
20233 | return ((void *)((unsigned long)(((phys_addr_t)(((unsigned long)((page) - mem_map) + (0UL))) << 12))+((unsigned long)(0xC0000000UL)))); | |
20234 | } | |
20235 | void *page_address(struct page *page); | |
20236 | void set_page_address(struct page *page, void *virtual); | |
20237 | void page_address_init(void); | |
20238 | extern struct address_space swapper_space; | |
20239 | static inline __attribute__((always_inline)) struct address_space *page_mapping(struct page *page) | |
20240 | { | |
20241 | struct address_space *mapping = page->mapping; | |
20242 | do { (void)(PageSlab(page)); } while (0); | |
20243 | if (__builtin_constant_p((((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 776, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 776, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 776, }; ______r = !!(((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 776, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
20244 | mapping = &swapper_space; | |
20245 | else if (__builtin_constant_p((((unsigned long)mapping & 1))) ? !!(((unsigned long)mapping & 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 778, }; ______r = !!(((unsigned long)mapping & 1)); ______f.miss_hit[______r]++; ______r; })) | |
20246 | mapping = ((void *)0); | |
20247 | return mapping; | |
20248 | } | |
20249 | static inline __attribute__((always_inline)) void *page_rmapping(struct page *page) | |
20250 | { | |
20251 | return (void *)((unsigned long)page->mapping & ~(1 | 2)); | |
20252 | } | |
20253 | static inline __attribute__((always_inline)) int PageAnon(struct page *page) | |
20254 | { | |
20255 | return ((unsigned long)page->mapping & 1) != 0; | |
20256 | } | |
20257 | static inline __attribute__((always_inline)) unsigned long page_index(struct page *page) | |
20258 | { | |
20259 | if (__builtin_constant_p((((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 800, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 800, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 800, }; ______r = !!(((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 800, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) | |
20260 | return ((page)->private); | |
20261 | return page->index; | |
20262 | } | |
20263 | static inline __attribute__((always_inline)) void reset_page_mapcount(struct page *page) | |
20264 | { | |
20265 | atomic_set(&(page)->_mapcount, -1); | |
20266 | } | |
20267 | static inline __attribute__((always_inline)) int page_mapcount(struct page *page) | |
20268 | { | |
20269 | return atomic_read(&(page)->_mapcount) + 1; | |
20270 | } | |
20271 | static inline __attribute__((always_inline)) int page_mapped(struct page *page) | |
20272 | { | |
20273 | return atomic_read(&(page)->_mapcount) >= 0; | |
20274 | } | |
20275 | extern void pagefault_out_of_memory(void); | |
20276 | extern void show_free_areas(unsigned int flags); | |
20277 | extern bool skip_free_areas_node(unsigned int flags, int nid); | |
20278 | int shmem_lock(struct file *file, int lock, struct user_struct *user); | |
20279 | struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); | |
20280 | int shmem_zero_setup(struct vm_area_struct *); | |
20281 | extern int can_do_mlock(void); | |
20282 | extern int user_shm_lock(size_t, struct user_struct *); | |
20283 | extern void user_shm_unlock(size_t, struct user_struct *); | |
20284 | struct zap_details { | |
20285 | struct vm_area_struct *nonlinear_vma; | |
20286 | struct address_space *check_mapping; | |
20287 | unsigned long first_index; | |
20288 | unsigned long last_index; | |
20289 | }; | |
20290 | struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, | |
20291 | pte_t pte); | |
20292 | int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, | |
20293 | unsigned long size); | |
20294 | unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, | |
20295 | unsigned long size, struct zap_details *); | |
20296 | unsigned long unmap_vmas(struct mmu_gather *tlb, | |
20297 | struct vm_area_struct *start_vma, unsigned long start_addr, | |
20298 | unsigned long end_addr, unsigned long *nr_accounted, | |
20299 | struct zap_details *); | |
20300 | struct mm_walk { | |
20301 | int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *); | |
20302 | int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *); | |
20303 | int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); | |
20304 | int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); | |
20305 | int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); | |
20306 | int (*hugetlb_entry)(pte_t *, unsigned long, | |
20307 | unsigned long, unsigned long, struct mm_walk *); | |
20308 | struct mm_struct *mm; | |
20309 | void *private; | |
20310 | }; | |
20311 | int walk_page_range(unsigned long addr, unsigned long end, | |
20312 | struct mm_walk *walk); | |
20313 | void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, | |
20314 | unsigned long end, unsigned long floor, unsigned long ceiling); | |
20315 | int copy_page_range(struct mm_struct *dst, struct mm_struct *src, | |
20316 | struct vm_area_struct *vma); | |
20317 | void unmap_mapping_range(struct address_space *mapping, | |
20318 | loff_t const holebegin, loff_t const holelen, int even_cows); | |
20319 | int follow_pfn(struct vm_area_struct *vma, unsigned long address, | |
20320 | unsigned long *pfn); | |
20321 | int follow_phys(struct vm_area_struct *vma, unsigned long address, | |
20322 | unsigned int flags, unsigned long *prot, resource_size_t *phys); | |
20323 | int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, | |
20324 | void *buf, int len, int write); | |
20325 | static inline __attribute__((always_inline)) void unmap_shared_mapping_range(struct address_space *mapping, | |
20326 | loff_t const holebegin, loff_t const holelen) | |
20327 | { | |
20328 | unmap_mapping_range(mapping, holebegin, holelen, 0); | |
20329 | } | |
20330 | extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new); | |
20331 | extern void truncate_setsize(struct inode *inode, loff_t newsize); | |
20332 | extern int vmtruncate(struct inode *inode, loff_t offset); | |
20333 | extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end); | |
20334 | int truncate_inode_page(struct address_space *mapping, struct page *page); | |
20335 | int generic_error_remove_page(struct address_space *mapping, struct page *page); | |
20336 | int invalidate_inode_page(struct page *page); | |
20337 | extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |
20338 | unsigned long address, unsigned int flags); | |
20339 | extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, | |
20340 | unsigned long address, unsigned int fault_flags); | |
20341 | extern int make_pages_present(unsigned long addr, unsigned long end); | |
20342 | extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); | |
20343 | extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, | |
20344 | void *buf, int len, int write); | |
20345 | int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |
20346 | unsigned long start, int len, unsigned int foll_flags, | |
20347 | struct page **pages, struct vm_area_struct **vmas, | |
20348 | int *nonblocking); | |
20349 | int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |
20350 | unsigned long start, int nr_pages, int write, int force, | |
20351 | struct page **pages, struct vm_area_struct **vmas); | |
20352 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, | |
20353 | struct page **pages); | |
20354 | struct page *get_dump_page(unsigned long addr); | |
20355 | extern int try_to_release_page(struct page * page, gfp_t gfp_mask); | |
20356 | extern void do_invalidatepage(struct page *page, unsigned long offset); | |
20357 | int __set_page_dirty_nobuffers(struct page *page); | |
20358 | int __set_page_dirty_no_writeback(struct page *page); | |
20359 | int redirty_page_for_writepage(struct writeback_control *wbc, | |
20360 | struct page *page); | |
20361 | void account_page_dirtied(struct page *page, struct address_space *mapping); | |
20362 | void account_page_writeback(struct page *page); | |
20363 | int set_page_dirty(struct page *page); | |
20364 | int set_page_dirty_lock(struct page *page); | |
20365 | int clear_page_dirty_for_io(struct page *page); | |
20366 | static inline __attribute__((always_inline)) int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) | |
20367 | { | |
20368 | return vma && (vma->vm_end == addr) && (vma->vm_flags & 0x00000100); | |
20369 | } | |
20370 | static inline __attribute__((always_inline)) int stack_guard_page_start(struct vm_area_struct *vma, | |
20371 | unsigned long addr) | |
20372 | { | |
20373 | return (vma->vm_flags & 0x00000100) && | |
20374 | (vma->vm_start == addr) && | |
20375 | !vma_growsdown(vma->vm_prev, addr); | |
20376 | } | |
20377 | static inline __attribute__((always_inline)) int vma_growsup(struct vm_area_struct *vma, unsigned long addr) | |
20378 | { | |
20379 | return vma && (vma->vm_start == addr) && (vma->vm_flags & 0x00000000); | |
20380 | } | |
20381 | static inline __attribute__((always_inline)) int stack_guard_page_end(struct vm_area_struct *vma, | |
20382 | unsigned long addr) | |
20383 | { | |
20384 | return (vma->vm_flags & 0x00000000) && | |
20385 | (vma->vm_end == addr) && | |
20386 | !vma_growsup(vma->vm_next, addr); | |
20387 | } | |
20388 | extern unsigned long move_page_tables(struct vm_area_struct *vma, | |
20389 | unsigned long old_addr, struct vm_area_struct *new_vma, | |
20390 | unsigned long new_addr, unsigned long len); | |
20391 | extern unsigned long do_mremap(unsigned long addr, | |
20392 | unsigned long old_len, unsigned long new_len, | |
20393 | unsigned long flags, unsigned long new_addr); | |
20394 | extern int mprotect_fixup(struct vm_area_struct *vma, | |
20395 | struct vm_area_struct **pprev, unsigned long start, | |
20396 | unsigned long end, unsigned long newflags); | |
20397 | int __get_user_pages_fast(unsigned long start, int nr_pages, int write, | |
20398 | struct page **pages); | |
20399 | static inline __attribute__((always_inline)) void set_mm_counter(struct mm_struct *mm, int member, long value) | |
20400 | { | |
20401 | atomic_long_set(&mm->rss_stat.count[member], value); | |
20402 | } | |
20403 | static inline __attribute__((always_inline)) unsigned long get_mm_counter(struct mm_struct *mm, int member) | |
20404 | { | |
20405 | return atomic_long_read(&mm->rss_stat.count[member]); | |
20406 | } | |
20407 | static inline __attribute__((always_inline)) void add_mm_counter(struct mm_struct *mm, int member, long value) | |
20408 | { | |
20409 | atomic_long_add(value, &mm->rss_stat.count[member]); | |
20410 | } | |
20411 | static inline __attribute__((always_inline)) void inc_mm_counter(struct mm_struct *mm, int member) | |
20412 | { | |
20413 | atomic_long_inc(&mm->rss_stat.count[member]); | |
20414 | } | |
20415 | static inline __attribute__((always_inline)) void dec_mm_counter(struct mm_struct *mm, int member) | |
20416 | { | |
20417 | atomic_long_dec(&mm->rss_stat.count[member]); | |
20418 | } | |
20419 | static inline __attribute__((always_inline)) unsigned long get_mm_rss(struct mm_struct *mm) | |
20420 | { | |
20421 | return get_mm_counter(mm, MM_FILEPAGES) + | |
20422 | get_mm_counter(mm, MM_ANONPAGES); | |
20423 | } | |
20424 | static inline __attribute__((always_inline)) unsigned long get_mm_hiwater_rss(struct mm_struct *mm) | |
20425 | { | |
20426 | return ({ typeof(mm->hiwater_rss) _max1 = (mm->hiwater_rss); typeof(get_mm_rss(mm)) _max2 = (get_mm_rss(mm)); (void) (&_max1 == &_max2); _max1 > _max2 ? _max1 : _max2; }); | |
20427 | } | |
20428 | static inline __attribute__((always_inline)) unsigned long get_mm_hiwater_vm(struct mm_struct *mm) | |
20429 | { | |
20430 | return ({ typeof(mm->hiwater_vm) _max1 = (mm->hiwater_vm); typeof(mm->total_vm) _max2 = (mm->total_vm); (void) (&_max1 == &_max2); _max1 > _max2 ? _max1 : _max2; }); | |
20431 | } | |
20432 | static inline __attribute__((always_inline)) void update_hiwater_rss(struct mm_struct *mm) | |
20433 | { | |
20434 | unsigned long _rss = get_mm_rss(mm); | |
20435 | if (__builtin_constant_p((((mm)->hiwater_rss < _rss))) ? !!(((mm)->hiwater_rss < _rss)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1107, }; ______r = !!(((mm)->hiwater_rss < _rss)); ______f.miss_hit[______r]++; ______r; })) | |
20436 | (mm)->hiwater_rss = _rss; | |
20437 | } | |
20438 | static inline __attribute__((always_inline)) void update_hiwater_vm(struct mm_struct *mm) | |
20439 | { | |
20440 | if (__builtin_constant_p(((mm->hiwater_vm < mm->total_vm))) ? !!((mm->hiwater_vm < mm->total_vm)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1113, }; ______r = !!((mm->hiwater_vm < mm->total_vm)); ______f.miss_hit[______r]++; ______r; })) | |
20441 | mm->hiwater_vm = mm->total_vm; | |
20442 | } | |
20443 | static inline __attribute__((always_inline)) void setmax_mm_hiwater_rss(unsigned long *maxrss, | |
20444 | struct mm_struct *mm) | |
20445 | { | |
20446 | unsigned long hiwater_rss = get_mm_hiwater_rss(mm); | |
20447 | if (__builtin_constant_p(((*maxrss < hiwater_rss))) ? !!((*maxrss < hiwater_rss)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1122, }; ______r = !!((*maxrss < hiwater_rss)); ______f.miss_hit[______r]++; ______r; })) | |
20448 | *maxrss = hiwater_rss; | |
20449 | } | |
20450 | static inline __attribute__((always_inline)) void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) | |
20451 | { | |
20452 | } | |
20453 | struct shrink_control { | |
20454 | gfp_t gfp_mask; | |
20455 | unsigned long nr_to_scan; | |
20456 | }; | |
20457 | struct shrinker { | |
20458 | int (*shrink)(struct shrinker *, struct shrink_control *sc); | |
20459 | int seeks; | |
20460 | struct list_head list; | |
20461 | long nr; | |
20462 | }; | |
20463 | extern void register_shrinker(struct shrinker *); | |
20464 | extern void unregister_shrinker(struct shrinker *); | |
20465 | int vma_wants_writenotify(struct vm_area_struct *vma); | |
20466 | extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, | |
20467 | spinlock_t **ptl); | |
20468 | static inline __attribute__((always_inline)) pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, | |
20469 | spinlock_t **ptl) | |
20470 | { | |
20471 | pte_t *ptep; | |
20472 | (ptep = __get_locked_pte(mm, addr, ptl)); | |
20473 | return ptep; | |
20474 | } | |
20475 | static inline __attribute__((always_inline)) int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, | |
20476 | unsigned long address) | |
20477 | { | |
20478 | return 0; | |
20479 | } | |
20480 | int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); | |
20481 | int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, | |
20482 | pmd_t *pmd, unsigned long address); | |
20483 | int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); | |
20484 | static inline __attribute__((always_inline)) pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) | |
20485 | { | |
20486 | return ((__builtin_constant_p(pgd_none(*pgd)) ? !!(pgd_none(*pgd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1215, }; ______r = __builtin_expect(!!(pgd_none(*pgd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __pud_alloc(mm, pgd, address))? | |
20487 | ((void *)0): pud_offset(pgd, address); | |
20488 | } | |
20489 | static inline __attribute__((always_inline)) pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) | |
20490 | { | |
20491 | return ((__builtin_constant_p(pud_none(*pud)) ? !!(pud_none(*pud)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1221, }; ______r = __builtin_expect(!!(pud_none(*pud)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __pmd_alloc(mm, pud, address))? | |
20492 | ((void *)0): pmd_offset(pud, address); | |
20493 | } | |
20494 | static inline __attribute__((always_inline)) void pgtable_page_ctor(struct page *page) | |
20495 | { | |
20496 | do {} while (0); | |
20497 | inc_zone_page_state(page, NR_PAGETABLE); | |
20498 | } | |
20499 | static inline __attribute__((always_inline)) void pgtable_page_dtor(struct page *page) | |
20500 | { | |
20501 | do {} while (0); | |
20502 | dec_zone_page_state(page, NR_PAGETABLE); | |
20503 | } | |
20504 | extern void free_area_init(unsigned long * zones_size); | |
20505 | extern void free_area_init_node(int nid, unsigned long * zones_size, | |
20506 | unsigned long zone_start_pfn, unsigned long *zholes_size); | |
20507 | extern void free_area_init_nodes(unsigned long *max_zone_pfn); | |
20508 | extern void add_active_range(unsigned int nid, unsigned long start_pfn, | |
20509 | unsigned long end_pfn); | |
20510 | extern void remove_active_range(unsigned int nid, unsigned long start_pfn, | |
20511 | unsigned long end_pfn); | |
20512 | extern void remove_all_active_ranges(void); | |
20513 | void sort_node_map(void); | |
20514 | unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, | |
20515 | unsigned long end_pfn); | |
20516 | extern unsigned long absent_pages_in_range(unsigned long start_pfn, | |
20517 | unsigned long end_pfn); | |
20518 | extern void get_pfn_range_for_nid(unsigned int nid, | |
20519 | unsigned long *start_pfn, unsigned long *end_pfn); | |
20520 | extern unsigned long find_min_pfn_with_active_regions(void); | |
20521 | extern void free_bootmem_with_active_regions(int nid, | |
20522 | unsigned long max_low_pfn); | |
20523 | int add_from_early_node_map(struct range *range, int az, | |
20524 | int nr_range, int nid); | |
20525 | u64 __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) find_memory_core_early(int nid, u64 size, u64 align, | |
20526 | u64 goal, u64 limit); | |
20527 | typedef int (*work_fn_t)(unsigned long, unsigned long, void *); | |
20528 | extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data); | |
20529 | extern void sparse_memory_present_with_active_regions(int nid); | |
20530 | extern int __attribute__ ((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) early_pfn_to_nid(unsigned long pfn); | |
20531 | extern void set_dma_reserve(unsigned long new_dma_reserve); | |
20532 | extern void memmap_init_zone(unsigned long, int, unsigned long, | |
20533 | unsigned long, enum memmap_context); | |
20534 | extern void setup_per_zone_wmarks(void); | |
20535 | extern int __attribute__ ((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) init_per_zone_wmark_min(void); | |
20536 | extern void mem_init(void); | |
20537 | extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) mmap_init(void); | |
20538 | extern void show_mem(unsigned int flags); | |
20539 | extern void si_meminfo(struct sysinfo * val); | |
20540 | extern void si_meminfo_node(struct sysinfo *val, int nid); | |
20541 | extern int after_bootmem; | |
20542 | extern void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...); | |
20543 | extern void setup_per_cpu_pageset(void); | |
20544 | extern void zone_pcp_update(struct zone *zone); | |
20545 | extern atomic_long_t mmap_pages_allocated; | |
20546 | extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); | |
20547 | void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); | |
20548 | void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *); | |
20549 | void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *); | |
20550 | struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma, | |
20551 | struct prio_tree_iter *iter); | |
20552 | static inline __attribute__((always_inline)) void vma_nonlinear_insert(struct vm_area_struct *vma, | |
20553 | struct list_head *list) | |
20554 | { | |
20555 | vma->shared.vm_set.parent = ((void *)0); | |
20556 | list_add_tail(&vma->shared.vm_set.list, list); | |
20557 | } | |
20558 | extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); | |
20559 | extern int vma_adjust(struct vm_area_struct *vma, unsigned long start, | |
20560 | unsigned long end, unsigned long pgoff, struct vm_area_struct *insert); | |
20561 | extern struct vm_area_struct *vma_merge(struct mm_struct *, | |
20562 | struct vm_area_struct *prev, unsigned long addr, unsigned long end, | |
20563 | unsigned long vm_flags, struct anon_vma *, struct file *, unsigned long, | |
20564 | struct mempolicy *); | |
20565 | extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); | |
20566 | extern int split_vma(struct mm_struct *, | |
20567 | struct vm_area_struct *, unsigned long addr, int new_below); | |
20568 | extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); | |
20569 | extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, | |
20570 | struct rb_node **, struct rb_node *); | |
20571 | extern void unlink_file_vma(struct vm_area_struct *); | |
20572 | extern struct vm_area_struct *copy_vma(struct vm_area_struct **, | |
20573 | unsigned long addr, unsigned long len, unsigned long pgoff); | |
20574 | extern void exit_mmap(struct mm_struct *); | |
20575 | extern int mm_take_all_locks(struct mm_struct *mm); | |
20576 | extern void mm_drop_all_locks(struct mm_struct *mm); | |
20577 | extern void added_exe_file_vma(struct mm_struct *mm); | |
20578 | extern void removed_exe_file_vma(struct mm_struct *mm); | |
20579 | extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); | |
20580 | extern struct file *get_mm_exe_file(struct mm_struct *mm); | |
20581 | extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); | |
20582 | extern int install_special_mapping(struct mm_struct *mm, | |
20583 | unsigned long addr, unsigned long len, | |
20584 | unsigned long flags, struct page **pages); | |
20585 | extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); | |
20586 | extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, | |
20587 | unsigned long len, unsigned long prot, | |
20588 | unsigned long flag, unsigned long pgoff); | |
20589 | extern unsigned long mmap_region(struct file *file, unsigned long addr, | |
20590 | unsigned long len, unsigned long flags, | |
20591 | vm_flags_t vm_flags, unsigned long pgoff); | |
20592 | static inline __attribute__((always_inline)) unsigned long do_mmap(struct file *file, unsigned long addr, | |
20593 | unsigned long len, unsigned long prot, | |
20594 | unsigned long flag, unsigned long offset) | |
20595 | { | |
20596 | unsigned long ret = -22; | |
20597 | if (__builtin_constant_p((((offset + ((((len)) + ((typeof((len)))((((1UL) << 12))) - 1)) & ~((typeof((len)))((((1UL) << 12))) - 1))) < offset))) ? !!(((offset + ((((len)) + ((typeof((len)))((((1UL) << 12))) - 1)) & ~((typeof((len)))((((1UL) << 12))) - 1))) < offset)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1446, }; ______r = !!(((offset + ((((len)) + ((typeof((len)))((((1UL) << 12))) - 1)) & ~((typeof((len)))((((1UL) << 12))) - 1))) < offset)); ______f.miss_hit[______r]++; ______r; })) | |
20598 | goto out; | |
20599 | if (__builtin_constant_p(((!(offset & ~(~(((1UL) << 12)-1)))))) ? !!((!(offset & ~(~(((1UL) << 12)-1))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1448, }; ______r = !!((!(offset & ~(~(((1UL) << 12)-1))))); ______f.miss_hit[______r]++; ______r; })) | |
20600 | ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> 12); | |
20601 | out: | |
20602 | return ret; | |
20603 | } | |
20604 | extern int do_munmap(struct mm_struct *, unsigned long, size_t); | |
20605 | extern unsigned long do_brk(unsigned long, unsigned long); | |
20606 | extern unsigned long page_unuse(struct page *); | |
20607 | extern void truncate_inode_pages(struct address_space *, loff_t); | |
20608 | extern void truncate_inode_pages_range(struct address_space *, | |
20609 | loff_t lstart, loff_t lend); | |
20610 | extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); | |
20611 | int write_one_page(struct page *page, int wait); | |
20612 | void task_dirty_inc(struct task_struct *tsk); | |
20613 | int force_page_cache_readahead(struct address_space *mapping, struct file *filp, | |
20614 | unsigned long offset, unsigned long nr_to_read); | |
20615 | void page_cache_sync_readahead(struct address_space *mapping, | |
20616 | struct file_ra_state *ra, | |
20617 | struct file *filp, | |
20618 | unsigned long offset, | |
20619 | unsigned long size); | |
20620 | void page_cache_async_readahead(struct address_space *mapping, | |
20621 | struct file_ra_state *ra, | |
20622 | struct file *filp, | |
20623 | struct page *pg, | |
20624 | unsigned long offset, | |
20625 | unsigned long size); | |
20626 | unsigned long max_sane_readahead(unsigned long nr); | |
20627 | unsigned long ra_submit(struct file_ra_state *ra, | |
20628 | struct address_space *mapping, | |
20629 | struct file *filp); | |
20630 | extern int expand_stack(struct vm_area_struct *vma, unsigned long address); | |
20631 | extern int expand_downwards(struct vm_area_struct *vma, | |
20632 | unsigned long address); | |
20633 | extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); | |
20634 | extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, | |
20635 | struct vm_area_struct **pprev); | |
20636 | static inline __attribute__((always_inline)) struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) | |
20637 | { | |
20638 | struct vm_area_struct * vma = find_vma(mm,start_addr); | |
20639 | if (__builtin_constant_p(((vma && end_addr <= vma->vm_start))) ? !!((vma && end_addr <= vma->vm_start)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1519, }; ______r = !!((vma && end_addr <= vma->vm_start)); ______f.miss_hit[______r]++; ______r; })) | |
20640 | vma = ((void *)0); | |
20641 | return vma; | |
20642 | } | |
20643 | static inline __attribute__((always_inline)) unsigned long vma_pages(struct vm_area_struct *vma) | |
20644 | { | |
20645 | return (vma->vm_end - vma->vm_start) >> 12; | |
20646 | } | |
20647 | pgprot_t vm_get_page_prot(unsigned long vm_flags); | |
20648 | struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); | |
20649 | int remap_pfn_range(struct vm_area_struct *, unsigned long addr, | |
20650 | unsigned long pfn, unsigned long size, pgprot_t); | |
20651 | int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); | |
20652 | int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, | |
20653 | unsigned long pfn); | |
20654 | int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, | |
20655 | unsigned long pfn); | |
20656 | struct page *follow_page(struct vm_area_struct *, unsigned long address, | |
20657 | unsigned int foll_flags); | |
20658 | typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, | |
20659 | void *data); | |
20660 | extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, | |
20661 | unsigned long size, pte_fn_t fn, void *data); | |
20662 | void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long); | |
20663 | static inline __attribute__((always_inline)) void | |
20664 | kernel_map_pages(struct page *page, int numpages, int enable) {} | |
20665 | static inline __attribute__((always_inline)) void enable_debug_pagealloc(void) | |
20666 | { | |
20667 | } | |
20668 | static inline __attribute__((always_inline)) bool kernel_page_present(struct page *page) { return true; } | |
20669 | extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); | |
20670 | int in_gate_area_no_mm(unsigned long addr); | |
20671 | int in_gate_area(struct mm_struct *mm, unsigned long addr); | |
20672 | int drop_caches_sysctl_handler(struct ctl_table *, int, | |
20673 | void *, size_t *, loff_t *); | |
20674 | unsigned long shrink_slab(struct shrink_control *shrink, | |
20675 | unsigned long nr_pages_scanned, | |
20676 | unsigned long lru_pages); | |
20677 | extern int randomize_va_space; | |
20678 | const char * arch_vma_name(struct vm_area_struct *vma); | |
20679 | void print_vma_addr(char *prefix, unsigned long rip); | |
20680 | void sparse_mem_maps_populate_node(struct page **map_map, | |
20681 | unsigned long pnum_begin, | |
20682 | unsigned long pnum_end, | |
20683 | unsigned long map_count, | |
20684 | int nodeid); | |
20685 | struct page *sparse_mem_map_populate(unsigned long pnum, int nid); | |
20686 | pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); | |
20687 | pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); | |
20688 | pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); | |
20689 | pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); | |
20690 | void *vmemmap_alloc_block(unsigned long size, int node); | |
20691 | void *vmemmap_alloc_block_buf(unsigned long size, int node); | |
20692 | void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); | |
20693 | int vmemmap_populate_basepages(struct page *start_page, | |
20694 | unsigned long pages, int node); | |
20695 | int vmemmap_populate(struct page *start_page, unsigned long pages, int node); | |
20696 | void vmemmap_populate_print_last(void); | |
20697 | enum mf_flags { | |
20698 | MF_COUNT_INCREASED = 1 << 0, | |
20699 | }; | |
20700 | extern void memory_failure(unsigned long pfn, int trapno); | |
20701 | extern int __memory_failure(unsigned long pfn, int trapno, int flags); | |
20702 | extern int unpoison_memory(unsigned long pfn); | |
20703 | extern int sysctl_memory_failure_early_kill; | |
20704 | extern int sysctl_memory_failure_recovery; | |
20705 | extern void shake_page(struct page *p, int access); | |
20706 | extern atomic_long_t mce_bad_pages; | |
20707 | extern int soft_offline_page(struct page *page, int flags); | |
20708 | extern void dump_page(struct page *page); | |
20709 | extern void clear_huge_page(struct page *page, | |
20710 | unsigned long addr, | |
20711 | unsigned int pages_per_huge_page); | |
20712 | extern void copy_user_huge_page(struct page *dst, struct page *src, | |
20713 | unsigned long addr, struct vm_area_struct *vma, | |
20714 | unsigned int pages_per_huge_page); | |
/*
 * Decode the cache attribute ("memtype") recorded for a RAM page in two
 * page-flag bits, PG_uncached and PG_arch_1 (preprocessed x86
 * cacheflush.h). The if/else chains below are ftrace branch-profiler
 * expansions of plain conditionals; each records hit/miss counts in a
 * static _ftrace_branch record before testing the real condition.
 *
 * Encoding (inverse of set_page_memtype() below):
 *   neither bit set  -> -1 (no memtype recorded; note the implicit
 *                       conversion to unsigned long yields ULONG_MAX)
 *   PG_arch_1 only   -> pte attribute bit 3 (presumably _PAGE_CACHE_WC —
 *                       confirm against asm/pgtable_types.h)
 *   PG_uncached only -> pte attribute bit 4 (presumably
 *                       _PAGE_CACHE_UC_MINUS — confirm)
 *   both bits set    -> 0 (the default, write-back, attribute)
 */
static inline __attribute__((always_inline)) unsigned long get_page_memtype(struct page *pg)
{
 /* Mask pg->flags down to just the two memtype bits. */
 unsigned long pg_flags = pg->flags & (1UL << PG_uncached | 1UL << PG_arch_1);
 if (__builtin_constant_p(((pg_flags == 0))) ? !!((pg_flags == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/cacheflush.h", .line = 28, }; ______r = !!((pg_flags == 0)); ______f.miss_hit[______r]++; ______r; }))
  return -1;
 else if (__builtin_constant_p(((pg_flags == (1UL << PG_arch_1)))) ? !!((pg_flags == (1UL << PG_arch_1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/cacheflush.h", .line = 30, }; ______r = !!((pg_flags == (1UL << PG_arch_1))); ______f.miss_hit[______r]++; ______r; }))
  return ((((pteval_t)(1)) << 3));
 else if (__builtin_constant_p(((pg_flags == (1UL << PG_uncached)))) ? !!((pg_flags == (1UL << PG_uncached))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/cacheflush.h", .line = 32, }; ______r = !!((pg_flags == (1UL << PG_uncached))); ______f.miss_hit[______r]++; ______r; }))
  return ((((pteval_t)(1)) << 4));
 else
  return (0);
}
/*
 * Record 'memtype' for a RAM page by encoding it into the PG_uncached and
 * PG_arch_1 bits of pg->flags — the inverse of get_page_memtype() above.
 * Note there is no default: case, so an unrecognized memtype leaves
 * memtype_flags at 0, which clears both bits (the "-1 / none" encoding).
 */
static inline __attribute__((always_inline)) void set_page_memtype(struct page *pg, unsigned long memtype)
{
 unsigned long memtype_flags = 0;
 unsigned long old_flags;
 unsigned long new_flags;
 /* Map the pte attribute value to its two-bit page-flag encoding. */
 switch (memtype) {
 case ((((pteval_t)(1)) << 3)):
  memtype_flags = (1UL << PG_arch_1);
  break;
 case ((((pteval_t)(1)) << 4)):
  memtype_flags = (1UL << PG_uncached);
  break;
 case (0):
  memtype_flags = (1UL << PG_uncached | 1UL << PG_arch_1);
  break;
 }
 /*
  * cmpxchg retry loop (the while condition is the preprocessed expansion
  * of a size-dispatched lock-prefixed cmpxchg): atomically swap in the
  * new memtype bits without losing concurrent updates to the other
  * pg->flags bits.
  */
 do {
  old_flags = pg->flags;
  new_flags = (old_flags & (~(1UL << PG_uncached | 1UL << PG_arch_1))) | memtype_flags;
 } while (({ __typeof__(*(((&pg->flags)))) __ret; __typeof__(*(((&pg->flags)))) __old = (((old_flags))); __typeof__(*(((&pg->flags)))) __new = (((new_flags))); switch ((sizeof(*&pg->flags))) { case 1: { volatile u8 *__ptr = (volatile u8 *)(((&pg->flags))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)(((&pg->flags))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)(((&pg->flags))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; }) != old_flags);
}
20748 | int _set_memory_uc(unsigned long addr, int numpages); | |
20749 | int _set_memory_wc(unsigned long addr, int numpages); | |
20750 | int _set_memory_wb(unsigned long addr, int numpages); | |
20751 | int set_memory_uc(unsigned long addr, int numpages); | |
20752 | int set_memory_wc(unsigned long addr, int numpages); | |
20753 | int set_memory_wb(unsigned long addr, int numpages); | |
20754 | int set_memory_x(unsigned long addr, int numpages); | |
20755 | int set_memory_nx(unsigned long addr, int numpages); | |
20756 | int set_memory_ro(unsigned long addr, int numpages); | |
20757 | int set_memory_rw(unsigned long addr, int numpages); | |
20758 | int set_memory_np(unsigned long addr, int numpages); | |
20759 | int set_memory_4k(unsigned long addr, int numpages); | |
20760 | int set_memory_array_uc(unsigned long *addr, int addrinarray); | |
20761 | int set_memory_array_wc(unsigned long *addr, int addrinarray); | |
20762 | int set_memory_array_wb(unsigned long *addr, int addrinarray); | |
20763 | int set_pages_array_uc(struct page **pages, int addrinarray); | |
20764 | int set_pages_array_wc(struct page **pages, int addrinarray); | |
20765 | int set_pages_array_wb(struct page **pages, int addrinarray); | |
20766 | int set_pages_uc(struct page *page, int numpages); | |
20767 | int set_pages_wb(struct page *page, int numpages); | |
20768 | int set_pages_x(struct page *page, int numpages); | |
20769 | int set_pages_nx(struct page *page, int numpages); | |
20770 | int set_pages_ro(struct page *page, int numpages); | |
20771 | int set_pages_rw(struct page *page, int numpages); | |
20772 | void clflush_cache_range(void *addr, unsigned int size); | |
20773 | void mark_rodata_ro(void); | |
20774 | extern const int rodata_test_data; | |
20775 | extern int kernel_set_to_readonly; | |
20776 | void set_kernel_text_rw(void); | |
20777 | void set_kernel_text_ro(void); | |
/*
 * Stub: the rodata self-test is compiled out in this configuration, so
 * always report success.
 *
 * Returns: 0 unconditionally.
 */
static inline __attribute__((always_inline)) int rodata_test(void)
{
 int result = 0;

 return result;
}
/*
 * No-op stub on this build: anonymous pages need no explicit cache flush
 * here (presumably because this architecture's caches are coherent —
 * confirm). Kept so common code can call it unconditionally.
 */
static inline __attribute__((always_inline)) void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
/*
 * No-op stub on this build: no kernel D-cache maintenance is required for
 * a page here. Kept so common code can call it unconditionally.
 */
static inline __attribute__((always_inline)) void flush_kernel_dcache_page(struct page *page)
{
}
/*
 * No-op stub on this build: kernel vmap ranges need no explicit flush
 * before I/O here. Kept so common code can call it unconditionally.
 */
static inline __attribute__((always_inline)) void flush_kernel_vmap_range(void *vaddr, int size)
{
}
/*
 * No-op stub on this build: kernel vmap ranges need no explicit
 * invalidation after I/O here. Counterpart of flush_kernel_vmap_range().
 */
static inline __attribute__((always_inline)) void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
/*
 * Flush this CPU's TLB of non-global entries: rewriting CR3 with its
 * current value invalidates all TLB entries not marked global.
 */
static inline __attribute__((always_inline)) void __native_flush_tlb(void)
{
 unsigned long cr3 = native_read_cr3();

 native_write_cr3(cr3);
}
/*
 * Flush the entire TLB, including global-page entries, by clearing and
 * then restoring CR4.PGE (bit 0x00000080). The do/while blocks are the
 * preprocessed expansions of arch_local_irq_save()/restore(): interrupts
 * are disabled across the two CR4 writes so nothing else runs between
 * the clear and the restore.
 */
static inline __attribute__((always_inline)) void __native_flush_tlb_global(void)
{
 unsigned long flags;
 unsigned long cr4;
 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0);
 cr4 = native_read_cr4();
 /* Clearing PGE invalidates all TLB entries, global ones included. */
 native_write_cr4(cr4 & ~0x00000080);
 /* Restore the original CR4 value (re-enables global pages if set). */
 native_write_cr4(cr4);
 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0);
}
/*
 * Invalidate the single TLB entry mapping the page that contains 'addr'
 * via INVLPG; the "memory" clobber keeps the compiler from reordering
 * memory accesses across the invalidation.
 */
static inline __attribute__((always_inline)) void __native_flush_tlb_single(unsigned long addr)
{
 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
/*
 * Flush everything from this CPU's TLB. The enormous condition is the
 * preprocessed expansion of a cpu_has() test for capability bit
 * (0*32+13) — X86_FEATURE_PGE — wrapped in the ftrace branch profiler:
 * if the CPU supports global pages, the CR4.PGE-toggling global flush is
 * required; otherwise a plain CR3 reload suffices.
 */
static inline __attribute__((always_inline)) void __flush_tlb_all(void)
{
 if (__builtin_constant_p((((__builtin_constant_p((0*32+13)) && ( ((((0*32+13))>>5)==0 && (1UL<<(((0*32+13))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+13))>>5)==1 && (1UL<<(((0*32+13))&31) & (0|0))) || ((((0*32+13))>>5)==2 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==3 && (1UL<<(((0*32+13))&31) & (0))) || ((((0*32+13))>>5)==4 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==5 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==6 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==7 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==8 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==9 && (1UL<<(((0*32+13))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+13))) ? constant_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+13)) && ( ((((0*32+13))>>5)==0 && (1UL<<(((0*32+13))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+13))>>5)==1 && (1UL<<(((0*32+13))&31) & (0|0))) || ((((0*32+13))>>5)==2 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==3 && (1UL<<(((0*32+13))&31) & (0))) || ((((0*32+13))>>5)==4 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==5 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==6 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==7 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==8 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==9 && (1UL<<(((0*32+13))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+13))) ? constant_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/tlbflush.h", .line = 51, }; ______r = !!(((__builtin_constant_p((0*32+13)) && ( ((((0*32+13))>>5)==0 && (1UL<<(((0*32+13))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+13))>>5)==1 && (1UL<<(((0*32+13))&31) & (0|0))) || ((((0*32+13))>>5)==2 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==3 && (1UL<<(((0*32+13))&31) & (0))) || ((((0*32+13))>>5)==4 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==5 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==6 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==7 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==8 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==9 && (1UL<<(((0*32+13))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+13))) ? constant_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; }))
  __flush_tlb_global();
 else
  __flush_tlb();
}
/*
 * Flush the TLB entry for one address. After preprocessing the profiled
 * condition is the constant (1), so this always takes the
 * __flush_tlb_single() path; the __flush_tlb() arm is dead code kept by
 * the ftrace branch-profiling expansion.
 */
static inline __attribute__((always_inline)) void __flush_tlb_one(unsigned long addr)
{
 if (__builtin_constant_p(((1))) ? !!((1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/tlbflush.h", .line = 59, }; ______r = !!((1)); ______f.miss_hit[______r]++; ______r; }))
  __flush_tlb_single(addr);
 else
  __flush_tlb();
}
20826 | extern void flush_tlb_all(void); | |
20827 | extern void flush_tlb_current_task(void); | |
20828 | extern void flush_tlb_mm(struct mm_struct *); | |
20829 | extern void flush_tlb_page(struct vm_area_struct *, unsigned long); | |
20830 | static inline __attribute__((always_inline)) void flush_tlb_range(struct vm_area_struct *vma, | |
20831 | unsigned long start, unsigned long end) | |
20832 | { | |
20833 | flush_tlb_mm(vma->vm_mm); | |
20834 | } | |
20835 | void native_flush_tlb_others(const struct cpumask *cpumask, | |
20836 | struct mm_struct *mm, unsigned long va); | |
/*
 * Per-cpu TLB bookkeeping (instantiated as the per-cpu variable
 * cpu_tlbstate below and reset by reset_lazy_tlbstate()).
 */
struct tlb_state {
 struct mm_struct *active_mm; /* mm currently live on this CPU */
 int state;                   /* lazy-TLB state word; 0 after reset */
};
20841 | extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tlb_state) cpu_tlbstate __attribute__((__aligned__((1 << (6))))); | |
/*
 * Reset this CPU's lazy-TLB tracking: cpu_tlbstate.state <- 0 and
 * cpu_tlbstate.active_mm <- &init_mm. Each statement below is the
 * preprocessed expansion of a per-cpu write (an %fs-segment-relative mov
 * selected by operand size, wrapped in a constant-false ftrace branch
 * record).
 */
static inline __attribute__((always_inline)) void reset_lazy_tlbstate(void)
{
 do { typedef typeof(cpu_tlbstate.state) pto_T__; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/tlbflush.h", .line = 159, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pto_T__ pto_tmp__; pto_tmp__ = (0); (void)pto_tmp__; } switch (sizeof(cpu_tlbstate.state)) { case 1: asm("mov" "b %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.state) : "qi" ((pto_T__)(0))); break; case 2: asm("mov" "w %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.state) : "ri" ((pto_T__)(0))); break; case 4: asm("mov" "l %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.state) : "ri" ((pto_T__)(0))); break; case 8: asm("mov" "q %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.state) : "re" ((pto_T__)(0))); break; default: __bad_percpu_size(); } } while (0);
 do { typedef typeof(cpu_tlbstate.active_mm) pto_T__; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/tlbflush.h", .line = 160, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pto_T__ pto_tmp__; pto_tmp__ = (&init_mm); (void)pto_tmp__; } switch (sizeof(cpu_tlbstate.active_mm)) { case 1: asm("mov" "b %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.active_mm) : "qi" ((pto_T__)(&init_mm))); break; case 2: asm("mov" "w %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.active_mm) : "ri" ((pto_T__)(&init_mm))); break; case 4: asm("mov" "l %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.active_mm) : "ri" ((pto_T__)(&init_mm))); break; case 8: asm("mov" "q %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.active_mm) : "re" ((pto_T__)(&init_mm))); break; default: __bad_percpu_size(); } } while (0);
}
/*
 * Kernel-range TLB flush. No ranged primitive is available in this
 * configuration, so the whole TLB is flushed on all CPUs; 'start' and
 * 'end' are accepted for interface compatibility but unused.
 */
static inline __attribute__((always_inline)) void flush_tlb_kernel_range(unsigned long start,
 unsigned long end)
{
 flush_tlb_all();
}
20852 | extern unsigned long highstart_pfn, highend_pfn; | |
20853 | extern void *kmap_high(struct page *page); | |
20854 | extern void kunmap_high(struct page *page); | |
20855 | void *kmap(struct page *page); | |
20856 | void kunmap(struct page *page); | |
20857 | void *kmap_atomic_prot(struct page *page, pgprot_t prot); | |
20858 | void *__kmap_atomic(struct page *page); | |
20859 | void __kunmap_atomic(void *kvaddr); | |
20860 | void *kmap_atomic_pfn(unsigned long pfn); | |
20861 | void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot); | |
20862 | struct page *kmap_atomic_to_page(void *ptr); | |
20863 | extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn, | |
20864 | unsigned long end_pfn); | |
20865 | unsigned int nr_free_highpages(void); | |
20866 | extern unsigned long totalhigh_pages; | |
20867 | void kmap_flush_unused(void); | |
20868 | extern __attribute__((section(".data..percpu" ""))) __typeof__(int) __kmap_atomic_idx; | |
20869 | static inline __attribute__((always_inline)) int kmap_atomic_idx_push(void) | |
20870 | { | |
20871 | int idx = ({ typeof(__kmap_atomic_idx) pscr2_ret__; do { const void *__vpp_verify = (typeof(&(__kmap_atomic_idx)))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(__kmap_atomic_idx)) { case 1: pscr2_ret__ = ({ typeof(__kmap_atomic_idx) paro_ret__ = 1; switch (sizeof(__kmap_atomic_idx)) { case 1: asm("xaddb %0, ""%%""fs"":" "%P" "1" : "+q" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 2: asm("xaddw %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 4: asm("xaddl %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 8: asm("xaddq %0, ""%%""fs"":" "%P" "1" : "+re" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; default: __bad_percpu_size(); } paro_ret__ += 1; paro_ret__; }); break; case 2: pscr2_ret__ = ({ typeof(__kmap_atomic_idx) paro_ret__ = 1; switch (sizeof(__kmap_atomic_idx)) { case 1: asm("xaddb %0, ""%%""fs"":" "%P" "1" : "+q" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 2: asm("xaddw %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 4: asm("xaddl %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 8: asm("xaddq %0, ""%%""fs"":" "%P" "1" : "+re" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; default: __bad_percpu_size(); } paro_ret__ += 1; paro_ret__; }); break; case 4: pscr2_ret__ = ({ typeof(__kmap_atomic_idx) paro_ret__ = 1; switch (sizeof(__kmap_atomic_idx)) { case 1: asm("xaddb %0, ""%%""fs"":" "%P" "1" : "+q" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 2: asm("xaddw %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 4: asm("xaddl %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 8: asm("xaddq %0, ""%%""fs"":" "%P" "1" : "+re" (paro_ret__), "+m" (__kmap_atomic_idx) : 
: "memory"); break; default: __bad_percpu_size(); } paro_ret__ += 1; paro_ret__; }); break; case 8: pscr2_ret__ = ({ typeof(__kmap_atomic_idx) ret__; do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); do { do { const void *__vpp_verify = (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((__kmap_atomic_idx))) { case 1: do { typedef typeof(((__kmap_atomic_idx))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof(((__kmap_atomic_idx)))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof(((__kmap_atomic_idx))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof(((__kmap_atomic_idx)))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof(((__kmap_atomic_idx))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? 
!!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof(((__kmap_atomic_idx)))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&(((__kmap_atomic_idx)))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&(((__kmap_atomic_idx))))); (typeof(*(&(((__kmap_atomic_idx))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); ret__ = ({ typeof((__kmap_atomic_idx)) pscr_ret__; do { const void *__vpp_verify = (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((__kmap_atomic_idx))) { case 1: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 2: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : 
"m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 4: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 8: pscr_ret__ = (*({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((__kmap_atomic_idx)))); (typeof(*(&((__kmap_atomic_idx)))) *)tcp_ptr__; }));break; default: __bad_size_call_parameter();break; } pscr_ret__; }); do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); ret__; }); break; default: __bad_size_call_parameter(); break; } pscr2_ret__; }) - 1; | |
20872 | return idx; | |
20873 | } | |
20874 | static inline __attribute__((always_inline)) int kmap_atomic_idx(void) | |
20875 | { | |
20876 | return ({ typeof((__kmap_atomic_idx)) pscr_ret__; do { const void *__vpp_verify = (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((__kmap_atomic_idx))) { case 1: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 2: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 4: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 8: pscr_ret__ = (*({ unsigned long tcp_ptr__; do { const void *__vpp_verify 
= (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((__kmap_atomic_idx)))); (typeof(*(&((__kmap_atomic_idx)))) *)tcp_ptr__; }));break; default: __bad_size_call_parameter();break; } pscr_ret__; }) - 1; | |
20877 | } | |
20878 | static inline __attribute__((always_inline)) void kmap_atomic_idx_pop(void) | |
20879 | { | |
20880 | do { do { const void *__vpp_verify = (typeof(&((((__kmap_atomic_idx))))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((((__kmap_atomic_idx))))) { case 1: do { typedef typeof(((((__kmap_atomic_idx))))) pao_T__; const int pao_ID__ = (__builtin_constant_p((-(1))) && (((-(1))) == 1 || ((-(1))) == -1)) ? ((-(1))) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((-(1))); (void)pao_tmp__; } switch (sizeof(((((__kmap_atomic_idx)))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "qi" ((pao_T__)((-(1))))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "re" ((pao_T__)((-(1))))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof(((((__kmap_atomic_idx))))) pao_T__; const int pao_ID__ = (__builtin_constant_p((-(1))) && (((-(1))) == 1 || ((-(1))) == -1)) ? ((-(1))) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((-(1))); (void)pao_tmp__; } switch (sizeof(((((__kmap_atomic_idx)))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "qi" ((pao_T__)((-(1))))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "re" ((pao_T__)((-(1))))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof(((((__kmap_atomic_idx))))) pao_T__; const int pao_ID__ = (__builtin_constant_p((-(1))) && (((-(1))) == 1 || ((-(1))) == -1)) ? ((-(1))) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((-(1))); (void)pao_tmp__; } switch (sizeof(((((__kmap_atomic_idx)))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "qi" ((pao_T__)((-(1))))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "re" ((pao_T__)((-(1))))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&(((((__kmap_atomic_idx)))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&(((((__kmap_atomic_idx))))))); (typeof(*(&(((((__kmap_atomic_idx))))))) *)tcp_ptr__; }) += ((-(1))); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); | |
20881 | } | |
20882 | static inline __attribute__((always_inline)) void clear_user_highpage(struct page *page, unsigned long vaddr) | |
20883 | { | |
20884 | void *addr = __kmap_atomic(page); | |
20885 | clear_user_page(addr, vaddr, page); | |
20886 | do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((addr)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((addr)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((addr)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 132, }; ______r = !!((__builtin_types_compatible_p(typeof((addr)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(addr); } while (0); | |
20887 | } | |
20888 | static inline __attribute__((always_inline)) struct page * | |
20889 | alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, | |
20890 | unsigned long vaddr) | |
20891 | { | |
20892 | return alloc_pages_node(numa_node_id(), ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( gfp_t)0x20000u) | (( gfp_t)0x02u)) | (( gfp_t)0x8000u) | (( gfp_t)0x08u), 0); | |
20893 | } | |
20894 | static inline __attribute__((always_inline)) void clear_highpage(struct page *page) | |
20895 | { | |
20896 | void *kaddr = __kmap_atomic(page); | |
20897 | clear_page(kaddr); | |
20898 | do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 185, }; ______r = !!((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(kaddr); } while (0); | |
20899 | } | |
20900 | static inline __attribute__((always_inline)) void zero_user_segments(struct page *page, | |
20901 | unsigned start1, unsigned end1, | |
20902 | unsigned start2, unsigned end2) | |
20903 | { | |
20904 | void *kaddr = __kmap_atomic(page); | |
20905 | do { if (__builtin_constant_p((((__builtin_constant_p(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) ? !!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 194, }; ______r = __builtin_expect(!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) ? !!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 194, }; ______r = __builtin_expect(!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 194, }; ______r = !!(((__builtin_constant_p(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) ? 
!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 194, }; ______r = __builtin_expect(!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/highmem.h"), "i" (194), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0); | |
20906 | if (__builtin_constant_p(((end1 > start1))) ? !!((end1 > start1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 196, }; ______r = !!((end1 > start1)); ______f.miss_hit[______r]++; ______r; })) | |
20907 | __builtin_memset(kaddr + start1, 0, end1 - start1); | |
20908 | if (__builtin_constant_p(((end2 > start2))) ? !!((end2 > start2)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 199, }; ______r = !!((end2 > start2)); ______f.miss_hit[______r]++; ______r; })) | |
20909 | __builtin_memset(kaddr + start2, 0, end2 - start2); | |
20910 | do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 202, }; ______r = !!((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(kaddr); } while (0); | |
20911 | do { } while (0); | |
20912 | } | |
20913 | static inline __attribute__((always_inline)) void zero_user_segment(struct page *page, | |
20914 | unsigned start, unsigned end) | |
20915 | { | |
20916 | zero_user_segments(page, start, end, 0, 0); | |
20917 | } | |
20918 | static inline __attribute__((always_inline)) void zero_user(struct page *page, | |
20919 | unsigned start, unsigned size) | |
20920 | { | |
20921 | zero_user_segments(page, start, start + size, 0, 0); | |
20922 | } | |
20923 | static inline __attribute__((always_inline)) void __attribute__((deprecated)) memclear_highpage_flush(struct page *page, | |
20924 | unsigned int offset, unsigned int size) | |
20925 | { | |
20926 | zero_user(page, offset, size); | |
20927 | } | |
20928 | static inline __attribute__((always_inline)) void copy_user_highpage(struct page *to, struct page *from, | |
20929 | unsigned long vaddr, struct vm_area_struct *vma) | |
20930 | { | |
20931 | char *vfrom, *vto; | |
20932 | vfrom = __kmap_atomic(from); | |
20933 | vto = __kmap_atomic(to); | |
20934 | copy_user_page(vto, vfrom, vaddr, to); | |
20935 | do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 234, }; ______r = !!((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vto); } while (0); | |
20936 | do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 235, }; ______r = !!((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vfrom); } while (0); | |
20937 | } | |
20938 | static inline __attribute__((always_inline)) void copy_highpage(struct page *to, struct page *from) | |
20939 | { | |
20940 | char *vfrom, *vto; | |
20941 | vfrom = __kmap_atomic(from); | |
20942 | vto = __kmap_atomic(to); | |
20943 | copy_page(vto, vfrom); | |
20944 | do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 247, }; ______r = !!((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vto); } while (0); | |
20945 | do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 248, }; ______r = !!((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vfrom); } while (0); | |
20946 | } | |
20947 | struct scatterlist { | |
20948 | unsigned long page_link; | |
20949 | unsigned int offset; | |
20950 | unsigned int length; | |
20951 | dma_addr_t dma_address; | |
20952 | unsigned int dma_length; | |
20953 | }; | |
20954 | struct sg_table { | |
20955 | struct scatterlist *sgl; | |
20956 | unsigned int nents; | |
20957 | unsigned int orig_nents; | |
20958 | }; | |
20959 | static inline __attribute__((always_inline)) void sg_assign_page(struct scatterlist *sg, struct page *page) | |
20960 | { | |
20961 | unsigned long page_link = sg->page_link & 0x3; | |
20962 | do { if (__builtin_constant_p((((__builtin_constant_p((unsigned long) page & 0x03) ? !!((unsigned long) page & 0x03) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/scatterlist.h", .line = 63, }; ______r = __builtin_expect(!!((unsigned long) page & 0x03), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned long) page & 0x03) ? !!((unsigned long) page & 0x03) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/scatterlist.h", .line = 63, }; ______r = __builtin_expect(!!((unsigned long) page & 0x03), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/scatterlist.h", .line = 63, }; ______r = !!(((__builtin_constant_p((unsigned long) page & 0x03) ? !!((unsigned long) page & 0x03) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/scatterlist.h", .line = 63, }; ______r = __builtin_expect(!!((unsigned long) page & 0x03), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/scatterlist.h"), "i" (63), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0); | |
20963 | sg->page_link = page_link | (unsigned long) page; | |
20964 | } | |
20965 | static inline __attribute__((always_inline)) void sg_set_page(struct scatterlist *sg, struct page *page, | |
20966 | unsigned int len, unsigned int offset) | |
20967 | { | |
20968 | sg_assign_page(sg, page); | |
20969 | sg->offset = offset; | |
20970 | sg->length = len; | |
20971 | } | |
20972 | static inline __attribute__((always_inline)) struct page *sg_page(struct scatterlist *sg) | |
20973 | { | |
20974 | return (struct page *)((sg)->page_link & ~0x3); | |
20975 | } | |
20976 | static inline __attribute__((always_inline)) void sg_set_buf(struct scatterlist *sg, const void *buf, | |
20977 | unsigned int buflen) | |
20978 | { | |
20979 | sg_set_page(sg, (mem_map + (((((unsigned long)(buf)) - ((unsigned long)(0xC0000000UL))) >> 12) - (0UL))), buflen, ((unsigned long)(buf) & ~(~(((1UL) << 12)-1)))); | |
20980 | } | |
20981 | static inline __attribute__((always_inline)) void sg_chain(struct scatterlist *prv, unsigned int prv_nents, | |
20982 | struct scatterlist *sgl) | |
20983 | { | |
20984 | prv[prv_nents - 1].offset = 0; | |
20985 | prv[prv_nents - 1].length = 0; | |
20986 | prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02; | |
20987 | } | |
20988 | static inline __attribute__((always_inline)) void sg_mark_end(struct scatterlist *sg) | |
20989 | { | |
20990 | sg->page_link |= 0x02; | |
20991 | sg->page_link &= ~0x01; | |
20992 | } | |
/*
 * sg_phys - physical address of the data described by @sg.
 * The cast expression is the pre-expanded i386 page_to_phys(): the page's
 * index in mem_map shifted by PAGE_SHIFT (12), plus the in-page offset.
 */
static inline __attribute__((always_inline)) dma_addr_t sg_phys(struct scatterlist *sg)
{
	return ((dma_addr_t)((unsigned long)((sg_page(sg)) - mem_map) + (0UL)) << 12) + sg->offset;
}
20997 | static inline __attribute__((always_inline)) void *sg_virt(struct scatterlist *sg) | |
20998 | { | |
20999 | return page_address(sg_page(sg)) + sg->offset; | |
21000 | } | |
/* Scatterlist traversal and table management — implemented out of line. */
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
/* Allocation hooks used by the __sg_*_table variants below. */
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
      sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
/* Copy between a flat kernel buffer and up to @nents scatterlist entries. */
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
      void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
    void *buf, size_t buflen);
/*
 * sg_mapping_iter - cursor state for sg_miter_start/next/stop.
 * Fields with a double-underscore prefix are iterator-internal.
 */
struct sg_mapping_iter {
	struct page *page;	/* page currently mapped */
	void *addr;		/* mapped address of the current chunk */
	size_t length;		/* length of the current chunk */
	size_t consumed;	/* bytes the caller has consumed of it */
	struct scatterlist *__sg;	/* current SG entry */
	unsigned int __nents;		/* entries remaining */
	unsigned int __offset;		/* offset within the current entry */
	unsigned int __flags;		/* flags passed to sg_miter_start */
};
/* Mapping-iterator entry points (implemented out of line). */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
      unsigned int nents, unsigned int flags);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
/*
 * crypto_kmap_type - pick an atomic-kmap slot for a crypto walk.
 * In softirq context (the expanded condition below tests the softirq bits
 * of preempt_count) use the KM_SOFTIRQ pair, otherwise the KM_USER pair;
 * @out (0 = input page, 1 = output page) selects which slot of the pair.
 * The ({ ... }) block is an expanded ftrace branch-profiling wrapper around
 * the plain condition — it only records hit/miss counts.
 */
static inline __attribute__((always_inline)) enum km_type crypto_kmap_type(int out)
{
	enum km_type type;
	if (__builtin_constant_p((((((current_thread_info()->preempt_count) & (((1UL << (8))-1) << (0 + 8))))))) ? !!(((((current_thread_info()->preempt_count) & (((1UL << (8))-1) << (0 + 8)))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 32, }; ______r = !!(((((current_thread_info()->preempt_count) & (((1UL << (8))-1) << (0 + 8)))))); ______f.miss_hit[______r]++; ______r; }))
		type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
	else
		type = out * (KM_USER1 - KM_USER0) + KM_USER0;
	return type;
}
/*
 * crypto_kmap - atomically map @page for a crypto walk.
 * @out is accepted for interface symmetry with crypto_kunmap() but plays
 * no role here: both directions map identically.
 */
static inline __attribute__((always_inline)) void *crypto_kmap(struct page *page, int out)
{
	(void)out;
	return __kmap_atomic(page);
}
/*
 * crypto_kunmap - undo a crypto_kmap().  @out is unused.
 * The expanded do/while block is a BUILD_BUG_ON guarding against passing a
 * struct page * where a mapped address is expected (compile-time check via
 * __builtin_types_compatible_p), followed by the actual __kunmap_atomic().
 */
static inline __attribute__((always_inline)) void crypto_kunmap(void *vaddr, int out)
{
	do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vaddr)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vaddr)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vaddr)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 47, }; ______r = !!((__builtin_types_compatible_p(typeof((vaddr)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vaddr); } while (0);
}
/*
 * crypto_yield - optionally reschedule between crypto work units.
 * If the request set the may-sleep flag (0x00000200) the expanded
 * cond_resched() is invoked; the surrounding ?: is an ftrace
 * branch-profiling wrapper with no semantic effect.
 */
static inline __attribute__((always_inline)) void crypto_yield(u32 flags)
{
	if (__builtin_constant_p(((flags & 0x00000200))) ? !!((flags & 0x00000200)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 52, }; ______r = !!((flags & 0x00000200)); ______f.miss_hit[______r]++; ______r; }))
		({ __might_sleep("include/crypto/scatterwalk.h", 53, 0); _cond_resched(); });
}
21052 | static inline __attribute__((always_inline)) void scatterwalk_sg_chain(struct scatterlist *sg1, int num, | |
21053 | struct scatterlist *sg2) | |
21054 | { | |
21055 | sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0); | |
21056 | sg1[num - 1].page_link &= ~0x02; | |
21057 | } | |
/*
 * scatterwalk_sg_next - advance to the next entry in a crypto scatterlist.
 * Returns NULL at an end marker (bit 1 of page_link).  Otherwise the next
 * slot is either a real entry (non-zero length) or a chain slot, in which
 * case its stored "page" pointer is really the next list (see
 * scatterwalk_sg_chain) and is returned as such.  The ?: wrapper is ftrace
 * branch profiling only.
 */
static inline __attribute__((always_inline)) struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
{
	if (__builtin_constant_p(((((sg)->page_link & 0x02)))) ? !!((((sg)->page_link & 0x02))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 65, }; ______r = !!((((sg)->page_link & 0x02))); ______f.miss_hit[______r]++; ______r; }))
		return ((void *)0);
	return (++sg)->length ? sg : (void *)sg_page(sg);
}
/*
 * scatterwalk_crypto_chain - append @sg to @head (a list of @num slots).
 * With @chain set, @sg's first entry is merged into @head's length and the
 * remainder (if any) is chained on; without a remainder @head is marked as
 * the end of the list.  Both ?: wrappers are ftrace branch profiling only.
 */
static inline __attribute__((always_inline)) void scatterwalk_crypto_chain(struct scatterlist *head,
									   struct scatterlist *sg,
									   int chain, int num)
{
	if (__builtin_constant_p(((chain))) ? !!((chain)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 75, }; ______r = !!((chain)); ______f.miss_hit[______r]++; ______r; })) {
		head->length += sg->length;
		sg = scatterwalk_sg_next(sg);
	}
	if (__builtin_constant_p(((sg))) ? !!((sg)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 80, }; ______r = !!((sg)); ______f.miss_hit[______r]++; ______r; }))
		scatterwalk_sg_chain(head, num, sg);
	else
		sg_mark_end(head);
}
21077 | static inline __attribute__((always_inline)) unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in, | |
21078 | struct scatter_walk *walk_out) | |
21079 | { | |
21080 | return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << 12) + | |
21081 | (int)(walk_in->offset - walk_out->offset)); | |
21082 | } | |
21083 | static inline __attribute__((always_inline)) unsigned int scatterwalk_pagelen(struct scatter_walk *walk) | |
21084 | { | |
21085 | unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; | |
21086 | unsigned int len_this_page = ((unsigned long)(~walk->offset) & ~(~(((1UL) << 12)-1))) + 1; | |
21087 | return len_this_page > len ? len : len_this_page; | |
21088 | } | |
/*
 * scatterwalk_clamp - cap @nbytes to what the current page can supply.
 */
static inline __attribute__((always_inline)) unsigned int scatterwalk_clamp(struct scatter_walk *walk,
									    unsigned int nbytes)
{
	unsigned int page_room = scatterwalk_pagelen(walk);

	return (nbytes <= page_room) ? nbytes : page_room;
}
21095 | static inline __attribute__((always_inline)) void scatterwalk_advance(struct scatter_walk *walk, | |
21096 | unsigned int nbytes) | |
21097 | { | |
21098 | walk->offset += nbytes; | |
21099 | } | |
21100 | static inline __attribute__((always_inline)) unsigned int scatterwalk_aligned(struct scatter_walk *walk, | |
21101 | unsigned int alignmask) | |
21102 | { | |
21103 | return !(walk->offset & alignmask); | |
21104 | } | |
21105 | static inline __attribute__((always_inline)) struct page *scatterwalk_page(struct scatter_walk *walk) | |
21106 | { | |
21107 | return sg_page(walk->sg) + (walk->offset >> 12); | |
21108 | } | |
/*
 * scatterwalk_unmap - release a mapping obtained via scatterwalk_map().
 * Thin forwarding wrapper around crypto_kunmap().
 */
static inline __attribute__((always_inline)) void scatterwalk_unmap(void *vaddr, int out)
{
	crypto_kunmap(vaddr, out);
}
/* Scatterwalk entry points — implemented out of line. */
void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
       size_t nbytes, int out);
void *scatterwalk_map(struct scatter_walk *walk, int out);
void scatterwalk_done(struct scatter_walk *walk, int out, int more);
/* Copy @nbytes at @start of @sg to/from @buf (@out selects direction). */
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
         unsigned int start, unsigned int nbytes, int out);
/*
 * aead_givcrypt_request - AEAD request with IV generation.
 * Wraps a plain aead_request plus the inputs the IV generator consumes
 * (see aead_givcrypt_set_giv below).
 */
struct aead_givcrypt_request {
	u64 seq;	/* sequence number supplied to the IV generator */
	u8 *giv;	/* buffer that receives the generated IV */
	struct aead_request areq;	/* embedded ordinary AEAD request */
};
21125 | static inline __attribute__((always_inline)) struct crypto_aead *aead_givcrypt_reqtfm( | |
21126 | struct aead_givcrypt_request *req) | |
21127 | { | |
21128 | return crypto_aead_reqtfm(&req->areq); | |
21129 | } | |
21130 | static inline __attribute__((always_inline)) int crypto_aead_givencrypt(struct aead_givcrypt_request *req) | |
21131 | { | |
21132 | struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req)); | |
21133 | return crt->givencrypt(req); | |
21134 | }; | |
21135 | static inline __attribute__((always_inline)) int crypto_aead_givdecrypt(struct aead_givcrypt_request *req) | |
21136 | { | |
21137 | struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req)); | |
21138 | return crt->givdecrypt(req); | |
21139 | }; | |
21140 | static inline __attribute__((always_inline)) void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req, | |
21141 | struct crypto_aead *tfm) | |
21142 | { | |
21143 | req->areq.base.tfm = crypto_aead_tfm(tfm); | |
21144 | } | |
/*
 * aead_givcrypt_alloc - allocate a givcrypt request sized for @tfm.
 * The allocation reserves crypto_aead_reqsize(tfm) extra bytes of per-tfm
 * request context after the structure.  On success the transform is bound
 * immediately; returns NULL when kmalloc fails.  The giant ?: expression
 * is the expanded likely() annotation plus ftrace branch profiling — it
 * evaluates to plain "req".
 */
static inline __attribute__((always_inline)) struct aead_givcrypt_request *aead_givcrypt_alloc(
	struct crypto_aead *tfm, gfp_t gfp)
{
	struct aead_givcrypt_request *req;
	req = kmalloc(sizeof(struct aead_givcrypt_request) +
		      crypto_aead_reqsize(tfm), gfp);
	if (__builtin_constant_p((((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/aead.h", .line = 65, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/aead.h", .line = 65, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/aead.h", .line = 65, }; ______r = !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/aead.h", .line = 65, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
		aead_givcrypt_set_tfm(req, tfm);
	return req;
}
/*
 * aead_givcrypt_free - release a request obtained from
 * aead_givcrypt_alloc().  kfree(NULL) is a no-op, so NULL is accepted.
 */
static inline __attribute__((always_inline)) void aead_givcrypt_free(struct aead_givcrypt_request *req)
{
	kfree(req);
}
21159 | static inline __attribute__((always_inline)) void aead_givcrypt_set_callback( | |
21160 | struct aead_givcrypt_request *req, u32 flags, | |
21161 | crypto_completion_t complete, void *data) | |
21162 | { | |
21163 | aead_request_set_callback(&req->areq, flags, complete, data); | |
21164 | } | |
21165 | static inline __attribute__((always_inline)) void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req, | |
21166 | struct scatterlist *src, | |
21167 | struct scatterlist *dst, | |
21168 | unsigned int nbytes, void *iv) | |
21169 | { | |
21170 | aead_request_set_crypt(&req->areq, src, dst, nbytes, iv); | |
21171 | } | |
21172 | static inline __attribute__((always_inline)) void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req, | |
21173 | struct scatterlist *assoc, | |
21174 | unsigned int assoclen) | |
21175 | { | |
21176 | aead_request_set_assoc(&req->areq, assoc, assoclen); | |
21177 | } | |
21178 | static inline __attribute__((always_inline)) void aead_givcrypt_set_giv(struct aead_givcrypt_request *req, | |
21179 | u8 *giv, u64 seq) | |
21180 | { | |
21181 | req->giv = giv; | |
21182 | req->seq = seq; | |
21183 | } | |
struct rtattr;	/* opaque; only passed through to template helpers */
/* AEAD-typed wrapper around the generic crypto_spawn. */
struct crypto_aead_spawn {
	struct crypto_spawn base;
};
/* Type descriptor for non-IV-generating AEADs (defined out of line). */
extern const struct crypto_type crypto_nivaead_type;
21189 | static inline __attribute__((always_inline)) void crypto_set_aead_spawn( | |
21190 | struct crypto_aead_spawn *spawn, struct crypto_instance *inst) | |
21191 | { | |
21192 | crypto_set_spawn(&spawn->base, inst); | |
21193 | } | |
/* Look up AEAD algorithm @name and take a reference into @spawn. */
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
       u32 type, u32 mask);
21196 | static inline __attribute__((always_inline)) void crypto_drop_aead(struct crypto_aead_spawn *spawn) | |
21197 | { | |
21198 | crypto_drop_spawn(&spawn->base); | |
21199 | } | |
21200 | static inline __attribute__((always_inline)) struct crypto_alg *crypto_aead_spawn_alg( | |
21201 | struct crypto_aead_spawn *spawn) | |
21202 | { | |
21203 | return spawn->base.alg; | |
21204 | } | |
21205 | static inline __attribute__((always_inline)) struct crypto_aead *crypto_spawn_aead( | |
21206 | struct crypto_aead_spawn *spawn) | |
21207 | { | |
21208 | return __crypto_aead_cast( | |
21209 | crypto_spawn_tfm(&spawn->base, 0x00000003, | |
21210 | 0x0000000f)); | |
21211 | } | |
/* Generic IV-generator template helpers (implemented out of line). */
struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
      struct rtattr **tb, u32 type,
      u32 mask);
void aead_geniv_free(struct crypto_instance *inst);
int aead_geniv_init(struct crypto_tfm *tfm);
void aead_geniv_exit(struct crypto_tfm *tfm);
21218 | static inline __attribute__((always_inline)) struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv) | |
21219 | { | |
21220 | return crypto_aead_crt(geniv)->base; | |
21221 | } | |
21222 | static inline __attribute__((always_inline)) void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req) | |
21223 | { | |
21224 | return aead_request_ctx(&req->areq); | |
21225 | } | |
21226 | static inline __attribute__((always_inline)) void aead_givcrypt_complete(struct aead_givcrypt_request *req, | |
21227 | int err) | |
21228 | { | |
21229 | aead_request_complete(&req->areq, err); | |
21230 | } | |
/* Per-tfm context for the async (cryptd-backed) AES wrappers below. */
struct async_aes_ctx {
	struct cryptd_ablkcipher *cryptd_tfm;	/* inner cryptd transform */
};
/* Per-tfm context for the RFC4106 AES-GCM mode (fields per their names;
 * exact GCM roles TODO confirm against the glue code using them). */
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16];			/* GHASH subkey material */
	struct crypto_aes_ctx aes_key_expanded;	/* expanded AES key schedule */
	u8 nonce[4];				/* RFC4106 salt/nonce portion */
	struct cryptd_aead *cryptd_tfm;		/* inner cryptd AEAD */
};
/* Completion carrier for the async hash-subkey derivation. */
struct aesni_gcm_set_hash_subkey_result {
	int err;			/* status reported by the operation */
	struct completion completion;	/* signalled when derivation is done */
};
/* Request bundle used while deriving the GHASH subkey. */
struct aesni_hash_subkey_req_data {
	u8 iv[16];					/* IV for the derivation */
	struct aesni_gcm_set_hash_subkey_result result;	/* see above */
	struct scatterlist sg;				/* single-entry SG list */
};
/* AES-NI assembler entry points; regparm(0) forces stack-based argument
 * passing on i386 so the asm side need not know the regparm convention. */
__attribute__((regparm(0))) int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
         unsigned int key_len);
__attribute__((regparm(0))) void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
      const u8 *in);
__attribute__((regparm(0))) void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
      const u8 *in);
__attribute__((regparm(0))) void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
          const u8 *in, unsigned int len);
__attribute__((regparm(0))) void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
          const u8 *in, unsigned int len);
__attribute__((regparm(0))) void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
          const u8 *in, unsigned int len, u8 *iv);
__attribute__((regparm(0))) void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
          const u8 *in, unsigned int len, u8 *iv);
/* FPU-availability helper module hooks. */
int crypto_fpu_init(void);
void crypto_fpu_exit(void);
/*
 * aes_ctx - round @raw_ctx up to the 16-byte alignment the AES-NI asm
 * requires.  If the crypto layer already guarantees at least that
 * alignment, alignment 1 (no-op) is used.  The ?: wrapper is ftrace
 * branch profiling only; the final expression is the expanded ALIGN().
 */
static inline __attribute__((always_inline)) struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = (16);
	if (__builtin_constant_p(((align <= crypto_tfm_ctx_alignment()))) ? !!((align <= crypto_tfm_ctx_alignment())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 162, }; ______r = !!((align <= crypto_tfm_ctx_alignment())); ______f.miss_hit[______r]++; ______r; }))
		align = 1;
	return (struct crypto_aes_ctx *)((((addr)) + ((typeof((addr)))((align)) - 1)) & ~((typeof((addr)))((align)) - 1));
}
/*
 * aes_set_key_common - validate and expand an AES key.
 * Rejects key lengths other than 16/24/32 bytes with -EINVAL (-22) and a
 * bad-key-len tfm flag (0x00200000).  When the FPU is usable the AES-NI
 * key-schedule routine runs inside kernel_fpu_begin/end; otherwise the
 * portable C expansion is used.  Both ?: wrappers are ftrace branch
 * profiling only.
 */
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	u32 *flags = &tfm->crt_flags;
	int err;
	if (__builtin_constant_p(((key_len != 16 && key_len != 24 && key_len != 32))) ? !!((key_len != 16 && key_len != 24 && key_len != 32)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
	"arch/x86/crypto/aesni-intel_glue.c"
	, .line =
	175
	, }; ______r = !!((key_len != 16 && key_len != 24 && key_len != 32)); ______f.miss_hit[______r]++; ______r; }))
	{
		*flags |= 0x00200000;	/* bad key length flag */
		return -22;		/* -EINVAL */
	}
	if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 180, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; }))
		err = crypto_aes_expand_key(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}
	return err;
}
21297 | static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |
21298 | unsigned int key_len) | |
21299 | { | |
21300 | return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len); | |
21301 | } | |
/*
 * aes_encrypt - single-block AES encryption (cipher hook).
 * Uses the AES-NI instruction path under kernel_fpu_begin/end when the
 * FPU is usable, otherwise falls back to the C implementation.  The ?:
 * wrapper is ftrace branch profiling only.
 */
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
	if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 201, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; }))
		crypto_aes_encrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}
/*
 * aes_decrypt - single-block AES decryption (cipher hook).
 * Mirror of aes_encrypt(): AES-NI inside kernel_fpu_begin/end when
 * possible, C fallback otherwise.  The ?: wrapper is ftrace branch
 * profiling only.
 */
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
	if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 214, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; }))
		crypto_aes_decrypt_x86(ctx, dst, src);
	else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}
/* Public "aes" single-block cipher backed by AES-NI (with C fallback in
 * the hooks above).  Flag/size values are pre-expanded constants. */
static struct crypto_alg aesni_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-aesni",
	.cra_priority = 300,			/* beats the generic C "aes" */
	.cra_flags = 0x00000001,		/* cipher-type algorithm */
	.cra_blocksize = 16,			/* AES block size */
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+(16)-1,	/* + alignment slack */
	.cra_alignmask = 0,
	.cra_module = (&__this_module),
	.cra_list = { &(aesni_alg.cra_list), &(aesni_alg.cra_list) },	/* expanded LIST_HEAD_INIT */
	.cra_u = {
		.cipher = {
			.cia_min_keysize = 16,
			.cia_max_keysize = 32,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt
		}
	}
};
21344 | static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |
21345 | { | |
21346 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); | |
21347 | aesni_enc(ctx, dst, src); | |
21348 | } | |
21349 | static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | |
21350 | { | |
21351 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); | |
21352 | aesni_dec(ctx, dst, src); | |
21353 | } | |
/* Internal-only cipher ("__" prefix, priority 0) exposing the raw AES-NI
 * hooks; used as the inner algorithm behind cryptd, never selected
 * directly by name lookups. */
static struct crypto_alg __aesni_alg = {
	.cra_name = "__aes-aesni",
	.cra_driver_name = "__driver-aes-aesni",
	.cra_priority = 0,			/* never auto-selected */
	.cra_flags = 0x00000001,		/* cipher-type algorithm */
	.cra_blocksize = 16,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+(16)-1,	/* + alignment slack */
	.cra_alignmask = 0,
	.cra_module = (&__this_module),
	.cra_list = { &(__aesni_alg.cra_list), &(__aesni_alg.cra_list) },
	.cra_u = {
		.cipher = {
			.cia_min_keysize = 16,
			.cia_max_keysize = 32,
			.cia_setkey = aes_set_key,
			.cia_encrypt = __aes_encrypt,	/* no FPU bracketing */
			.cia_decrypt = __aes_decrypt
		}
	}
};
21374 | static int ecb_encrypt(struct blkcipher_desc *desc, | |
21375 | struct scatterlist *dst, struct scatterlist *src, | |
21376 | unsigned int nbytes) | |
21377 | { | |
21378 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); | |
21379 | struct blkcipher_walk walk; | |
21380 | int err; | |
21381 | blkcipher_walk_init(&walk, dst, src, nbytes); | |
21382 | err = blkcipher_walk_virt(desc, &walk); | |
21383 | desc->flags &= ~0x00000200; | |
21384 | kernel_fpu_begin(); | |
21385 | while ((nbytes = walk.nbytes)) { | |
21386 | aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, | |
21387 | nbytes & (~(16 -1))); | |
21388 | nbytes &= 16 - 1; | |
21389 | err = blkcipher_walk_done(desc, &walk, nbytes); | |
21390 | } | |
21391 | kernel_fpu_end(); | |
21392 | return err; | |
21393 | } | |
21394 | static int ecb_decrypt(struct blkcipher_desc *desc, | |
21395 | struct scatterlist *dst, struct scatterlist *src, | |
21396 | unsigned int nbytes) | |
21397 | { | |
21398 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); | |
21399 | struct blkcipher_walk walk; | |
21400 | int err; | |
21401 | blkcipher_walk_init(&walk, dst, src, nbytes); | |
21402 | err = blkcipher_walk_virt(desc, &walk); | |
21403 | desc->flags &= ~0x00000200; | |
21404 | kernel_fpu_begin(); | |
21405 | while ((nbytes = walk.nbytes)) { | |
21406 | aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, | |
21407 | nbytes & (~(16 -1))); | |
21408 | nbytes &= 16 - 1; | |
21409 | err = blkcipher_walk_done(desc, &walk, nbytes); | |
21410 | } | |
21411 | kernel_fpu_end(); | |
21412 | return err; | |
21413 | } | |
/* Internal synchronous ECB blkcipher ("__" prefix, priority 0); serves as
 * the worker behind the async "ecb(aes)" wrapper via cryptd. */
static struct crypto_alg blk_ecb_alg = {
	.cra_name = "__ecb-aes-aesni",
	.cra_driver_name = "__driver-ecb-aes-aesni",
	.cra_priority = 0,			/* internal use only */
	.cra_flags = 0x00000004,		/* blkcipher-type algorithm */
	.cra_blocksize = 16,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+(16)-1,	/* + alignment slack */
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = (&__this_module),
	.cra_list = { &(blk_ecb_alg.cra_list), &(blk_ecb_alg.cra_list) },
	.cra_u = {
		.blkcipher = {
			.min_keysize = 16,
			.max_keysize = 32,
			.setkey = aes_set_key,
			.encrypt = ecb_encrypt,
			.decrypt = ecb_decrypt,
		},
	},
};
21435 | static int cbc_encrypt(struct blkcipher_desc *desc, | |
21436 | struct scatterlist *dst, struct scatterlist *src, | |
21437 | unsigned int nbytes) | |
21438 | { | |
21439 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); | |
21440 | struct blkcipher_walk walk; | |
21441 | int err; | |
21442 | blkcipher_walk_init(&walk, dst, src, nbytes); | |
21443 | err = blkcipher_walk_virt(desc, &walk); | |
21444 | desc->flags &= ~0x00000200; | |
21445 | kernel_fpu_begin(); | |
21446 | while ((nbytes = walk.nbytes)) { | |
21447 | aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, | |
21448 | nbytes & (~(16 -1)), walk.iv); | |
21449 | nbytes &= 16 - 1; | |
21450 | err = blkcipher_walk_done(desc, &walk, nbytes); | |
21451 | } | |
21452 | kernel_fpu_end(); | |
21453 | return err; | |
21454 | } | |
21455 | static int cbc_decrypt(struct blkcipher_desc *desc, | |
21456 | struct scatterlist *dst, struct scatterlist *src, | |
21457 | unsigned int nbytes) | |
21458 | { | |
21459 | struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); | |
21460 | struct blkcipher_walk walk; | |
21461 | int err; | |
21462 | blkcipher_walk_init(&walk, dst, src, nbytes); | |
21463 | err = blkcipher_walk_virt(desc, &walk); | |
21464 | desc->flags &= ~0x00000200; | |
21465 | kernel_fpu_begin(); | |
21466 | while ((nbytes = walk.nbytes)) { | |
21467 | aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, | |
21468 | nbytes & (~(16 -1)), walk.iv); | |
21469 | nbytes &= 16 - 1; | |
21470 | err = blkcipher_walk_done(desc, &walk, nbytes); | |
21471 | } | |
21472 | kernel_fpu_end(); | |
21473 | return err; | |
21474 | } | |
/* Internal synchronous CBC blkcipher ("__" prefix, priority 0); serves as
 * the worker behind the async "cbc(aes)" wrapper via cryptd. */
static struct crypto_alg blk_cbc_alg = {
	.cra_name = "__cbc-aes-aesni",
	.cra_driver_name = "__driver-cbc-aes-aesni",
	.cra_priority = 0,			/* internal use only */
	.cra_flags = 0x00000004,		/* blkcipher-type algorithm */
	.cra_blocksize = 16,
	.cra_ctxsize = sizeof(struct crypto_aes_ctx)+(16)-1,	/* + alignment slack */
	.cra_alignmask = 0,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = (&__this_module),
	.cra_list = { &(blk_cbc_alg.cra_list), &(blk_cbc_alg.cra_list) },
	.cra_u = {
		.blkcipher = {
			.min_keysize = 16,
			.max_keysize = 32,
			.setkey = aes_set_key,
			.encrypt = cbc_encrypt,
			.decrypt = cbc_decrypt,
		},
	},
};
21496 | static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, | |
21497 | unsigned int key_len) | |
21498 | { | |
21499 | struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm); | |
21500 | struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; | |
21501 | int err; | |
21502 | crypto_ablkcipher_clear_flags(child, 0x000fff00); | |
21503 | crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm) | |
21504 | & 0x000fff00); | |
21505 | err = crypto_ablkcipher_setkey(child, key, key_len); | |
21506 | crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child) | |
21507 | & 0xfff00000); | |
21508 | return err; | |
21509 | } | |
/*
 * ablk_encrypt - async encrypt hook.
 * Without a usable FPU the request is cloned into the per-request context
 * and punted to the cryptd worker; otherwise the inner synchronous
 * blkcipher encrypt runs inline.  The ?: wrapper is ftrace branch
 * profiling only.
 */
static int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 508, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; })) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		/* Copy the request, then retarget it at the cryptd tfm. */
		__builtin_memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->encrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}
/*
 * ablk_decrypt - async decrypt hook; mirror of ablk_encrypt().
 * Defers to cryptd when the FPU is unavailable, otherwise calls the inner
 * synchronous decrypt directly.  The ?: wrapper is ftrace branch
 * profiling only.
 */
static int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 529, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; })) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);
		/* Copy the request, then retarget it at the cryptd tfm. */
		__builtin_memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;
		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;
		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}
21548 | static void ablk_exit(struct crypto_tfm *tfm) | |
21549 | { | |
21550 | struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm); | |
21551 | cryptd_free_ablkcipher(ctx->cryptd_tfm); | |
21552 | } | |
21553 | static void ablk_init_common(struct crypto_tfm *tfm, | |
21554 | struct cryptd_ablkcipher *cryptd_tfm) | |
21555 | { | |
21556 | struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm); | |
21557 | ctx->cryptd_tfm = cryptd_tfm; | |
21558 | tfm->crt_u.ablkcipher.reqsize = sizeof(struct ablkcipher_request) + | |
21559 | crypto_ablkcipher_reqsize(&cryptd_tfm->base); | |
21560 | } | |
/*
 * ablk_ecb_init - tfm constructor for "ecb(aes)": allocate a cryptd
 * wrapper around the internal ECB driver and finish common init.
 * Propagates the allocation error code on failure.  The ?: wrapper is
 * ftrace branch profiling only.
 */
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;
	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
	if (__builtin_constant_p(((IS_ERR(cryptd_tfm)))) ? !!((IS_ERR(cryptd_tfm))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 567, }; ______r = !!((IS_ERR(cryptd_tfm))); ______f.miss_hit[______r]++; ______r; }))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
/*
 * Async "ecb(aes)" algorithm descriptor.  Requests either run inline
 * (when the FPU is usable) or are deferred to the cryptd child allocated
 * in ablk_ecb_init().  No .ivsize: ECB takes no IV.
 */
static struct crypto_alg ablk_ecb_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-aesni",
	.cra_priority = 400,
	/* Pre-expanded flag constants; presumably CRYPTO_ALG_TYPE_ABLKCIPHER
	 * (0x5) | CRYPTO_ALG_ASYNC (0x80) -- TODO confirm against crypto.h. */
	.cra_flags = 0x00000005|0x00000080,
	.cra_blocksize = 16,	/* AES block size in bytes */
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = (&__this_module),
	/* Self-referencing list head (expanded LIST_HEAD_INIT). */
	.cra_list = { &(ablk_ecb_alg.cra_list), &(ablk_ecb_alg.cra_list) },
	.cra_init = ablk_ecb_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = 16,	/* AES-128 */
			.max_keysize = 32,	/* AES-256 */
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};
/*
 * cra_init hook for the async "cbc(aes)" wrapper: allocate the
 * cryptd-managed "__driver-cbc-aes-aesni" child transform and attach it
 * to this tfm via ablk_init_common().
 * Returns 0 on success, or the PTR_ERR() code from cryptd allocation.
 */
static int ablk_cbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;
	cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
	/* Expanded ftrace branch-profiling "if" macro around IS_ERR(). */
	if (__builtin_constant_p(((IS_ERR(cryptd_tfm)))) ? !!((IS_ERR(cryptd_tfm))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 602, }; ______r = !!((IS_ERR(cryptd_tfm))); ______f.miss_hit[______r]++; ______r; }))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
/*
 * Async "cbc(aes)" algorithm descriptor; same inline-or-cryptd dispatch
 * as the ECB variant, but CBC carries a block-sized IV (.ivsize = 16).
 */
static struct crypto_alg ablk_cbc_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-aesni",
	.cra_priority = 400,
	/* Pre-expanded flag constants; presumably CRYPTO_ALG_TYPE_ABLKCIPHER
	 * (0x5) | CRYPTO_ALG_ASYNC (0x80) -- TODO confirm against crypto.h. */
	.cra_flags = 0x00000005|0x00000080,
	.cra_blocksize = 16,	/* AES block size in bytes */
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = (&__this_module),
	/* Self-referencing list head (expanded LIST_HEAD_INIT). */
	.cra_list = { &(ablk_cbc_alg.cra_list), &(ablk_cbc_alg.cra_list) },
	.cra_init = ablk_cbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = 16,	/* AES-128 */
			.max_keysize = 32,	/* AES-256 */
			.ivsize = 16,		/* IV is one AES block */
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};
/*
 * cra_init hook for the async "pcbc(aes)" wrapper.  Unlike the ECB/CBC
 * variants there is no dedicated PCBC assembler driver, so the cryptd
 * child is the generic pcbc template over the AES-NI cipher, wrapped in
 * the fpu() template: "fpu(pcbc(__driver-aes-aesni))".
 * Returns 0 on success, or the PTR_ERR() code from cryptd allocation.
 */
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
	struct cryptd_ablkcipher *cryptd_tfm;
	cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
					     0, 0);
	/* Expanded ftrace branch-profiling "if" macro around IS_ERR(). */
	if (__builtin_constant_p(((IS_ERR(cryptd_tfm)))) ? !!((IS_ERR(cryptd_tfm))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 758, }; ______r = !!((IS_ERR(cryptd_tfm))); ______f.miss_hit[______r]++; ______r; }))
		return PTR_ERR(cryptd_tfm);
	ablk_init_common(tfm, cryptd_tfm);
	return 0;
}
/*
 * Async "pcbc(aes)" algorithm descriptor, backed by the
 * fpu(pcbc(__driver-aes-aesni)) cryptd child set up in ablk_pcbc_init().
 */
static struct crypto_alg ablk_pcbc_alg = {
	.cra_name = "pcbc(aes)",
	.cra_driver_name = "pcbc-aes-aesni",
	.cra_priority = 400,
	/* Pre-expanded flag constants; presumably CRYPTO_ALG_TYPE_ABLKCIPHER
	 * (0x5) | CRYPTO_ALG_ASYNC (0x80) -- TODO confirm against crypto.h. */
	.cra_flags = 0x00000005|0x00000080,
	.cra_blocksize = 16,	/* AES block size in bytes */
	.cra_ctxsize = sizeof(struct async_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = (&__this_module),
	/* Self-referencing list head (expanded LIST_HEAD_INIT). */
	.cra_list = { &(ablk_pcbc_alg.cra_list), &(ablk_pcbc_alg.cra_list) },
	.cra_init = ablk_pcbc_init,
	.cra_exit = ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = 16,	/* AES-128 */
			.max_keysize = 32,	/* AES-256 */
			.ivsize = 16,		/* IV is one AES block */
			.setkey = ablk_set_key,
			.encrypt = ablk_encrypt,
			.decrypt = ablk_decrypt,
		},
	},
};
/*
 * Module init (expanded __init): refuse to load unless the boot CPU
 * advertises the AES-NI feature bit (4*32+25, i.e. word 4 bit 25 of
 * x86_capability -- the giant expression below is the expanded cpu-has
 * test wrapped in the ftrace branch profiler), then register the FPU
 * template helper and each AES algorithm variant in turn.  On failure,
 * the labels unwind the registrations in reverse order and the original
 * error code is returned.
 */
static int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) aesni_init(void)
{
	int err;
	/* True when the AES-NI capability bit is NOT set on boot_cpu_data. */
	if (__builtin_constant_p(((!(__builtin_constant_p((4*32+25)) && ( ((((4*32+25))>>5)==0 && (1UL<<(((4*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+25))>>5)==1 && (1UL<<(((4*32+25))&31) & (0|0))) || ((((4*32+25))>>5)==2 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==3 && (1UL<<(((4*32+25))&31) & (0))) || ((((4*32+25))>>5)==4 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==5 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==6 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==7 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==8 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==9 && (1UL<<(((4*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+25))) ? constant_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!((!(__builtin_constant_p((4*32+25)) && ( ((((4*32+25))>>5)==0 && (1UL<<(((4*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+25))>>5)==1 && (1UL<<(((4*32+25))&31) & (0|0))) || ((((4*32+25))>>5)==2 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==3 && (1UL<<(((4*32+25))&31) & (0))) || ((((4*32+25))>>5)==4 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==5 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==6 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==7 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==8 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==9 && (1UL<<(((4*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+25))) ?
constant_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1259, }; ______r = !!((!(__builtin_constant_p((4*32+25)) && ( ((((4*32+25))>>5)==0 && (1UL<<(((4*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+25))>>5)==1 && (1UL<<(((4*32+25))&31) & (0|0))) || ((((4*32+25))>>5)==2 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==3 && (1UL<<(((4*32+25))&31) & (0))) || ((((4*32+25))>>5)==4 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==5 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==6 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==7 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==8 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==9 && (1UL<<(((4*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+25))) ? constant_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) {
		/* "<6>" is the expanded KERN_INFO prefix. */
		printk("<6>" "Intel AES-NI instructions are not detected.\n");
		return -19;	/* -19 == -ENODEV */
	}
	/* Each registration below is wrapped in the same expanded ftrace
	 * branch profiler; the tested expression also assigns err. */
	if (__builtin_constant_p((((err = crypto_fpu_init())))) ? !!(((err = crypto_fpu_init()))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1264, }; ______r = !!(((err = crypto_fpu_init()))); ______f.miss_hit[______r]++; ______r; }))
		goto fpu_err;
	if (__builtin_constant_p((((err = crypto_register_alg(&aesni_alg))))) ? !!(((err = crypto_register_alg(&aesni_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1266, }; ______r = !!(((err = crypto_register_alg(&aesni_alg)))); ______f.miss_hit[______r]++; ______r; }))
		goto aes_err;
	if (__builtin_constant_p((((err = crypto_register_alg(&__aesni_alg))))) ? !!(((err = crypto_register_alg(&__aesni_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1268, }; ______r = !!(((err = crypto_register_alg(&__aesni_alg)))); ______f.miss_hit[______r]++; ______r; }))
		goto __aes_err;
	if (__builtin_constant_p((((err = crypto_register_alg(&blk_ecb_alg))))) ? !!(((err = crypto_register_alg(&blk_ecb_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1270, }; ______r = !!(((err = crypto_register_alg(&blk_ecb_alg)))); ______f.miss_hit[______r]++; ______r; }))
		goto blk_ecb_err;
	if (__builtin_constant_p((((err = crypto_register_alg(&blk_cbc_alg))))) ? !!(((err = crypto_register_alg(&blk_cbc_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1272, }; ______r = !!(((err = crypto_register_alg(&blk_cbc_alg)))); ______f.miss_hit[______r]++; ______r; }))
		goto blk_cbc_err;
	if (__builtin_constant_p((((err = crypto_register_alg(&ablk_ecb_alg))))) ? !!(((err = crypto_register_alg(&ablk_ecb_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1274, }; ______r = !!(((err = crypto_register_alg(&ablk_ecb_alg)))); ______f.miss_hit[______r]++; ______r; }))
		goto ablk_ecb_err;
	if (__builtin_constant_p((((err = crypto_register_alg(&ablk_cbc_alg))))) ? !!(((err = crypto_register_alg(&ablk_cbc_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1276, }; ______r = !!(((err = crypto_register_alg(&ablk_cbc_alg)))); ______f.miss_hit[______r]++; ______r; }))
		goto ablk_cbc_err;
	if (__builtin_constant_p((((err = crypto_register_alg(&ablk_pcbc_alg))))) ? !!(((err = crypto_register_alg(&ablk_pcbc_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1297, }; ______r = !!(((err = crypto_register_alg(&ablk_pcbc_alg)))); ______f.miss_hit[______r]++; ______r; }))
		goto ablk_pcbc_err;
	/* Success: err is 0 here (every tested assignment above was zero). */
	return err;
	/* NOTE(review): the next statement is unreachable in this build
	 * configuration (it follows an unconditional return and precedes its
	 * own unwind label) -- presumably leftover from registrations that
	 * are #ifdef'd out in the unpreprocessed source; confirm there. */
	crypto_unregister_alg(&ablk_pcbc_alg);
ablk_pcbc_err:
	crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
	crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
	crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
	crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
	crypto_unregister_alg(&__aesni_alg);
__aes_err:
	crypto_unregister_alg(&aesni_alg);
aes_err:
fpu_err:
	return err;
}
21701 | static void __attribute__ ((__section__(".exit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) aesni_exit(void) | |
21702 | { | |
21703 | crypto_unregister_alg(&ablk_pcbc_alg); | |
21704 | crypto_unregister_alg(&ablk_cbc_alg); | |
21705 | crypto_unregister_alg(&ablk_ecb_alg); | |
21706 | crypto_unregister_alg(&blk_cbc_alg); | |
21707 | crypto_unregister_alg(&blk_ecb_alg); | |
21708 | crypto_unregister_alg(&__aesni_alg); | |
21709 | crypto_unregister_alg(&aesni_alg); | |
21710 | crypto_fpu_exit(); | |
21711 | } | |
/* Expanded module_init()/module_exit(): the always-inline __inittest/
 * __exittest functions only type-check the handlers against initcall_t/
 * exitcall_t; init_module()/cleanup_module() are aliases the module
 * loader actually calls. */
static inline __attribute__((always_inline)) initcall_t __inittest(void) { return aesni_init; } int init_module(void) __attribute__((alias("aesni_init")));;
static inline __attribute__((always_inline)) exitcall_t __exittest(void) { return aesni_exit; } void cleanup_module(void) __attribute__((alias("aesni_exit")));;
/* Expanded MODULE_DESCRIPTION/MODULE_LICENSE/MODULE_ALIAS: key=value
 * strings placed in the .modinfo section for modinfo(8) and modprobe. */
static const char __mod_description1380[] __attribute__((__used__)) __attribute__((section(".modinfo"), unused, aligned(1))) = "description" "=" "Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized";
static const char __mod_license1381[] __attribute__((__used__)) __attribute__((section(".modinfo"), unused, aligned(1))) = "license" "=" "GPL";
static const char __mod_alias1382[] __attribute__((__used__)) __attribute__((section(".modinfo"), unused, aligned(1))) = "alias" "=" "aes";