/* NOTE(review): this file is a fragment of preprocessed (cpp-expanded) output
 * from linux-3.0.4 x86 headers.  The leading number on each line is the
 * original source line; the gaps between numbers mean lines were dropped by
 * the extraction, so many definitions below are incomplete. */
/* Exact-width integer types.  The __-prefixed spellings are the
 * userspace-exported (UAPI) forms; __extension__ silences -pedantic
 * for the non-ISO-C90 'long long'. */
1 typedef __signed__ char __s8;
2 typedef unsigned char __u8;
3 typedef __signed__ short __s16;
4 typedef unsigned short __u16;
5 typedef __signed__ int __s32;
6 typedef unsigned int __u32;
7 __extension__ typedef __signed__ long long __s64;
8 __extension__ typedef unsigned long long __u64;
/* Kernel-internal shorthand equivalents of the above. */
9 typedef signed char s8;
10 typedef unsigned char u8;
11 typedef signed short s16;
12 typedef unsigned short u16;
13 typedef signed int s32;
14 typedef unsigned int u32;
15 typedef signed long long s64;
16 typedef unsigned long long u64;
/* File mode bits (permissions + type), as used by the VFS. */
17 typedef unsigned short umode_t;
/* Per-branch profiling record used by the likely()/unlikely() branch
 * profiler.  NOTE(review): the struct body is truncated in this extraction
 * (members between lines 18 and 24, and the closing brace, are missing). */
18 struct ftrace_branch_data {
/* counts of branch-annotation predictions that were right / wrong */
24 unsigned long correct;
25 unsigned long incorrect;
/* indexed by the actual branch outcome (0/1) in profile-all-branches mode */
31 unsigned long miss_hit[2];
/* called from the instrumented branch macros to update the record above */
34 void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
/* NOTE(review): orphaned member of __kernel_fd_set (the enclosing struct
 * declaration was dropped by the extraction): 1024 fd bits packed into
 * unsigned longs. */
40 unsigned long fds_bits [(1024/(8 * sizeof(unsigned long)))];
/* ABI-level (__kernel_*) primitive types for 32-bit x86.  These fix the
 * exact sizes the kernel/userspace ABI depends on; the short uid/gid/mode
 * types reflect the legacy 16-bit ABI, with 32-bit variants further down. */
42 typedef void (*__kernel_sighandler_t)(int);
43 typedef int __kernel_key_t;
44 typedef int __kernel_mqd_t;
45 typedef unsigned long __kernel_ino_t;
46 typedef unsigned short __kernel_mode_t;
47 typedef unsigned short __kernel_nlink_t;
48 typedef long __kernel_off_t;
49 typedef int __kernel_pid_t;
50 typedef unsigned short __kernel_ipc_pid_t;
51 typedef unsigned short __kernel_uid_t;
52 typedef unsigned short __kernel_gid_t;
53 typedef unsigned int __kernel_size_t;
54 typedef int __kernel_ssize_t;
55 typedef int __kernel_ptrdiff_t;
56 typedef long __kernel_time_t;
57 typedef long __kernel_suseconds_t;
58 typedef long __kernel_clock_t;
59 typedef int __kernel_timer_t;
60 typedef int __kernel_clockid_t;
61 typedef int __kernel_daddr_t;
62 typedef char * __kernel_caddr_t;
/* 16- vs 32-bit uid/gid variants for old/new syscall ABIs */
63 typedef unsigned short __kernel_uid16_t;
64 typedef unsigned short __kernel_gid16_t;
65 typedef unsigned int __kernel_uid32_t;
66 typedef unsigned int __kernel_gid32_t;
67 typedef unsigned short __kernel_old_uid_t;
68 typedef unsigned short __kernel_old_gid_t;
69 typedef unsigned short __kernel_old_dev_t;
/* 64-bit file offset (llseek / large-file support) */
70 typedef long long __kernel_loff_t;
74 typedef __u32 __kernel_dev_t;
/* Kernel-internal names layered on the __kernel_* ABI types above.
 * Note uid_t/gid_t use the 32-bit variants (modern ABI). */
75 typedef __kernel_fd_set fd_set;
76 typedef __kernel_dev_t dev_t;
77 typedef __kernel_ino_t ino_t;
78 typedef __kernel_mode_t mode_t;
79 typedef __kernel_nlink_t nlink_t;
80 typedef __kernel_off_t off_t;
81 typedef __kernel_pid_t pid_t;
82 typedef __kernel_daddr_t daddr_t;
83 typedef __kernel_key_t key_t;
84 typedef __kernel_suseconds_t suseconds_t;
85 typedef __kernel_timer_t timer_t;
86 typedef __kernel_clockid_t clockid_t;
87 typedef __kernel_mqd_t mqd_t;
89 typedef __kernel_uid32_t uid_t;
90 typedef __kernel_gid32_t gid_t;
91 typedef __kernel_uid16_t uid16_t;
92 typedef __kernel_gid16_t gid16_t;
/* integer wide enough to round-trip a pointer on this arch */
93 typedef unsigned long uintptr_t;
94 typedef __kernel_old_uid_t old_uid_t;
95 typedef __kernel_old_gid_t old_gid_t;
96 typedef __kernel_loff_t loff_t;
97 typedef __kernel_size_t size_t;
98 typedef __kernel_ssize_t ssize_t;
99 typedef __kernel_ptrdiff_t ptrdiff_t;
100 typedef __kernel_time_t time_t;
101 typedef __kernel_clock_t clock_t;
102 typedef __kernel_caddr_t caddr_t;
/* BSD- and SysV-style convenience aliases */
103 typedef unsigned char u_char;
104 typedef unsigned short u_short;
105 typedef unsigned int u_int;
106 typedef unsigned long u_long;
107 typedef unsigned char unchar;
108 typedef unsigned short ushort;
109 typedef unsigned int uint;
110 typedef unsigned long ulong;
/* C99-style fixed-width names mapped onto the kernel's __uN/__sN types */
111 typedef __u8 u_int8_t;
113 typedef __u16 u_int16_t;
114 typedef __s16 int16_t;
115 typedef __u32 u_int32_t;
116 typedef __s32 int32_t;
117 typedef __u8 uint8_t;
118 typedef __u16 uint16_t;
119 typedef __u32 uint32_t;
120 typedef __u64 uint64_t;
121 typedef __u64 u_int64_t;
122 typedef __s64 int64_t;
/* 64-bit block-layer and DMA quantities (CONFIG_LBDAF/64-bit DMA variants) */
123 typedef u64 sector_t;
124 typedef u64 blkcnt_t;
125 typedef u64 dma_addr_t;
/* Endian-annotated types: __le*/__be* carry byte order, __sum16/__wsum are
 * checksum types; annotations are checked by sparse (__bitwise), invisible
 * to the compiler in this expanded form. */
126 typedef __u16 __le16;
127 typedef __u16 __be16;
128 typedef __u32 __le32;
129 typedef __u32 __be32;
130 typedef __u64 __le64;
131 typedef __u64 __be64;
132 typedef __u16 __sum16;
133 typedef __u32 __wsum;
/* allocation-flag and file-mode flag types */
134 typedef unsigned gfp_t;
135 typedef unsigned fmode_t;
/* physical addresses / resource sizes; 64-bit here (PAE-capable config) */
136 typedef u64 phys_addr_t;
137 typedef phys_addr_t resource_size_t;
/* NOTE(review): the following members appear without their enclosing struct
 * declarations (dropped by the extraction): list_head {next,prev},
 * hlist_head {first}, hlist_node {next,pprev}, and ustat {f_tfree,f_tinode}. */
142 struct list_head *next, *prev;
145 struct hlist_node *first;
148 struct hlist_node *next, **pprev;
151 __kernel_daddr_t f_tfree;
152 __kernel_ino_t f_tinode;
157 struct compat_timespec;
/* Per-thread bookkeeping for restarting a syscall interrupted by a signal;
 * body truncated here — only the restart callback and one union arm remain. */
158 struct restart_block {
159 long (*fn)(struct restart_block *);
171 struct timespec *rmtp;
/* NOTE(review): orphaned timespec-like members (enclosing struct missing). */
178 unsigned long tv_sec;
179 unsigned long tv_nsec;
/* default restart fn: just returns -EINTR-style failure (body elsewhere) */
183 extern long do_no_restart_syscall(struct restart_block *parm);
/* generic software popcount fallbacks (lib/hweight.c), used by the
 * alternatives-patched __arch_hweight* wrappers below */
184 extern unsigned int __sw_hweight8(unsigned int w);
185 extern unsigned int __sw_hweight16(unsigned int w);
186 extern unsigned int __sw_hweight32(unsigned int w);
187 extern unsigned long __sw_hweight64(__u64 w);
/* x86 "alternatives" runtime code patching: replaces generic instruction
 * sequences with CPU-feature-specific ones at boot, and patches SMP lock
 * prefixes in/out as CPUs come and go. */
195 extern void alternative_instructions(void);
196 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
198 extern void alternatives_smp_module_add(struct module *mod, char *name,
199 void *locks, void *locks_end,
200 void *text, void *text_end);
201 extern void alternatives_smp_module_del(struct module *mod);
202 extern void alternatives_smp_switch(int smp);
203 extern int alternatives_text_reserved(void *start, void *end);
204 extern bool skip_smp_alternatives;
/* human-readable names for CPUID capability / power-management bits */
205 extern const char * const x86_cap_flags[10*32];
206 extern const char * const x86_power_flags[32];
/* Compile-time-patched CPU feature test: uses asm goto plus an
 * .altinstructions entry so the jump is rewritten at boot once the feature
 * bit is known, avoiding a runtime test.  NOTE(review): the body is heavily
 * truncated in this extraction (braces, labels and several asm lines are
 * missing). */
207 static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) bool __static_cpu_has(u16 bit)
209 asm goto("1: jmp %l[t_no]\n"
211 ".section .altinstructions,\"a\"\n"
212 " " ".balign 4" " " "\n"
213 " " ".long" " " "1b\n"
214 " " ".long" " " "0\n"
/* feature bit passed as an immediate; t_no is the "feature absent" label */
219 : : "i" (bit) : : t_no);
/* Paravirt patch-site processing and kernel text patching primitives.
 * text_poke_early is for boot time (before SMP); text_poke/_smp handle
 * live-kernel patching (cross-CPU safe variants). */
224 struct paravirt_patch_site;
225 void apply_paravirt(struct paravirt_patch_site *start,
226 struct paravirt_patch_site *end);
227 extern void *text_poke_early(void *addr, const void *opcode, size_t len);
/* batch descriptor for text_poke_smp_batch; body truncated here */
228 struct text_poke_param {
233 extern void *text_poke(void *addr, const void *opcode, size_t len);
234 extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
235 extern void text_poke_smp_batch(struct text_poke_param *params, int n);
/* Atomically set bit 'nr' in the bitmap at 'addr' (lock-prefixed).
 * For a constant nr the byte-wide 'orb' form is used (smaller encoding);
 * otherwise 'bts'.  The huge conditional is the expanded branch-profiler
 * wrapper around __builtin_constant_p(nr).  NOTE(review): braces and the
 * else-branch glue are missing from this extraction. */
236 static inline __attribute__((always_inline)) __attribute__((always_inline)) void
237 set_bit(unsigned int nr, volatile unsigned long *addr)
239 if (__builtin_constant_p((((__builtin_constant_p(nr))))) ? !!(((__builtin_constant_p(nr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/bitops.h", .line = 62, }; ______r = !!(((__builtin_constant_p(nr)))); ______f.miss_hit[______r]++; ______r; })) {
240 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "orb %1,%0"
241 : "+m" (*(volatile long *) ((void *)(addr) + ((nr)>>3)))
242 : "iq" ((u8)(1 << ((nr) & 7)))
245 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "bts %1,%0"
246 : "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory");
/* Non-atomic variant: caller must guarantee exclusive access to the word. */
249 static inline __attribute__((always_inline)) void __set_bit(int nr, volatile unsigned long *addr)
251 asm volatile("bts %1,%0" : "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory");
/* Atomically clear bit 'nr' (lock-prefixed andb/btr, mirroring set_bit).
 * NOTE(review): braces/else glue missing from this extraction. */
253 static inline __attribute__((always_inline)) __attribute__((always_inline)) void
254 clear_bit(int nr, volatile unsigned long *addr)
256 if (__builtin_constant_p((((__builtin_constant_p(nr))))) ? !!(((__builtin_constant_p(nr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/bitops.h", .line = 100, }; ______r = !!(((__builtin_constant_p(nr)))); ______f.miss_hit[______r]++; ______r; })) {
257 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "andb %1,%0"
258 : "+m" (*(volatile long *) ((void *)(addr) + ((nr)>>3)))
259 : "iq" ((u8)~(1 << ((nr) & 7))));
261 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "btr %1,%0"
262 : "+m" (*(volatile long *) (addr))
/* clear_bit with release semantics: the compiler barrier orders prior
 * stores before the clear (x86 stores are already ordered at HW level).
 * NOTE(review): body truncated — the clear_bit() call itself is missing. */
266 static inline __attribute__((always_inline)) void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
268 __asm__ __volatile__("": : :"memory");
/* Non-atomic clear: caller must guarantee exclusive access. */
271 static inline __attribute__((always_inline)) void __clear_bit(int nr, volatile unsigned long *addr)
273 asm volatile("btr %1,%0" : "+m" (*(volatile long *) (addr)) : "Ir" (nr));
/* Non-atomic unlock-clear: barrier then plain __clear_bit (valid on x86
 * when the bit's word is only touched by the lock holder). */
275 static inline __attribute__((always_inline)) void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
277 __asm__ __volatile__("": : :"memory");
278 __clear_bit(nr, addr);
/* Non-atomic toggle of bit 'nr' (btc). */
280 static inline __attribute__((always_inline)) void __change_bit(int nr, volatile unsigned long *addr)
282 asm volatile("btc %1,%0" : "+m" (*(volatile long *) (addr)) : "Ir" (nr));
/* Atomic toggle: xorb for constant nr, lock btc otherwise.
 * NOTE(review): braces/else glue missing from this extraction. */
284 static inline __attribute__((always_inline)) void change_bit(int nr, volatile unsigned long *addr)
286 if (__builtin_constant_p((((__builtin_constant_p(nr))))) ? !!(((__builtin_constant_p(nr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/bitops.h", .line = 176, }; ______r = !!(((__builtin_constant_p(nr)))); ______f.miss_hit[______r]++; ______r; })) {
287 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "xorb %1,%0"
288 : "+m" (*(volatile long *) ((void *)(addr) + ((nr)>>3)))
289 : "iq" ((u8)(1 << ((nr) & 7))));
291 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "btc %2,%1\n\t"
292 : "+m" (*(volatile long *) (addr))
/* Atomic test-and-set: lock bts sets CF to the old bit; sbb materializes
 * it as 0/-1 in oldbit.  Returns nonzero if the bit was already set.
 * NOTE(review): braces, oldbit declarations and returns are missing
 * throughout this extraction. */
296 static inline __attribute__((always_inline)) int test_and_set_bit(int nr, volatile unsigned long *addr)
299 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "bts %2,%1\n\t"
300 "sbb %0,%0" : "=r" (oldbit), "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory");
/* acquire-semantics variant; plain test_and_set_bit suffices on x86 */
303 static inline __attribute__((always_inline)) __attribute__((always_inline)) int
304 test_and_set_bit_lock(int nr, volatile unsigned long *addr)
306 return test_and_set_bit(nr, addr);
/* non-atomic test-and-set (no lock prefix; asm line truncated) */
308 static inline __attribute__((always_inline)) int __test_and_set_bit(int nr, volatile unsigned long *addr)
313 : "=r" (oldbit), "+m" (*(volatile long *) (addr))
/* atomic test-and-clear (lock btr) */
317 static inline __attribute__((always_inline)) int test_and_clear_bit(int nr, volatile unsigned long *addr)
320 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "btr %2,%1\n\t"
322 : "=r" (oldbit), "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory");
/* non-atomic test-and-clear */
325 static inline __attribute__((always_inline)) int __test_and_clear_bit(int nr, volatile unsigned long *addr)
328 asm volatile("btr %2,%1\n\t"
330 : "=r" (oldbit), "+m" (*(volatile long *) (addr))
/* non-atomic test-and-toggle */
334 static inline __attribute__((always_inline)) int __test_and_change_bit(int nr, volatile unsigned long *addr)
337 asm volatile("btc %2,%1\n\t"
339 : "=r" (oldbit), "+m" (*(volatile long *) (addr))
340 : "Ir" (nr) : "memory");
/* atomic test-and-toggle (lock btc) */
343 static inline __attribute__((always_inline)) int test_and_change_bit(int nr, volatile unsigned long *addr)
346 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "btc %2,%1\n\t"
348 : "=r" (oldbit), "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory");
/* Pure-C bit test for compile-time-constant nr; the %32 / /32 arithmetic
 * implies 32-bit unsigned long (i386 build). */
351 static inline __attribute__((always_inline)) __attribute__((always_inline)) int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
353 return ((1UL << (nr % 32)) &
354 (addr[nr / 32])) != 0;
/* asm bit test for runtime nr (bt; result capture lines truncated) */
356 static inline __attribute__((always_inline)) int variable_test_bit(int nr, volatile const unsigned long *addr)
359 asm volatile("bt %2,%1\n\t"
362 : "m" (*(unsigned long *)addr), "Ir" (nr));
/* NOTE(review): bodies of __ffs/ffz/__fls are entirely missing from this
 * extraction; only the signatures remain (find-first-set / first-zero /
 * last-set helpers, undefined for zero input per kernel convention). */
365 static inline __attribute__((always_inline)) unsigned long __ffs(unsigned long word)
372 static inline __attribute__((always_inline)) unsigned long ffz(unsigned long word)
379 static inline __attribute__((always_inline)) unsigned long __fls(unsigned long word)
/* libc-style ffs/fls: 1-based bit index, 0 for x==0 (bodies truncated;
 * only the asm constraint lines survive). */
386 static inline __attribute__((always_inline)) int ffs(int x)
391 : "=r" (r) : "rm" (x), "r" (-1));
394 static inline __attribute__((always_inline)) int fls(int x)
399 : "=&r" (r) : "rm" (x), "rm" (-1));
/* Bitmap search primitives (lib/find_next_bit.c); find_first_* size
 * parameters are truncated in this extraction. */
402 extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
403 size, unsigned long offset);
404 extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
405 long size, unsigned long offset);
406 extern unsigned long find_first_bit(const unsigned long *addr,
408 extern unsigned long find_first_zero_bit(const unsigned long *addr,
/* Find first set bit in a 100-bit scheduler priority bitmap (four 32-bit
 * words checked in order).  The conditionals are expanded branch-profiler
 * wrappers; the first two 'return' statements are missing from this
 * extraction. */
410 static inline __attribute__((always_inline)) int sched_find_first_bit(const unsigned long *b)
412 if (__builtin_constant_p(((b[0]))) ? !!((b[0])) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/bitops/sched.h", .line = 19, }; ______r = !!((b[0])); ______f.miss_hit[______r]++; ______r; }))
414 if (__builtin_constant_p(((b[1]))) ? !!((b[1])) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/bitops/sched.h", .line = 21, }; ______r = !!((b[1])); ______f.miss_hit[______r]++; ______r; }))
415 return __ffs(b[1]) + 32;
416 if (__builtin_constant_p(((b[2]))) ? !!((b[2])) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/bitops/sched.h", .line = 23, }; ______r = !!((b[2])); ______f.miss_hit[______r]++; ______r; }))
417 return __ffs(b[2]) + 64;
418 return __ffs(b[3]) + 96;
/* Popcount: calls __sw_hweight32 but is patched via .altinstructions to
 * the POPCNT instruction (bytes f3 0f b8 c0) on CPUs with feature bit
 * 4*32+23 (X86_FEATURE_POPCNT).  NOTE(review): asm operands/return are
 * truncated in this extraction. */
420 static inline __attribute__((always_inline)) unsigned int __arch_hweight32(unsigned int w)
422 unsigned int res = 0;
423 asm ("661:\n\t" "call __sw_hweight32" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(4*32+23)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" ".byte 0xf3,0x0f,0xb8,0xc0" "\n664:\n" ".previous"
/* 16-/8-bit popcounts reduce to the 32-bit one after masking */
428 static inline __attribute__((always_inline)) unsigned int __arch_hweight16(unsigned int w)
430 return __arch_hweight32(w & 0xffff);
432 static inline __attribute__((always_inline)) unsigned int __arch_hweight8(unsigned int w)
434 return __arch_hweight32(w & 0xff);
/* 64-bit popcount as two 32-bit halves (32-bit kernel path) */
436 static inline __attribute__((always_inline)) unsigned long __arch_hweight64(__u64 w)
438 unsigned long res = 0;
439 return __arch_hweight32((u32)w) +
440 __arch_hweight32((u32)(w >> 32));
/* fls for 64-bit values via two 32-bit halves; body largely truncated
 * (only the branch-profiler test on the high half remains). */
443 static inline __attribute__((always_inline)) __attribute__((always_inline)) int fls64(__u64 x)
446 if (__builtin_constant_p(((h))) ? !!((h)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/bitops/fls64.h", .line = 21, }; ______r = !!((h)); ______f.miss_hit[______r]++; ______r; }))
/* byte-swap a 32-bit value with the BSWAP instruction */
450 static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __arch_swab32(__u32 val)
452 asm("bswap %0" : "=r" (val) : "0" (val));
/* 64-bit byte swap on 32-bit x86: bswap each half then exchange halves
 * (union declaration and return are missing from this extraction) */
455 static inline __attribute__((always_inline)) __attribute__((__const__)) __u64 __arch_swab64(__u64 val)
465 asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
466 : "=r" (v.s.a), "=r" (v.s.b)
467 : "0" (v.s.a), "1" (v.s.b));
/* Out-of-line byte-swap helpers (__fswab*) used when the argument is not a
 * compile-time constant, plus pointer-dereferencing wrappers (__swab*p)
 * that expand the constant path inline via __builtin_constant_p.
 * NOTE(review): closing braces are missing throughout this extraction. */
470 static inline __attribute__((always_inline)) __attribute__((__const__)) __u16 __fswab16(__u16 val)
472 return ((__u16)( (((__u16)(val) & (__u16)0x00ffU) << 8) | (((__u16)(val) & (__u16)0xff00U) >> 8)));
474 static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswab32(__u32 val)
476 return __arch_swab32(val);
478 static inline __attribute__((always_inline)) __attribute__((__const__)) __u64 __fswab64(__u64 val)
480 return __arch_swab64(val);
/* swap 16-bit halfwords within a 32-bit value */
482 static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswahw32(__u32 val)
484 return ((__u32)( (((__u32)(val) & (__u32)0x0000ffffUL) << 16) | (((__u32)(val) & (__u32)0xffff0000UL) >> 16)));
/* swap bytes within each 16-bit halfword of a 32-bit value */
486 static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswahb32(__u32 val)
488 return ((__u32)( (((__u32)(val) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(val) & (__u32)0xff00ff00UL) >> 8)));
490 static inline __attribute__((always_inline)) __u16 __swab16p(const __u16 *p)
492 return (__builtin_constant_p((__u16)(*p)) ? ((__u16)( (((__u16)(*p) & (__u16)0x00ffU) << 8) | (((__u16)(*p) & (__u16)0xff00U) >> 8))) : __fswab16(*p));
494 static inline __attribute__((always_inline)) __u32 __swab32p(const __u32 *p)
496 return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x000000ffUL) << 24) | (((__u32)(*p) & (__u32)0x0000ff00UL) << 8) | (((__u32)(*p) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(*p) & (__u32)0xff000000UL) >> 24))) : __fswab32(*p));
498 static inline __attribute__((always_inline)) __u64 __swab64p(const __u64 *p)
500 return (__builtin_constant_p((__u64)(*p)) ? ((__u64)( (((__u64)(*p) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(*p) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(*p) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(*p) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(*p) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(*p) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(*p) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(*p) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(*p));
502 static inline __attribute__((always_inline)) __u32 __swahw32p(const __u32 *p)
504 return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x0000ffffUL) << 16) | (((__u32)(*p) & (__u32)0xffff0000UL) >> 16))) : __fswahw32(*p));
506 static inline __attribute__((always_inline)) __u32 __swahb32p(const __u32 *p)
508 return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(*p) & (__u32)0xff00ff00UL) >> 8))) : __fswahb32(*p));
/* In-place swap helpers; bodies entirely missing from this extraction
 * (only signatures remain). */
510 static inline __attribute__((always_inline)) void __swab16s(__u16 *p)
514 static inline __attribute__((always_inline)) void __swab32s(__u32 *p)
518 static inline __attribute__((always_inline)) void __swab64s(__u64 *p)
522 static inline __attribute__((always_inline)) void __swahw32s(__u32 *p)
526 static inline __attribute__((always_inline)) void __swahb32s(__u32 *p)
/* cpu <-> little-endian pointer conversions: no-ops on x86 (LE host);
 * bodies missing from this extraction. */
530 static inline __attribute__((always_inline)) __le64 __cpu_to_le64p(const __u64 *p)
534 static inline __attribute__((always_inline)) __u64 __le64_to_cpup(const __le64 *p)
538 static inline __attribute__((always_inline)) __le32 __cpu_to_le32p(const __u32 *p)
542 static inline __attribute__((always_inline)) __u32 __le32_to_cpup(const __le32 *p)
546 static inline __attribute__((always_inline)) __le16 __cpu_to_le16p(const __u16 *p)
550 static inline __attribute__((always_inline)) __u16 __le16_to_cpup(const __le16 *p)
/* cpu <-> big-endian pointer conversions: byte-swap on this LE host */
554 static inline __attribute__((always_inline)) __be64 __cpu_to_be64p(const __u64 *p)
556 return ( __be64)__swab64p(p);
558 static inline __attribute__((always_inline)) __u64 __be64_to_cpup(const __be64 *p)
560 return __swab64p((__u64 *)p);
562 static inline __attribute__((always_inline)) __be32 __cpu_to_be32p(const __u32 *p)
564 return ( __be32)__swab32p(p);
566 static inline __attribute__((always_inline)) __u32 __be32_to_cpup(const __be32 *p)
568 return __swab32p((__u32 *)p);
570 static inline __attribute__((always_inline)) __be16 __cpu_to_be16p(const __u16 *p)
572 return ( __be16)__swab16p(p);
574 static inline __attribute__((always_inline)) __u16 __be16_to_cpup(const __be16 *p)
576 return __swab16p((__u16 *)p);
/* Add a CPU-order value to a little-endian-stored variable in place.
 * On this LE host the LE<->CPU casts are identity conversions, so the
 * addition happens directly on the stored representation. */
578 static inline __attribute__((always_inline)) void le16_add_cpu(__le16 *var, u16 val)
580 *var = (( __le16)(__u16)((( __u16)(__le16)(*var)) + val));
582 static inline __attribute__((always_inline)) void le32_add_cpu(__le32 *var, u32 val)
584 *var = (( __le32)(__u32)((( __u32)(__le32)(*var)) + val));
586 static inline __attribute__((always_inline)) void le64_add_cpu(__le64 *var, u64 val)
588 *var = (( __le64)(__u64)((( __u64)(__le64)(*var)) + val));
590 static inline __attribute__((always_inline)) void be16_add_cpu(__be16 *var, u16 val)
592 *var = (( __be16)(__builtin_constant_p((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val))) ? ((__u16)( (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0x00ffU) << 8) | (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0xff00U) >> 8))) : __fswab16(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val))));
594 static inline __attribute__((always_inline)) void be32_add_cpu(__be32 *var, u32 val)
596 *var = (( __be32)(__builtin_constant_p((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val))) ? ((__u32)( (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? 
((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val))));
598 static inline __attribute__((always_inline)) void be64_add_cpu(__be64 *var, u64 val)
600 *var = (( __be64)(__builtin_constant_p((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val))) ? ((__u64)( (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val))));
/* Little-endian bitmap accessors.  On LE x86 the native bit order already
 * matches, so each helper forwards to the plain bitop with 'nr ^ 0'
 * (the BE variant would use a nonzero XOR mask).
 * NOTE(review): braces are missing throughout this extraction. */
602 static inline __attribute__((always_inline)) unsigned long find_next_zero_bit_le(const void *addr,
603 unsigned long size, unsigned long offset)
605 return find_next_zero_bit(addr, size, offset);
607 static inline __attribute__((always_inline)) unsigned long find_next_bit_le(const void *addr,
608 unsigned long size, unsigned long offset)
610 return find_next_bit(addr, size, offset);
612 static inline __attribute__((always_inline)) unsigned long find_first_zero_bit_le(const void *addr,
615 return find_first_zero_bit(addr, size);
617 static inline __attribute__((always_inline)) int test_bit_le(int nr, const void *addr)
619 return (__builtin_constant_p((nr ^ 0)) ? constant_test_bit((nr ^ 0), (addr)) : variable_test_bit((nr ^ 0), (addr)));
621 static inline __attribute__((always_inline)) void __set_bit_le(int nr, void *addr)
623 __set_bit(nr ^ 0, addr);
625 static inline __attribute__((always_inline)) void __clear_bit_le(int nr, void *addr)
627 __clear_bit(nr ^ 0, addr);
629 static inline __attribute__((always_inline)) int test_and_set_bit_le(int nr, void *addr)
631 return test_and_set_bit(nr ^ 0, addr);
633 static inline __attribute__((always_inline)) int test_and_clear_bit_le(int nr, void *addr)
635 return test_and_clear_bit(nr ^ 0, addr);
637 static inline __attribute__((always_inline)) int __test_and_set_bit_le(int nr, void *addr)
639 return __test_and_set_bit(nr ^ 0, addr);
641 static inline __attribute__((always_inline)) int __test_and_clear_bit_le(int nr, void *addr)
643 return __test_and_clear_bit(nr ^ 0, addr);
645 static __inline__ __attribute__((always_inline)) int get_bitmask_order(unsigned int count)
651 static __inline__ __attribute__((always_inline)) int get_count_order(unsigned int count)
654 order = fls(count) - 1;
655 if (__builtin_constant_p(((count & (count - 1)))) ? !!((count & (count - 1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitops.h", .line = 42, }; ______r = !!((count & (count - 1))); ______f.miss_hit[______r]++; ______r; }))
659 static inline __attribute__((always_inline)) unsigned long hweight_long(unsigned long w)
661 return sizeof(w) == 4 ? (__builtin_constant_p(w) ? ((( (!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))) ) + ( (!!(((w) >> 8) & (1ULL << 0))) + (!!(((w) >> 8) & (1ULL << 1))) + (!!(((w) >> 8) & (1ULL << 2))) + (!!(((w) >> 8) & (1ULL << 3))) + (!!(((w) >> 8) & (1ULL << 4))) + (!!(((w) >> 8) & (1ULL << 5))) + (!!(((w) >> 8) & (1ULL << 6))) + (!!(((w) >> 8) & (1ULL << 7))) )) + (( (!!(((w) >> 16) & (1ULL << 0))) + (!!(((w) >> 16) & (1ULL << 1))) + (!!(((w) >> 16) & (1ULL << 2))) + (!!(((w) >> 16) & (1ULL << 3))) + (!!(((w) >> 16) & (1ULL << 4))) + (!!(((w) >> 16) & (1ULL << 5))) + (!!(((w) >> 16) & (1ULL << 6))) + (!!(((w) >> 16) & (1ULL << 7))) ) + ( (!!((((w) >> 16) >> 8) & (1ULL << 0))) + (!!((((w) >> 16) >> 8) & (1ULL << 1))) + (!!((((w) >> 16) >> 8) & (1ULL << 2))) + (!!((((w) >> 16) >> 8) & (1ULL << 3))) + (!!((((w) >> 16) >> 8) & (1ULL << 4))) + (!!((((w) >> 16) >> 8) & (1ULL << 5))) + (!!((((w) >> 16) >> 8) & (1ULL << 6))) + (!!((((w) >> 16) >> 8) & (1ULL << 7))) ))) : __arch_hweight32(w)) : (__builtin_constant_p(w) ? 
(((( (!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))) ) + ( (!!(((w) >> 8) & (1ULL << 0))) + (!!(((w) >> 8) & (1ULL << 1))) + (!!(((w) >> 8) & (1ULL << 2))) + (!!(((w) >> 8) & (1ULL << 3))) + (!!(((w) >> 8) & (1ULL << 4))) + (!!(((w) >> 8) & (1ULL << 5))) + (!!(((w) >> 8) & (1ULL << 6))) + (!!(((w) >> 8) & (1ULL << 7))) )) + (( (!!(((w) >> 16) & (1ULL << 0))) + (!!(((w) >> 16) & (1ULL << 1))) + (!!(((w) >> 16) & (1ULL << 2))) + (!!(((w) >> 16) & (1ULL << 3))) + (!!(((w) >> 16) & (1ULL << 4))) + (!!(((w) >> 16) & (1ULL << 5))) + (!!(((w) >> 16) & (1ULL << 6))) + (!!(((w) >> 16) & (1ULL << 7))) ) + ( (!!((((w) >> 16) >> 8) & (1ULL << 0))) + (!!((((w) >> 16) >> 8) & (1ULL << 1))) + (!!((((w) >> 16) >> 8) & (1ULL << 2))) + (!!((((w) >> 16) >> 8) & (1ULL << 3))) + (!!((((w) >> 16) >> 8) & (1ULL << 4))) + (!!((((w) >> 16) >> 8) & (1ULL << 5))) + (!!((((w) >> 16) >> 8) & (1ULL << 6))) + (!!((((w) >> 16) >> 8) & (1ULL << 7))) ))) + ((( (!!(((w) >> 32) & (1ULL << 0))) + (!!(((w) >> 32) & (1ULL << 1))) + (!!(((w) >> 32) & (1ULL << 2))) + (!!(((w) >> 32) & (1ULL << 3))) + (!!(((w) >> 32) & (1ULL << 4))) + (!!(((w) >> 32) & (1ULL << 5))) + (!!(((w) >> 32) & (1ULL << 6))) + (!!(((w) >> 32) & (1ULL << 7))) ) + ( (!!((((w) >> 32) >> 8) & (1ULL << 0))) + (!!((((w) >> 32) >> 8) & (1ULL << 1))) + (!!((((w) >> 32) >> 8) & (1ULL << 2))) + (!!((((w) >> 32) >> 8) & (1ULL << 3))) + (!!((((w) >> 32) >> 8) & (1ULL << 4))) + (!!((((w) >> 32) >> 8) & (1ULL << 5))) + (!!((((w) >> 32) >> 8) & (1ULL << 6))) + (!!((((w) >> 32) >> 8) & (1ULL << 7))) )) + (( (!!((((w) >> 32) >> 16) & (1ULL << 0))) + (!!((((w) >> 32) >> 16) & (1ULL << 1))) + (!!((((w) >> 32) >> 16) & (1ULL << 2))) + (!!((((w) >> 32) >> 16) & (1ULL << 3))) + (!!((((w) >> 32) >> 16) & (1ULL << 4))) + (!!((((w) >> 32) >> 16) & (1ULL << 5))) + (!!((((w) >> 32) >> 16) & (1ULL << 
6))) + (!!((((w) >> 32) >> 16) & (1ULL << 7))) ) + ( (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 0))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 1))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 2))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 3))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 4))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 5))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 6))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 7))) )))) : __arch_hweight64(w));
663 static inline __attribute__((always_inline)) __u32 rol32(__u32 word, unsigned int shift)
665 return (word << shift) | (word >> (32 - shift));
667 static inline __attribute__((always_inline)) __u32 ror32(__u32 word, unsigned int shift)
669 return (word >> shift) | (word << (32 - shift));
671 static inline __attribute__((always_inline)) __u16 rol16(__u16 word, unsigned int shift)
673 return (word << shift) | (word >> (16 - shift));
675 static inline __attribute__((always_inline)) __u16 ror16(__u16 word, unsigned int shift)
677 return (word >> shift) | (word << (16 - shift));
679 static inline __attribute__((always_inline)) __u8 rol8(__u8 word, unsigned int shift)
681 return (word << shift) | (word >> (8 - shift));
683 static inline __attribute__((always_inline)) __u8 ror8(__u8 word, unsigned int shift)
685 return (word >> shift) | (word << (8 - shift));
687 static inline __attribute__((always_inline)) __s32 sign_extend32(__u32 value, int index)
689 __u8 shift = 31 - index;
690 return (__s32)(value << shift) >> shift;
692 static inline __attribute__((always_inline)) unsigned fls_long(unsigned long l)
694 if (__builtin_constant_p(((sizeof(l) == 4))) ? !!((sizeof(l) == 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitops.h", .line = 125, }; ______r = !!((sizeof(l) == 4)); ______f.miss_hit[______r]++; ______r; }))
698 static inline __attribute__((always_inline)) unsigned long __ffs64(u64 word)
700 if (__builtin_constant_p(((((u32)word) == 0UL))) ? !!((((u32)word) == 0UL)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitops.h", .line = 141, }; ______r = !!((((u32)word) == 0UL)); ______f.miss_hit[______r]++; ______r; }))
701 return __ffs((u32)(word >> 32)) + 32;
702 return __ffs((unsigned long)word);
704 extern unsigned long find_last_bit(const unsigned long *addr,
706 extern unsigned int __VMALLOC_RESERVE;
707 extern int sysctl_legacy_va_layout;
708 extern void find_low_pfn_range(void);
709 extern void setup_bootmem_allocator(void);
710 extern int devmem_is_allowed(unsigned long pagenr);
711 extern unsigned long max_low_pfn_mapped;
712 extern unsigned long max_pfn_mapped;
713 static inline __attribute__((always_inline)) phys_addr_t get_max_mapped(void)
715 return (phys_addr_t)max_pfn_mapped << 12;
717 extern unsigned long init_memory_mapping(unsigned long start,
719 extern void initmem_init(void);
720 extern void free_initmem(void);
721 typedef __builtin_va_list __gnuc_va_list;
722 typedef __gnuc_va_list va_list;
723 extern char *strndup_user(const char *, long);
724 extern void *memdup_user(const void *, size_t);
725 extern char *strcpy(char *dest, const char *src);
726 extern char *strncpy(char *dest, const char *src, size_t count);
727 extern char *strcat(char *dest, const char *src);
728 extern char *strncat(char *dest, const char *src, size_t count);
729 extern int strcmp(const char *cs, const char *ct);
730 extern int strncmp(const char *cs, const char *ct, size_t count);
731 extern char *strchr(const char *s, int c);
732 extern size_t strlen(const char *s);
733 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *__memcpy(void *to, const void *from, size_t n)
736 asm volatile("rep ; movsl\n\t"
742 : "=&c" (d0), "=&D" (d1), "=&S" (d2)
743 : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
747 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *__constant_memcpy(void *to, const void *from,
751 if (__builtin_constant_p(((!n))) ? !!((!n)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 55, }; ______r = !!((!n)); ______f.miss_hit[______r]++; ______r; }))
755 *(char *)to = *(char *)from;
758 *(short *)to = *(short *)from;
761 *(int *)to = *(int *)from;
764 *(short *)to = *(short *)from;
765 *((char *)to + 2) = *((char *)from + 2);
768 *(int *)to = *(int *)from;
769 *((char *)to + 4) = *((char *)from + 4);
772 *(int *)to = *(int *)from;
773 *((short *)to + 2) = *((short *)from + 2);
776 *(int *)to = *(int *)from;
777 *((int *)to + 1) = *((int *)from + 1);
782 if (__builtin_constant_p(((n >= 5 * 4))) ? !!((n >= 5 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 88, }; ______r = !!((n >= 5 * 4)); ______f.miss_hit[______r]++; ______r; })) {
784 asm volatile("rep ; movsl"
785 : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
786 : "0" (n / 4), "1" (edi), "2" (esi)
790 if (__builtin_constant_p(((n >= 4 * 4))) ? !!((n >= 4 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 98, }; ______r = !!((n >= 4 * 4)); ______f.miss_hit[______r]++; ______r; }))
792 : "=&D"(edi), "=&S"(esi)
795 if (__builtin_constant_p(((n >= 3 * 4))) ? !!((n >= 3 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 103, }; ______r = !!((n >= 3 * 4)); ______f.miss_hit[______r]++; ______r; }))
797 : "=&D"(edi), "=&S"(esi)
800 if (__builtin_constant_p(((n >= 2 * 4))) ? !!((n >= 2 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 108, }; ______r = !!((n >= 2 * 4)); ______f.miss_hit[______r]++; ______r; }))
802 : "=&D"(edi), "=&S"(esi)
805 if (__builtin_constant_p(((n >= 1 * 4))) ? !!((n >= 1 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 113, }; ______r = !!((n >= 1 * 4)); ______f.miss_hit[______r]++; ______r; }))
807 : "=&D"(edi), "=&S"(esi)
816 : "=&D"(edi), "=&S"(esi)
822 : "=&D"(edi), "=&S"(esi)
827 asm volatile("movsw\n\tmovsb"
828 : "=&D"(edi), "=&S"(esi)
834 void *memmove(void *dest, const void *src, size_t n);
835 extern void *memchr(const void *cs, int c, size_t count);
836 static inline __attribute__((always_inline)) void *__memset_generic(void *s, char c, size_t count)
839 asm volatile("rep\n\t"
841 : "=&c" (d0), "=&D" (d1)
842 : "a" (c), "1" (s), "0" (count)
846 static inline __attribute__((always_inline)) __attribute__((always_inline))
847 void *__constant_c_memset(void *s, unsigned long c, size_t count)
850 asm volatile("rep ; stosl\n\t"
854 "1:\ttestb $1,%b3\n\t"
858 : "=&c" (d0), "=&D" (d1)
859 : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
863 extern size_t strnlen(const char *s, size_t count);
864 extern char *strstr(const char *cs, const char *ct);
865 static inline __attribute__((always_inline)) __attribute__((always_inline))
866 void *__constant_c_and_count_memset(void *s, unsigned long pattern,
873 *(unsigned char *)s = pattern & 0xff;
876 *(unsigned short *)s = pattern & 0xffff;
879 *(unsigned short *)s = pattern & 0xffff;
880 *((unsigned char *)s + 2) = pattern & 0xff;
883 *(unsigned long *)s = pattern;
888 unsigned long eax = pattern;
891 asm volatile("rep ; stosl" "" : "=&c" (d0), "=&D" (d1) : "a" (eax), "0" (count/4), "1" ((long)s) : "memory");
894 asm volatile("rep ; stosl" "\n\tstosb" : "=&c" (d0), "=&D" (d1) : "a" (eax), "0" (count/4), "1" ((long)s) : "memory");
897 asm volatile("rep ; stosl" "\n\tstosw" : "=&c" (d0), "=&D" (d1) : "a" (eax), "0" (count/4), "1" ((long)s) : "memory");
900 asm volatile("rep ; stosl" "\n\tstosw\n\tstosb" : "=&c" (d0), "=&D" (d1) : "a" (eax), "0" (count/4), "1" ((long)s) : "memory");
905 extern void *memscan(void *addr, int c, size_t size);
906 size_t strlcpy(char *, const char *, size_t);
907 extern size_t strlcat(char *, const char *, __kernel_size_t);
908 extern int strnicmp(const char *, const char *, __kernel_size_t);
909 extern int strcasecmp(const char *s1, const char *s2);
910 extern int strncasecmp(const char *s1, const char *s2, size_t n);
911 extern char * strnchr(const char *, size_t, int);
912 extern char * strrchr(const char *,int);
913 extern char * __attribute__((warn_unused_result)) skip_spaces(const char *);
914 extern char *strim(char *);
915 static inline __attribute__((always_inline)) __attribute__((warn_unused_result)) char *strstrip(char *str)
919 extern char * strnstr(const char *, const char *, size_t);
920 extern char * strpbrk(const char *,const char *);
921 extern char * strsep(char **,const char *);
922 extern __kernel_size_t strspn(const char *,const char *);
923 extern __kernel_size_t strcspn(const char *,const char *);
924 extern int __builtin_memcmp(const void *,const void *,__kernel_size_t);
925 extern char *kstrdup(const char *s, gfp_t gfp);
926 extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
927 extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
928 extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
929 extern void argv_free(char **argv);
930 extern bool sysfs_streq(const char *s1, const char *s2);
931 extern int strtobool(const char *s, bool *res);
932 int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
933 int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
934 int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __attribute__((format(printf,3,4)));
935 extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
936 const void *from, size_t available);
937 static inline __attribute__((always_inline)) bool strstarts(const char *str, const char *prefix)
939 return strncmp(str, prefix, strlen(prefix)) == 0;
941 static inline __attribute__((always_inline)) void clear_page(void *page)
943 __builtin_memset(page, 0, ((1UL) << 12));
945 static inline __attribute__((always_inline)) void copy_page(void *to, void *from)
947 __builtin_memcpy(to, from, ((1UL) << 12));
950 static inline __attribute__((always_inline)) void clear_user_page(void *page, unsigned long vaddr,
955 static inline __attribute__((always_inline)) void copy_user_page(void *to, void *from, unsigned long vaddr,
960 extern bool __virt_addr_valid(unsigned long kaddr);
961 static inline __attribute__((always_inline)) __attribute__((__const__)) int get_order(unsigned long size)
964 size = (size - 1) >> (12 - 1);
990 unsigned short cs, __csh;
993 unsigned short ss, __ssh;
994 unsigned short es, __esh;
995 unsigned short ds, __dsh;
996 unsigned short fs, __fsh;
997 unsigned short gs, __gsh;
999 struct revectored_struct {
1000 unsigned long __map[8];
1002 struct vm86_struct {
1003 struct vm86_regs regs;
1004 unsigned long flags;
1005 unsigned long screen_bitmap;
1006 unsigned long cpu_type;
1007 struct revectored_struct int_revectored;
1008 struct revectored_struct int21_revectored;
1010 struct vm86plus_info_struct {
1011 unsigned long force_return_for_pic:1;
1012 unsigned long vm86dbg_active:1;
1013 unsigned long vm86dbg_TFpendig:1;
1014 unsigned long unused:28;
1015 unsigned long is_vm86pus:1;
1016 unsigned char vm86dbg_intxxtab[32];
1018 struct vm86plus_struct {
1019 struct vm86_regs regs;
1020 unsigned long flags;
1021 unsigned long screen_bitmap;
1022 unsigned long cpu_type;
1023 struct revectored_struct int_revectored;
1024 struct revectored_struct int21_revectored;
1025 struct vm86plus_info_struct vm86plus;
1027 extern const char early_idt_handlers[32][10];
1040 unsigned long orig_ax;
1043 unsigned long flags;
1047 typedef int (*initcall_t)(void);
1048 typedef void (*exitcall_t)(void);
1049 extern initcall_t __con_initcall_start[], __con_initcall_end[];
1050 extern initcall_t __security_initcall_start[], __security_initcall_end[];
1051 typedef void (*ctor_fn_t)(void);
1052 extern int do_one_initcall(initcall_t fn);
1053 extern char __attribute__ ((__section__(".init.data"))) boot_command_line[];
1054 extern char *saved_command_line;
1055 extern unsigned int reset_devices;
1056 void setup_arch(char **);
1057 void prepare_namespace(void);
1058 extern void (*late_time_init)(void);
1059 extern int initcall_debug;
1062 extern unsigned long profile_pc(struct pt_regs *regs);
1063 extern unsigned long
1064 convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
1065 extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1066 int error_code, int si_code);
1067 void signal_fault(struct pt_regs *regs, void *frame, char *where);
1068 extern long syscall_trace_enter(struct pt_regs *);
1069 extern void syscall_trace_leave(struct pt_regs *);
1070 static inline __attribute__((always_inline)) unsigned long regs_return_value(struct pt_regs *regs)
1074 static inline __attribute__((always_inline)) int user_mode(struct pt_regs *regs)
1076 return (regs->cs & 0x3) == 0x3;
1078 static inline __attribute__((always_inline)) int user_mode_vm(struct pt_regs *regs)
1080 return ((regs->cs & 0x3) | (regs->flags & 0x00020000)) >=
1083 static inline __attribute__((always_inline)) int v8086_mode(struct pt_regs *regs)
1085 return (regs->flags & 0x00020000);
1087 static inline __attribute__((always_inline)) unsigned long kernel_stack_pointer(struct pt_regs *regs)
1089 return (unsigned long)(®s->sp);
1091 static inline __attribute__((always_inline)) unsigned long instruction_pointer(struct pt_regs *regs)
1093 return ((regs)->ip);
1095 static inline __attribute__((always_inline)) void instruction_pointer_set(struct pt_regs *regs,
1098 (((regs)->ip) = (val));
1100 static inline __attribute__((always_inline)) unsigned long user_stack_pointer(struct pt_regs *regs)
1102 return ((regs)->sp);
1104 static inline __attribute__((always_inline)) void user_stack_pointer_set(struct pt_regs *regs,
1107 (((regs)->sp) = (val));
1109 static inline __attribute__((always_inline)) unsigned long frame_pointer(struct pt_regs *regs)
1111 return ((regs)->bp);
1113 static inline __attribute__((always_inline)) void frame_pointer_set(struct pt_regs *regs,
1116 (((regs)->bp) = (val));
1118 extern int regs_query_register_offset(const char *name);
1119 extern const char *regs_query_register_name(unsigned int offset);
1120 static inline __attribute__((always_inline)) unsigned long regs_get_register(struct pt_regs *regs,
1121 unsigned int offset)
1123 if (__builtin_constant_p((((__builtin_constant_p(offset > (__builtin_offsetof(struct pt_regs,ss))) ? !!(offset > (__builtin_offsetof(struct pt_regs,ss))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 229, }; ______r = __builtin_expect(!!(offset > (__builtin_offsetof(struct pt_regs,ss))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(offset > (__builtin_offsetof(struct pt_regs,ss))) ? !!(offset > (__builtin_offsetof(struct pt_regs,ss))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 229, }; ______r = __builtin_expect(!!(offset > (__builtin_offsetof(struct pt_regs,ss))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 229, }; ______r = !!(((__builtin_constant_p(offset > (__builtin_offsetof(struct pt_regs,ss))) ? !!(offset > (__builtin_offsetof(struct pt_regs,ss))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 229, }; ______r = __builtin_expect(!!(offset > (__builtin_offsetof(struct pt_regs,ss))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
1125 return *(unsigned long *)((unsigned long)regs + offset);
1127 static inline __attribute__((always_inline)) int regs_within_kernel_stack(struct pt_regs *regs,
1130 return ((addr & ~((((1UL) << 12) << 1) - 1)) ==
1131 (kernel_stack_pointer(regs) & ~((((1UL) << 12) << 1) - 1)));
1133 static inline __attribute__((always_inline)) unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
1136 unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
1138 if (__builtin_constant_p(((regs_within_kernel_stack(regs, (unsigned long)addr)))) ? !!((regs_within_kernel_stack(regs, (unsigned long)addr))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 263, }; ______r = !!((regs_within_kernel_stack(regs, (unsigned long)addr))); ______f.miss_hit[______r]++; ______r; }))
1144 extern int do_get_thread_area(struct task_struct *p, int idx,
1145 struct user_desc *info);
1146 extern int do_set_thread_area(struct task_struct *p, int idx,
1147 struct user_desc *info, int can_allocate);
1148 struct kernel_vm86_regs {
1150 unsigned short es, __esh;
1151 unsigned short ds, __dsh;
1152 unsigned short fs, __fsh;
1153 unsigned short gs, __gsh;
1155 struct kernel_vm86_struct {
1156 struct kernel_vm86_regs regs;
1157 unsigned long flags;
1158 unsigned long screen_bitmap;
1159 unsigned long cpu_type;
1160 struct revectored_struct int_revectored;
1161 struct revectored_struct int21_revectored;
1162 struct vm86plus_info_struct vm86plus;
1163 struct pt_regs *regs32;
1165 void handle_vm86_fault(struct kernel_vm86_regs *, long);
1166 int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
1167 struct pt_regs *save_v86_state(struct kernel_vm86_regs *);
1169 void release_vm86_irqs(struct task_struct *);
1170 struct math_emu_info {
1173 struct pt_regs *regs;
1174 struct kernel_vm86_regs *vm86;
1177 struct _fpx_sw_bytes {
1179 __u32 extended_size;
1185 unsigned short significand[4];
1186 unsigned short exponent;
1189 unsigned short significand[4];
1190 unsigned short exponent;
1191 unsigned short padding[3];
1194 unsigned long element[4];
1200 unsigned long ipoff;
1201 unsigned long cssel;
1202 unsigned long dataoff;
1203 unsigned long datasel;
1204 struct _fpreg _st[8];
1205 unsigned short status;
1206 unsigned short magic;
1207 unsigned long _fxsr_env[6];
1208 unsigned long mxcsr;
1209 unsigned long reserved;
1210 struct _fpxreg _fxsr_st[8];
1211 struct _xmmreg _xmm[8];
1212 unsigned long padding1[44];
1214 unsigned long padding2[12];
1215 struct _fpx_sw_bytes sw_reserved;
1219 unsigned short gs, __gsh;
1220 unsigned short fs, __fsh;
1221 unsigned short es, __esh;
1222 unsigned short ds, __dsh;
1231 unsigned long trapno;
1234 unsigned short cs, __csh;
1235 unsigned long flags;
1236 unsigned long sp_at_signal;
1237 unsigned short ss, __ssh;
1239 unsigned long oldmask;
1247 struct _ymmh_state {
1248 __u32 ymmh_space[64];
1251 struct _fpstate fpstate;
1252 struct _xsave_hdr xstate_hdr;
1253 struct _ymmh_state ymmh;
1255 extern __attribute__((const, noreturn))
1256 int ____ilog2_NaN(void);
1257 static inline __attribute__((always_inline)) __attribute__((const))
1258 int __ilog2_u32(u32 n)
1262 static inline __attribute__((always_inline)) __attribute__((const))
1263 int __ilog2_u64(u64 n)
1265 return fls64(n) - 1;
1267 static inline __attribute__((always_inline)) __attribute__((const))
1268 bool is_power_of_2(unsigned long n)
1270 return (n != 0 && ((n & (n - 1)) == 0));
1272 static inline __attribute__((always_inline)) __attribute__((const))
1273 unsigned long __roundup_pow_of_two(unsigned long n)
1275 return 1UL << fls_long(n - 1);
1277 static inline __attribute__((always_inline)) __attribute__((const))
1278 unsigned long __rounddown_pow_of_two(unsigned long n)
1280 return 1UL << (fls_long(n) - 1);
1282 extern const char linux_banner[];
1283 extern const char linux_proc_banner[];
1284 extern int console_printk[];
1285 static inline __attribute__((always_inline)) void console_silent(void)
1287 (console_printk[0]) = 0;
1289 static inline __attribute__((always_inline)) void console_verbose(void)
1291 if (__builtin_constant_p((((console_printk[0])))) ? !!(((console_printk[0]))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/printk.h", .line = 41, }; ______r = !!(((console_printk[0]))); ______f.miss_hit[______r]++; ______r; }))
1292 (console_printk[0]) = 15;
1298 static inline __attribute__((always_inline)) __attribute__ ((format (printf, 1, 2)))
1299 int no_printk(const char *fmt, ...)
1303 extern __attribute__((regparm(0))) __attribute__ ((format (printf, 1, 2)))
1304 void early_printk(const char *fmt, ...);
1305 extern int printk_needs_cpu(int cpu);
1306 extern void printk_tick(void);
1307 __attribute__((regparm(0))) __attribute__ ((format (printf, 1, 0)))
1308 int vprintk(const char *fmt, va_list args);
1309 __attribute__((regparm(0))) __attribute__ ((format (printf, 1, 2))) __attribute__((__cold__))
1310 int printk(const char *fmt, ...);
1311 extern int __printk_ratelimit(const char *func);
1312 extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
1313 unsigned int interval_msec);
1314 extern int printk_delay_msec;
1315 extern int dmesg_restrict;
1316 extern int kptr_restrict;
1317 void log_buf_kexec_setup(void);
1318 void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_log_buf(int early);
1319 extern void dump_stack(void) __attribute__((__cold__));
1322 DUMP_PREFIX_ADDRESS,
1325 extern void hex_dump_to_buffer(const void *buf, size_t len,
1326 int rowsize, int groupsize,
1327 char *linebuf, size_t linebuflen, bool ascii);
1328 extern void print_hex_dump(const char *level, const char *prefix_str,
1329 int prefix_type, int rowsize, int groupsize,
1330 const void *buf, size_t len, bool ascii);
1331 extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
1332 const void *buf, size_t len);
1333 extern long long dynamic_debug_enabled;
1334 extern long long dynamic_debug_enabled2;
1336 const char *modname;
1337 const char *function;
1338 const char *filename;
1340 unsigned int lineno:24;
1341 unsigned int flags:8;
1343 } __attribute__((aligned(8)));
1344 int ddebug_add_module(struct _ddebug *tab, unsigned int n,
1345 const char *modname);
1346 static inline __attribute__((always_inline)) int ddebug_remove_module(const char *mod)
1351 unsigned long bug_addr;
1353 unsigned short line;
1354 unsigned short flags;
1356 extern void warn_slowpath_fmt(const char *file, const int line,
1357 const char *fmt, ...) __attribute__((format(printf, 3, 4)));
1358 extern void warn_slowpath_fmt_taint(const char *file, const int line,
1359 unsigned taint, const char *fmt, ...)
1360 __attribute__((format(printf, 4, 5)));
1361 extern void warn_slowpath_null(const char *file, const int line);
1362 static inline __attribute__((always_inline)) u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
1371 if (__builtin_constant_p(((upper >= divisor))) ? !!((upper >= divisor)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/div64.h", .line = 46, }; ______r = !!((upper >= divisor)); ______f.miss_hit[______r]++; ______r; })) {
1372 d.v32[1] = upper / divisor;
1375 asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
1376 "rm" (divisor), "0" (d.v32[0]), "1" (upper));
1382 void __might_sleep(const char *file, int line, int preempt_offset);
1383 void might_fault(void);
1384 extern struct atomic_notifier_head panic_notifier_list;
1385 extern long (*panic_blink)(int state);
1386 void panic(const char * fmt, ...)
1387 __attribute__ ((noreturn, format (printf, 1, 2))) __attribute__((__cold__));
1388 extern void oops_enter(void);
1389 extern void oops_exit(void);
1390 void print_oops_end_marker(void);
1391 extern int oops_may_print(void);
1392 void do_exit(long error_code)
1393 __attribute__((noreturn));
1394 void complete_and_exit(struct completion *, long)
1395 __attribute__((noreturn));
1396 int __attribute__((warn_unused_result)) _kstrtoul(const char *s, unsigned int base, unsigned long *res);
1397 int __attribute__((warn_unused_result)) _kstrtol(const char *s, unsigned int base, long *res);
1398 int __attribute__((warn_unused_result)) kstrtoull(const char *s, unsigned int base, unsigned long long *res);
1399 int __attribute__((warn_unused_result)) kstrtoll(const char *s, unsigned int base, long long *res);
/*
 * kstrtoul - convert a NUL-terminated string to an unsigned long.
 * When unsigned long and unsigned long long have identical size and
 * alignment, the cast of res to (unsigned long long *) is safe, so the
 * conversion is delegated to kstrtoull(); otherwise _kstrtoul() is used.
 * NOTE(review): this extraction elides the function's braces and part of
 * the branch-profiling record (.line field and, presumably, an else).
 */
1400 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtoul(const char *s, unsigned int base, unsigned long *res)
/* The oversized conditional is ftrace branch profiling: a per-call-site
 * static struct ftrace_branch_data in section "_ftrace_branch" counts
 * hits/misses of the (compile-time constant) size/alignment test. */
1402 if (__builtin_constant_p(((sizeof(unsigned long) == sizeof(unsigned long long) && __alignof__(unsigned long) == __alignof__(unsigned long long)))) ? !!((sizeof(unsigned long) == sizeof(unsigned long long) && __alignof__(unsigned long) == __alignof__(unsigned long long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
1403 "include/linux/kernel.h"
1406 , }; ______r = !!((sizeof(unsigned long) == sizeof(unsigned long long) && __alignof__(unsigned long) == __alignof__(unsigned long long))); ______f.miss_hit[______r]++; ______r; }))
1407 return kstrtoull(s, base, (unsigned long long *)res);
1409 return _kstrtoul(s, base, res);
/*
 * kstrtol - convert a NUL-terminated string to a (signed) long.
 * Mirrors kstrtoul above: delegate to kstrtoll() when long and long long
 * share size/alignment (pointer cast is then safe), else _kstrtol().
 * NOTE(review): braces and parts of the profiling record are elided here.
 */
1411 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtol(const char *s, unsigned int base, long *res)
/* ftrace branch-profiling wrapper around the compile-time size test. */
1413 if (__builtin_constant_p(((sizeof(long) == sizeof(long long) && __alignof__(long) == __alignof__(long long)))) ? !!((sizeof(long) == sizeof(long long) && __alignof__(long) == __alignof__(long long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
1414 "include/linux/kernel.h"
1417 , }; ______r = !!((sizeof(long) == sizeof(long long) && __alignof__(long) == __alignof__(long long))); ______f.miss_hit[______r]++; ______r; }))
1418 return kstrtoll(s, base, (long long *)res);
1420 return _kstrtol(s, base, res);
1422 int __attribute__((warn_unused_result)) kstrtouint(const char *s, unsigned int base, unsigned int *res);
1423 int __attribute__((warn_unused_result)) kstrtoint(const char *s, unsigned int base, int *res);
/* kstrtou64: u64 is unsigned long long here (see typedefs at top of file),
 * so this is a direct alias for kstrtoull().  Braces elided in extraction. */
1424 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou64(const char *s, unsigned int base, u64 *res)
1426 return kstrtoull(s, base, res);
/* kstrtos64: s64 is signed long long, so alias for kstrtoll(). */
1428 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos64(const char *s, unsigned int base, s64 *res)
1430 return kstrtoll(s, base, res);
/* kstrtou32: u32 is unsigned int, so alias for kstrtouint(). */
1432 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou32(const char *s, unsigned int base, u32 *res)
1434 return kstrtouint(s, base, res);
/* kstrtos32: s32 is signed int, so alias for kstrtoint(). */
1436 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos32(const char *s, unsigned int base, s32 *res)
1438 return kstrtoint(s, base, res);
1440 int __attribute__((warn_unused_result)) kstrtou16(const char *s, unsigned int base, u16 *res);
1441 int __attribute__((warn_unused_result)) kstrtos16(const char *s, unsigned int base, s16 *res);
1442 int __attribute__((warn_unused_result)) kstrtou8(const char *s, unsigned int base, u8 *res);
1443 int __attribute__((warn_unused_result)) kstrtos8(const char *s, unsigned int base, s8 *res);
1444 int __attribute__((warn_unused_result)) kstrtoull_from_user(const char *s, size_t count, unsigned int base, unsigned long long *res);
1445 int __attribute__((warn_unused_result)) kstrtoll_from_user(const char *s, size_t count, unsigned int base, long long *res);
1446 int __attribute__((warn_unused_result)) kstrtoul_from_user(const char *s, size_t count, unsigned int base, unsigned long *res);
1447 int __attribute__((warn_unused_result)) kstrtol_from_user(const char *s, size_t count, unsigned int base, long *res);
1448 int __attribute__((warn_unused_result)) kstrtouint_from_user(const char *s, size_t count, unsigned int base, unsigned int *res);
1449 int __attribute__((warn_unused_result)) kstrtoint_from_user(const char *s, size_t count, unsigned int base, int *res);
1450 int __attribute__((warn_unused_result)) kstrtou16_from_user(const char *s, size_t count, unsigned int base, u16 *res);
1451 int __attribute__((warn_unused_result)) kstrtos16_from_user(const char *s, size_t count, unsigned int base, s16 *res);
1452 int __attribute__((warn_unused_result)) kstrtou8_from_user(const char *s, size_t count, unsigned int base, u8 *res);
1453 int __attribute__((warn_unused_result)) kstrtos8_from_user(const char *s, size_t count, unsigned int base, s8 *res);
/* kstrtou64_from_user: parse at most 'count' bytes of a userspace buffer
 * into a u64; u64 == unsigned long long, so alias for kstrtoull_from_user(). */
1454 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou64_from_user(const char *s, size_t count, unsigned int base, u64 *res)
1456 return kstrtoull_from_user(s, count, base, res);
/* kstrtos64_from_user: s64 == long long, alias for kstrtoll_from_user(). */
1458 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos64_from_user(const char *s, size_t count, unsigned int base, s64 *res)
1460 return kstrtoll_from_user(s, count, base, res);
/* kstrtou32_from_user: u32 == unsigned int, alias for kstrtouint_from_user(). */
1462 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou32_from_user(const char *s, size_t count, unsigned int base, u32 *res)
1464 return kstrtouint_from_user(s, count, base, res);
/* kstrtos32_from_user: s32 == int, alias for kstrtoint_from_user(). */
1466 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos32_from_user(const char *s, size_t count, unsigned int base, s32 *res)
1468 return kstrtoint_from_user(s, count, base, res);
1470 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
1471 extern long simple_strtol(const char *,char **,unsigned int);
1472 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
1473 extern long long simple_strtoll(const char *,char **,unsigned int);
1474 extern int sprintf(char * buf, const char * fmt, ...)
1475 __attribute__ ((format (printf, 2, 3)));
1476 extern int vsprintf(char *buf, const char *, va_list)
1477 __attribute__ ((format (printf, 2, 0)));
1478 extern int snprintf(char * buf, size_t size, const char * fmt, ...)
1479 __attribute__ ((format (printf, 3, 4)));
1480 extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1481 __attribute__ ((format (printf, 3, 0)));
1482 extern int scnprintf(char * buf, size_t size, const char * fmt, ...)
1483 __attribute__ ((format (printf, 3, 4)));
1484 extern int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
1485 __attribute__ ((format (printf, 3, 0)));
1486 extern char *kasprintf(gfp_t gfp, const char *fmt, ...)
1487 __attribute__ ((format (printf, 2, 3)));
1488 extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
1489 extern int sscanf(const char *, const char *, ...)
1490 __attribute__ ((format (scanf, 2, 3)));
1491 extern int vsscanf(const char *, const char *, va_list)
1492 __attribute__ ((format (scanf, 2, 0)));
1493 extern int get_option(char **str, int *pint);
1494 extern char *get_options(const char *str, int nints, int *ints);
1495 extern unsigned long long memparse(const char *ptr, char **retptr);
1496 extern int core_kernel_text(unsigned long addr);
1497 extern int core_kernel_data(unsigned long addr);
1498 extern int __kernel_text_address(unsigned long addr);
1499 extern int kernel_text_address(unsigned long addr);
1500 extern int func_ptr_is_kernel_text(void *ptr);
1502 extern struct pid *session_of_pgrp(struct pid *pgrp);
1503 unsigned long int_sqrt(unsigned long);
1504 extern void bust_spinlocks(int yes);
1505 extern void wake_up_klogd(void);
1506 extern int oops_in_progress;
1507 extern int panic_timeout;
1508 extern int panic_on_oops;
1509 extern int panic_on_unrecovered_nmi;
1510 extern int panic_on_io_nmi;
1511 extern const char *print_tainted(void);
1512 extern void add_taint(unsigned flag);
1513 extern int test_taint(unsigned flag);
1514 extern unsigned long get_taint(void);
1515 extern int root_mountflags;
1516 extern bool early_boot_irqs_disabled;
1517 extern enum system_states {
1523 SYSTEM_SUSPEND_DISK,
1525 extern const char hex_asc[];
/*
 * pack_hex_byte - write the two ASCII hex digits of 'byte' into buf,
 * high nibble first, via the hex_asc lookup table; buf is advanced by two.
 * NOTE(review): the trailing "return buf;" appears to be elided in this
 * extraction — the declared char * return suggests it returns the advanced
 * pointer; confirm against include/linux/kernel.h.
 */
1526 static inline __attribute__((always_inline)) char *pack_hex_byte(char *buf, u8 byte)
1528 *buf++ = hex_asc[((byte) & 0xf0) >> 4];
1529 *buf++ = hex_asc[((byte) & 0x0f)];
1532 extern int hex_to_bin(char ch);
1533 extern void hex2bin(u8 *dst, const char *src, size_t count);
1534 void tracing_on(void);
1535 void tracing_off(void);
1536 void tracing_off_permanent(void);
1537 int tracing_is_on(void);
1538 enum ftrace_dump_mode {
1543 extern void tracing_start(void);
1544 extern void tracing_stop(void);
1545 extern void ftrace_off_permanent(void);
1546 static inline __attribute__((always_inline)) void __attribute__ ((format (printf, 1, 2)))
1547 ____trace_printk_check_format(const char *fmt, ...)
1551 __trace_bprintk(unsigned long ip, const char *fmt, ...)
1552 __attribute__ ((format (printf, 2, 3)));
1554 __trace_printk(unsigned long ip, const char *fmt, ...)
1555 __attribute__ ((format (printf, 2, 3)));
1556 extern void trace_dump_stack(void);
1558 __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
1560 __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
1561 extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
1563 extern int do_sysinfo(struct sysinfo *info);
1566 unsigned long loads[3];
1567 unsigned long totalram;
1568 unsigned long freeram;
1569 unsigned long sharedram;
1570 unsigned long bufferram;
1571 unsigned long totalswap;
1572 unsigned long freeswap;
1573 unsigned short procs;
1575 unsigned long totalhigh;
1576 unsigned long freehigh;
1577 unsigned int mem_unit;
1578 char _f[20-2*sizeof(long)-sizeof(int)];
1580 extern int __build_bug_on_failed;
1581 extern void __bad_percpu_size(void);
/*
 * x86_this_cpu_constant_test_bit - test bit 'nr' of a per-CPU bitmap when
 * nr is a compile-time constant.  Locates the containing word (nr / 32 —
 * 32-bit longs on this build), reads it %fs-relative (the per-CPU segment
 * here, cf. get_current below) via the size-dispatched mov, and masks the
 * bit.  Braces elided in this extraction.
 */
1582 static inline __attribute__((always_inline)) __attribute__((always_inline)) int x86_this_cpu_constant_test_bit(unsigned int nr,
1583 const unsigned long *addr)
1585 unsigned long *a = (unsigned long *)addr + nr / 32;
/* The ({ ... }) is a percpu_from_op-style read: one mov of *a from the
 * %fs segment, instruction width chosen by sizeof(*a). */
1586 return ((1UL << (nr % 32)) & ({ typeof(*a) pfo_ret__; switch (sizeof(*a)) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m" (*a)); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; default: __bad_percpu_size(); } pfo_ret__; })) != 0;
1588 static inline __attribute__((always_inline)) int x86_this_cpu_variable_test_bit(int nr,
1589 const unsigned long *addr)
1592 asm volatile("bt ""%%""fs"":" "%P" "2"",%1\n\t"
1595 : "m" (*(unsigned long *)addr), "Ir" (nr));
1598 extern unsigned long __per_cpu_offset[8];
1599 extern void setup_per_cpu_areas(void);
1600 extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) this_cpu_off;
1602 extern __attribute__((section(".data..percpu" ""))) __typeof__(struct task_struct *) current_task;
/*
 * get_current - return the running task's task_struct pointer by reading
 * the per-CPU variable 'current_task' (declared above in .data..percpu)
 * with a single %fs-relative mov, width-dispatched on sizeof(current_task).
 * Braces elided in this extraction.
 */
1603 static inline __attribute__((always_inline)) __attribute__((always_inline)) struct task_struct *get_current(void)
1605 return ({ typeof(current_task) pfo_ret__; switch (sizeof(current_task)) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "p" (&(current_task))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; default: __bad_percpu_size(); } pfo_ret__; });
1607 extern void __xchg_wrong_size(void);
1608 static inline __attribute__((always_inline)) void set_64bit(volatile u64 *ptr, u64 value)
1611 u32 high = value >> 32;
1613 asm volatile("\n1:\t"
1614 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchg8b %0\n\t"
1616 : "=m" (*ptr), "+A" (prev)
1617 : "b" (low), "c" (high)
1620 extern void __cmpxchg_wrong_size(void);
1621 static inline __attribute__((always_inline)) u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
1624 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchg8b %1"
1628 "c" ((u32)(new >> 32)),
1633 static inline __attribute__((always_inline)) u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
1636 asm volatile("cmpxchg8b %1"
1640 "c" ((u32)(new >> 32)),
1645 extern const unsigned char * const *ideal_nops;
1646 extern void arch_init_ideal_nops(void);
1647 static inline __attribute__((always_inline)) unsigned long native_save_fl(void)
1649 unsigned long flags;
1650 asm volatile("# __raw_save_flags\n\t"
1657 static inline __attribute__((always_inline)) void native_restore_fl(unsigned long flags)
1659 asm volatile("push %0 ; popf"
/* native_irq_disable - mask maskable interrupts with 'cli'; the "memory"
 * clobber is a compiler barrier so accesses aren't moved across it. */
1664 static inline __attribute__((always_inline)) void native_irq_disable(void)
1666 asm volatile("cli": : :"memory");
/* native_irq_enable - unmask maskable interrupts with 'sti' (compiler
 * barrier via "memory" clobber). */
1668 static inline __attribute__((always_inline)) void native_irq_enable(void)
1670 asm volatile("sti": : :"memory");
/* native_safe_halt - enable interrupts and halt as the back-to-back
 * "sti; hlt" pair, so the CPU sleeps until the next interrupt. */
1672 static inline __attribute__((always_inline)) void native_safe_halt(void)
1674 asm volatile("sti; hlt": : :"memory");
/* native_halt - 'hlt' without touching the interrupt flag. */
1676 static inline __attribute__((always_inline)) void native_halt(void)
1678 asm volatile("hlt": : :"memory");
1680 typedef u64 pteval_t;
1681 typedef u64 pmdval_t;
1682 typedef u64 pudval_t;
1683 typedef u64 pgdval_t;
1684 typedef u64 pgprotval_t;
1687 unsigned long pte_low, pte_high;
1691 extern bool __vmalloc_start_set;
1692 typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
1693 typedef struct { pgdval_t pgd; } pgd_t;
/* native_make_pgd - wrap a raw pgdval_t in the pgd_t struct (type safety
 * wrapper; no transformation).  Braces elided in this extraction. */
1694 static inline __attribute__((always_inline)) pgd_t native_make_pgd(pgdval_t val)
1696 return (pgd_t) { val };
1698 static inline __attribute__((always_inline)) pgdval_t native_pgd_val(pgd_t pgd)
/* pgd_flags - return only the flag bits of a PGD entry: the mask clears
 * the physical-frame-number field (page-aligned bits below the 44-bit
 * physical-address limit encoded in the expression), leaving the low
 * attribute bits and any bits above the PFN. */
1702 static inline __attribute__((always_inline)) pgdval_t pgd_flags(pgd_t pgd)
1704 return native_pgd_val(pgd) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))));
1706 typedef struct { pgd_t pgd; } pud_t;
/* In this configuration the PUD level is folded into the PGD (pud_t wraps
 * a pgd_t, and pud_offset below just recasts the pgd pointer), so top-level
 * entries are always treated as present and valid: constant stubs. */
1707 static inline __attribute__((always_inline)) int pgd_none(pgd_t pgd) { return 0; }
1708 static inline __attribute__((always_inline)) int pgd_bad(pgd_t pgd) { return 0; }
1709 static inline __attribute__((always_inline)) int pgd_present(pgd_t pgd) { return 1; }
1710 static inline __attribute__((always_inline)) void pgd_clear(pgd_t *pgd) { }
/* pud_offset - with the PUD folded into the PGD, the "pud" for any address
 * is the pgd entry itself, reinterpreted; 'address' is unused. */
1711 static inline __attribute__((always_inline)) pud_t * pud_offset(pgd_t * pgd, unsigned long address)
1713 return (pud_t *)pgd;
/* native_pud_val - unwrap a folded pud_t via its embedded pgd. */
1715 static inline __attribute__((always_inline)) pudval_t native_pud_val(pud_t pud)
1717 return native_pgd_val(pud.pgd);
1719 typedef struct { pmdval_t pmd; } pmd_t;
/* native_make_pmd - wrap a raw pmdval_t in pmd_t (type-safety wrapper). */
1720 static inline __attribute__((always_inline)) pmd_t native_make_pmd(pmdval_t val)
1722 return (pmd_t) { val };
1724 static inline __attribute__((always_inline)) pmdval_t native_pmd_val(pmd_t pmd)
/* pud_flags - flag bits of a PUD entry; same PFN-clearing mask as
 * pgd_flags above. */
1728 static inline __attribute__((always_inline)) pudval_t pud_flags(pud_t pud)
1730 return native_pud_val(pud) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))));
/* pmd_flags - flag bits of a PMD entry (PFN field masked off). */
1732 static inline __attribute__((always_inline)) pmdval_t pmd_flags(pmd_t pmd)
1734 return native_pmd_val(pmd) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))));
/* native_make_pte - wrap a raw pteval_t in pte_t via designated
 * initializer (type-safety wrapper). */
1736 static inline __attribute__((always_inline)) pte_t native_make_pte(pteval_t val)
1738 return (pte_t) { .pte = val };
1740 static inline __attribute__((always_inline)) pteval_t native_pte_val(pte_t pte)
/* pte_flags - flag bits of a PTE (PFN field masked off, as above). */
1744 static inline __attribute__((always_inline)) pteval_t pte_flags(pte_t pte)
1746 return native_pte_val(pte) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))));
1748 typedef struct page *pgtable_t;
1749 extern pteval_t __supported_pte_mask;
1750 extern void set_nx(void);
1751 extern int nx_enabled;
1752 extern pgprot_t pgprot_writecombine(pgprot_t prot);
1754 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
1755 unsigned long size, pgprot_t vma_prot);
1756 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
1757 unsigned long size, pgprot_t *vma_prot);
1758 void set_pte_vaddr(unsigned long vaddr, pte_t pte);
1759 extern void native_pagetable_reserve(u64 start, u64 end);
1760 extern void native_pagetable_setup_start(pgd_t *base);
1761 extern void native_pagetable_setup_done(pgd_t *base);
1763 extern void arch_report_meminfo(struct seq_file *m);
1771 extern void update_page_count(int level, unsigned long pages);
1772 extern pte_t *lookup_address(unsigned long address, unsigned int *level);
1773 struct desc_struct {
1782 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
1783 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
1786 } __attribute__((packed));
1788 GATE_INTERRUPT = 0xE,
1793 struct gate_struct64 {
1796 unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
1800 } __attribute__((packed));
1806 struct ldttss_desc64 {
1809 unsigned base1 : 8, type : 5, dpl : 2, p : 1;
1810 unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
1813 } __attribute__((packed));
1814 typedef struct desc_struct gate_desc;
1815 typedef struct desc_struct ldt_desc;
1816 typedef struct desc_struct tss_desc;
1818 unsigned short size;
1819 unsigned long address;
1820 } __attribute__((packed)) ;
1824 KM_SKB_DATA_SOFTIRQ,
1845 struct thread_struct;
1852 struct paravirt_callee_save {
1856 unsigned int kernel_rpl;
1857 int shared_kernel_pmd;
1858 int paravirt_enabled;
1861 struct pv_init_ops {
1862 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
1863 unsigned long addr, unsigned len);
1865 struct pv_lazy_ops {
1866 void (*enter)(void);
1867 void (*leave)(void);
1869 struct pv_time_ops {
1870 unsigned long long (*sched_clock)(void);
1871 unsigned long (*get_tsc_khz)(void);
1874 unsigned long (*get_debugreg)(int regno);
1875 void (*set_debugreg)(int regno, unsigned long value);
1877 unsigned long (*read_cr0)(void);
1878 void (*write_cr0)(unsigned long);
1879 unsigned long (*read_cr4_safe)(void);
1880 unsigned long (*read_cr4)(void);
1881 void (*write_cr4)(unsigned long);
1882 void (*load_tr_desc)(void);
1883 void (*load_gdt)(const struct desc_ptr *);
1884 void (*load_idt)(const struct desc_ptr *);
1885 void (*store_gdt)(struct desc_ptr *);
1886 void (*store_idt)(struct desc_ptr *);
1887 void (*set_ldt)(const void *desc, unsigned entries);
1888 unsigned long (*store_tr)(void);
1889 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
1890 void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
1892 void (*write_gdt_entry)(struct desc_struct *,
1893 int entrynum, const void *desc, int size);
1894 void (*write_idt_entry)(gate_desc *,
1895 int entrynum, const gate_desc *gate);
1896 void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
1897 void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
1898 void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
1899 void (*set_iopl_mask)(unsigned mask);
1900 void (*wbinvd)(void);
1901 void (*io_delay)(void);
1902 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
1903 unsigned int *ecx, unsigned int *edx);
1904 u64 (*read_msr)(unsigned int msr, int *err);
1905 int (*rdmsr_regs)(u32 *regs);
1906 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
1907 int (*wrmsr_regs)(u32 *regs);
1908 u64 (*read_tsc)(void);
1909 u64 (*read_pmc)(int counter);
1910 unsigned long long (*read_tscp)(unsigned int *aux);
1911 void (*irq_enable_sysexit)(void);
1912 void (*usergs_sysret64)(void);
1913 void (*usergs_sysret32)(void);
1915 void (*swapgs)(void);
1916 void (*start_context_switch)(struct task_struct *prev);
1917 void (*end_context_switch)(struct task_struct *next);
1920 struct paravirt_callee_save save_fl;
1921 struct paravirt_callee_save restore_fl;
1922 struct paravirt_callee_save irq_disable;
1923 struct paravirt_callee_save irq_enable;
1924 void (*safe_halt)(void);
1927 struct pv_apic_ops {
1928 void (*startup_ipi_hook)(int phys_apicid,
1929 unsigned long start_eip,
1930 unsigned long start_esp);
1933 unsigned long (*read_cr2)(void);
1934 void (*write_cr2)(unsigned long);
1935 unsigned long (*read_cr3)(void);
1936 void (*write_cr3)(unsigned long);
1937 void (*activate_mm)(struct mm_struct *prev,
1938 struct mm_struct *next);
1939 void (*dup_mmap)(struct mm_struct *oldmm,
1940 struct mm_struct *mm);
1941 void (*exit_mmap)(struct mm_struct *mm);
1942 void (*flush_tlb_user)(void);
1943 void (*flush_tlb_kernel)(void);
1944 void (*flush_tlb_single)(unsigned long addr);
1945 void (*flush_tlb_others)(const struct cpumask *cpus,
1946 struct mm_struct *mm,
1948 int (*pgd_alloc)(struct mm_struct *mm);
1949 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
1950 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
1951 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
1952 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
1953 void (*release_pte)(unsigned long pfn);
1954 void (*release_pmd)(unsigned long pfn);
1955 void (*release_pud)(unsigned long pfn);
1956 void (*set_pte)(pte_t *ptep, pte_t pteval);
1957 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
1958 pte_t *ptep, pte_t pteval);
1959 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
1960 void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
1961 pmd_t *pmdp, pmd_t pmdval);
1962 void (*pte_update)(struct mm_struct *mm, unsigned long addr,
1964 void (*pte_update_defer)(struct mm_struct *mm,
1965 unsigned long addr, pte_t *ptep);
1966 void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
1968 void (*pmd_update_defer)(struct mm_struct *mm,
1969 unsigned long addr, pmd_t *pmdp);
1970 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
1972 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
1973 pte_t *ptep, pte_t pte);
1974 struct paravirt_callee_save pte_val;
1975 struct paravirt_callee_save make_pte;
1976 struct paravirt_callee_save pgd_val;
1977 struct paravirt_callee_save make_pgd;
1978 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
1979 void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
1981 void (*pmd_clear)(pmd_t *pmdp);
1982 void (*set_pud)(pud_t *pudp, pud_t pudval);
1983 struct paravirt_callee_save pmd_val;
1984 struct paravirt_callee_save make_pmd;
1985 struct pv_lazy_ops lazy_mode;
1986 void (*set_fixmap)(unsigned idx,
1987 phys_addr_t phys, pgprot_t flags);
1989 struct arch_spinlock;
1990 struct pv_lock_ops {
1991 int (*spin_is_locked)(struct arch_spinlock *lock);
1992 int (*spin_is_contended)(struct arch_spinlock *lock);
1993 void (*spin_lock)(struct arch_spinlock *lock);
1994 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
1995 int (*spin_trylock)(struct arch_spinlock *lock);
1996 void (*spin_unlock)(struct arch_spinlock *lock);
1998 struct paravirt_patch_template {
1999 struct pv_init_ops pv_init_ops;
2000 struct pv_time_ops pv_time_ops;
2001 struct pv_cpu_ops pv_cpu_ops;
2002 struct pv_irq_ops pv_irq_ops;
2003 struct pv_apic_ops pv_apic_ops;
2004 struct pv_mmu_ops pv_mmu_ops;
2005 struct pv_lock_ops pv_lock_ops;
2007 extern struct pv_info pv_info;
2008 extern struct pv_init_ops pv_init_ops;
2009 extern struct pv_time_ops pv_time_ops;
2010 extern struct pv_cpu_ops pv_cpu_ops;
2011 extern struct pv_irq_ops pv_irq_ops;
2012 extern struct pv_apic_ops pv_apic_ops;
2013 extern struct pv_mmu_ops pv_mmu_ops;
2014 extern struct pv_lock_ops pv_lock_ops;
2015 unsigned paravirt_patch_nop(void);
2016 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
2017 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
2018 unsigned paravirt_patch_ignore(unsigned len);
2019 unsigned paravirt_patch_call(void *insnbuf,
2020 const void *target, u16 tgt_clobbers,
2021 unsigned long addr, u16 site_clobbers,
2023 unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
2024 unsigned long addr, unsigned len);
2025 unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
2026 unsigned long addr, unsigned len);
2027 unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
2028 const char *start, const char *end);
2029 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
2030 unsigned long addr, unsigned len);
2031 int paravirt_disable_iospace(void);
2032 enum paravirt_lazy_mode {
2037 enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
2038 void paravirt_start_context_switch(struct task_struct *prev);
2039 void paravirt_end_context_switch(struct task_struct *next);
2040 void paravirt_enter_lazy_mmu(void);
2041 void paravirt_leave_lazy_mmu(void);
2042 void _paravirt_nop(void);
2043 u32 _paravirt_ident_32(u32);
2044 u64 _paravirt_ident_64(u64);
2045 struct paravirt_patch_site {
2051 extern struct paravirt_patch_site __parainstructions[],
2052 __parainstructions_end[];
2053 extern int __bitmap_empty(const unsigned long *bitmap, int bits);
2054 extern int __bitmap_full(const unsigned long *bitmap, int bits);
2055 extern int __bitmap_equal(const unsigned long *bitmap1,
2056 const unsigned long *bitmap2, int bits);
2057 extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
2059 extern void __bitmap_shift_right(unsigned long *dst,
2060 const unsigned long *src, int shift, int bits);
2061 extern void __bitmap_shift_left(unsigned long *dst,
2062 const unsigned long *src, int shift, int bits);
2063 extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
2064 const unsigned long *bitmap2, int bits);
2065 extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
2066 const unsigned long *bitmap2, int bits);
2067 extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
2068 const unsigned long *bitmap2, int bits);
2069 extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
2070 const unsigned long *bitmap2, int bits);
2071 extern int __bitmap_intersects(const unsigned long *bitmap1,
2072 const unsigned long *bitmap2, int bits);
2073 extern int __bitmap_subset(const unsigned long *bitmap1,
2074 const unsigned long *bitmap2, int bits);
2075 extern int __bitmap_weight(const unsigned long *bitmap, int bits);
2076 extern void bitmap_set(unsigned long *map, int i, int len);
2077 extern void bitmap_clear(unsigned long *map, int start, int nr);
2078 extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
2080 unsigned long start,
2082 unsigned long align_mask);
2083 extern int bitmap_scnprintf(char *buf, unsigned int len,
2084 const unsigned long *src, int nbits);
2085 extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
2086 unsigned long *dst, int nbits);
2087 extern int bitmap_parse_user(const char *ubuf, unsigned int ulen,
2088 unsigned long *dst, int nbits);
2089 extern int bitmap_scnlistprintf(char *buf, unsigned int len,
2090 const unsigned long *src, int nbits);
2091 extern int bitmap_parselist(const char *buf, unsigned long *maskp,
2093 extern int bitmap_parselist_user(const char *ubuf, unsigned int ulen,
2094 unsigned long *dst, int nbits);
2095 extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
2096 const unsigned long *old, const unsigned long *new, int bits);
2097 extern int bitmap_bitremap(int oldbit,
2098 const unsigned long *old, const unsigned long *new, int bits);
2099 extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
2100 const unsigned long *relmap, int bits);
2101 extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
2103 extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
2104 extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
2105 extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
2106 extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
2107 static inline __attribute__((always_inline)) void bitmap_zero(unsigned long *dst, int nbits)
2109 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 159, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2112 int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long);
2113 __builtin_memset(dst, 0, len);
/*
 * bitmap_fill - set the first nbits bits of dst.
 * For bitmaps not known small at compile time: memset all but the last
 * word to 0xff, then store a partial mask ((1UL << (nbits % 32)) - 1, or
 * ~0UL when nbits is a multiple of 32) into the last word.  The oversized
 * conditional is ftrace branch profiling of the "nbits is a small
 * compile-time constant" test.  NOTE(review): braces elided here.
 */
2116 static inline __attribute__((always_inline)) void bitmap_fill(unsigned long *dst, int nbits)
2118 size_t nlongs = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)));
2119 if (__builtin_constant_p(((!(__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!((!(__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 170, }; ______r = !!((!(__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; })) {
2120 int len = (nlongs - 1) * sizeof(unsigned long);
2121 __builtin_memset(dst, 0xff, len);
/* Last word always gets the (possibly partial) mask. */
2123 dst[nlongs - 1] = ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
2125 static inline __attribute__((always_inline)) void bitmap_copy(unsigned long *dst, const unsigned long *src,
2128 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 180, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2131 int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long);
2132 __builtin_memcpy(dst, src, len);
/*
 * bitmap_and - dst = src1 & src2 over nbits; returns nonzero if the
 * result has any bit set.  Small constant bitmaps (<= 32 bits, one word)
 * take the inline single-word path; otherwise __bitmap_and().  The big
 * conditional is ftrace branch-profiling instrumentation.
 * NOTE(review): the fast path tests the entire word, so stray bits past
 * nbits in the operands would affect the return — callers are expected to
 * keep those clear.  Braces elided in this extraction.
 */
2135 static inline __attribute__((always_inline)) int bitmap_and(unsigned long *dst, const unsigned long *src1,
2136 const unsigned long *src2, int nbits)
2138 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 191, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2139 return (*dst = *src1 & *src2) != 0;
2140 return __bitmap_and(dst, src1, src2, nbits);
/*
 * bitmap_or - dst = src1 | src2 over nbits.  One-word inline path for
 * small compile-time-constant nbits, __bitmap_or() otherwise; the huge
 * conditional is ftrace branch profiling.  NOTE(review): braces (and the
 * 'else' line) are elided in this extraction.
 */
2142 static inline __attribute__((always_inline)) void bitmap_or(unsigned long *dst, const unsigned long *src1,
2143 const unsigned long *src2, int nbits)
2145 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 199, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2146 *dst = *src1 | *src2;
2148 __bitmap_or(dst, src1, src2, nbits);
/*
 * bitmap_xor - dst = src1 ^ src2 over nbits.  Same structure as
 * bitmap_or: inline one-word fast path vs __bitmap_xor(), with ftrace
 * branch profiling around the small-constant test.  Braces/'else' elided.
 */
2150 static inline __attribute__((always_inline)) void bitmap_xor(unsigned long *dst, const unsigned long *src1,
2151 const unsigned long *src2, int nbits)
2153 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 208, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2154 *dst = *src1 ^ *src2;
2156 __bitmap_xor(dst, src1, src2, nbits);
/*
 * bitmap_andnot - dst = src1 & ~src2 over nbits; returns nonzero if any
 * result bit is set (whole-word test on the fast path, cf. bitmap_and).
 * Inline one-word path for small constant nbits, __bitmap_andnot()
 * otherwise; ftrace branch profiling around the test.  Braces elided.
 */
2158 static inline __attribute__((always_inline)) int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
2159 const unsigned long *src2, int nbits)
2161 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 217, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2162 return (*dst = *src1 & ~(*src2)) != 0;
2163 return __bitmap_andnot(dst, src1, src2, nbits);
/*
 * bitmap_complement - dst = ~src over nbits.  The one-word fast path masks
 * the inverted word down to nbits with the last-word mask so bits past
 * nbits stay clear; larger bitmaps go to __bitmap_complement().
 * NOTE(review): the 'int nbits)' parameter line and braces are elided in
 * this extraction.
 */
2165 static inline __attribute__((always_inline)) void bitmap_complement(unsigned long *dst, const unsigned long *src,
2168 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 225, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2169 *dst = ~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
2171 __bitmap_complement(dst, src, nbits);
2173 static inline __attribute__((always_inline)) int bitmap_equal(const unsigned long *src1,
2174 const unsigned long *src2, int nbits)
2176 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 234, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2177 return ! ((*src1 ^ *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
2179 return __bitmap_equal(src1, src2, nbits);
2181 static inline __attribute__((always_inline)) int bitmap_intersects(const unsigned long *src1,
2182 const unsigned long *src2, int nbits)
2184 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 243, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2185 return ((*src1 & *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )) != 0;
2187 return __bitmap_intersects(src1, src2, nbits);
2189 static inline __attribute__((always_inline)) int bitmap_subset(const unsigned long *src1,
2190 const unsigned long *src2, int nbits)
2192 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 252, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2193 return ! ((*src1 & ~(*src2)) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
2195 return __bitmap_subset(src1, src2, nbits);
2197 static inline __attribute__((always_inline)) int bitmap_empty(const unsigned long *src, int nbits)
2199 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 260, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2200 return ! (*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
2202 return __bitmap_empty(src, nbits);
2204 static inline __attribute__((always_inline)) int bitmap_full(const unsigned long *src, int nbits)
2206 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 268, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2207 return ! (~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
2209 return __bitmap_full(src, nbits);
2211 static inline __attribute__((always_inline)) int bitmap_weight(const unsigned long *src, int nbits)
2213 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 276, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2214 return hweight_long(*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
2215 return __bitmap_weight(src, nbits);
2217 static inline __attribute__((always_inline)) void bitmap_shift_right(unsigned long *dst,
2218 const unsigned long *src, int n, int nbits)
2220 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 284, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2223 __bitmap_shift_right(dst, src, n, nbits);
2225 static inline __attribute__((always_inline)) void bitmap_shift_left(unsigned long *dst,
2226 const unsigned long *src, int n, int nbits)
2228 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 293, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
2229 *dst = (*src << n) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
2231 __bitmap_shift_left(dst, src, n, nbits);
/*
 * bitmap_parse() - parse a hex-string bitmap from a kernel buffer into
 * maskp (at most nmaskbits bits).  Thin wrapper that forwards to
 * __bitmap_parse() with is_user = 0 (third argument): the buffer is a
 * kernel pointer, not a userspace one.
 *
 * NOTE(review): braces restored; statements are verbatim from the dump.
 */
static inline __attribute__((always_inline)) int bitmap_parse(const char *buf, unsigned int buflen,
			unsigned long *maskp, int nmaskbits)
{
	return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
/*
 * NOTE(review): everything below is gcc -E output of linux-3.0.4's
 * include/linux/cpumask.h, and this dump has dropped lines (the embedded
 * original line numbers jump, e.g. 2244 -> 2248), so brace lines and some
 * statements are missing.  Code text is left byte-identical; only comments
 * are added.  NR_CPUS is 8 on this build — the bare `8` below is
 * nr_cpumask_bits after macro expansion.
 */
/* A cpumask is an NR_CPUS(=8)-bit bitmap stored as an array of longs. */
2238 typedef struct cpumask { unsigned long bits[(((8) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } cpumask_t;
2239 extern int nr_cpu_ids;
/* Kernel-maintained global CPU state masks (possible/online/present/active). */
2240 extern const struct cpumask *const cpu_possible_mask;
2241 extern const struct cpumask *const cpu_online_mask;
2242 extern const struct cpumask *const cpu_present_mask;
2243 extern const struct cpumask *const cpu_active_mask;
/* Validates/passes through a cpu number.  Body (original lines 2245-2247)
 * is missing from this dump; presumably it just returns cpu with the debug
 * range check compiled out — confirm against the original header. */
2244 static inline __attribute__((always_inline)) unsigned int cpumask_check(unsigned int cpu)
/* Lowest-numbered set cpu in srcp (>= 8 means "none"). */
2248 static inline __attribute__((always_inline)) unsigned int cpumask_first(const struct cpumask *srcp)
2250 return find_first_bit(((srcp)->bits), 8);
/* Next set cpu strictly after n; n == -1 is legal and means "start at 0".
 * The ?:/statement-expression around the condition is ftrace branch
 * profiling, not logic.  A statement between the if and the return
 * (original line 2255) is missing from this dump. */
2252 static inline __attribute__((always_inline)) unsigned int cpumask_next(int n, const struct cpumask *srcp)
2254 if (__builtin_constant_p(((n != -1))) ? !!((n != -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/cpumask.h", .line = 172, }; ______r = !!((n != -1)); ______f.miss_hit[______r]++; ______r; }))
2256 return find_next_bit(((srcp)->bits), 8, n+1);
/* Next CLEAR cpu strictly after n; same -1 convention and same missing
 * middle statement (original line 2261) as cpumask_next(). */
2258 static inline __attribute__((always_inline)) unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
2260 if (__builtin_constant_p(((n != -1))) ? !!((n != -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/cpumask.h", .line = 187, }; ______r = !!((n != -1)); ______f.miss_hit[______r]++; ______r; }))
2262 return find_next_zero_bit(((srcp)->bits), 8, n+1);
/* Out-of-line search helpers (defined in lib/cpumask.c). */
2264 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
2265 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
/* Atomic single-bit operations on a mask, all routed through cpumask_check(). */
2266 static inline __attribute__((always_inline)) void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
2268 set_bit(cpumask_check(cpu), ((dstp)->bits));
2270 static inline __attribute__((always_inline)) void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
2272 clear_bit(cpumask_check(cpu), ((dstp)->bits));
2274 static inline __attribute__((always_inline)) int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
2276 return test_and_set_bit(cpumask_check(cpu), ((cpumask)->bits));
2278 static inline __attribute__((always_inline)) int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
2280 return test_and_clear_bit(cpumask_check(cpu), ((cpumask)->bits));
/* Whole-mask fill/clear via the bitmap layer. */
2282 static inline __attribute__((always_inline)) void cpumask_setall(struct cpumask *dstp)
2284 bitmap_fill(((dstp)->bits), 8);
2286 static inline __attribute__((always_inline)) void cpumask_clear(struct cpumask *dstp)
2288 bitmap_zero(((dstp)->bits), 8);
/*
 * Set-algebra wrappers: each delegates to the corresponding bitmap_*()
 * helper with a fixed width of 8 bits (NR_CPUS on this build).
 * NOTE(review): this preprocessed dump has dropped lines — braces are gone
 * throughout, and several calls are missing their final `8);` argument line
 * (complement, equal, intersects, subset, shift_right, shift_left; see the
 * jumps in the embedded original line numbers).  Code left byte-identical.
 */
/* dst = src1 & src2; returns non-zero if the intersection is non-empty. */
2290 static inline __attribute__((always_inline)) int cpumask_and(struct cpumask *dstp,
2291 const struct cpumask *src1p,
2292 const struct cpumask *src2p)
2294 return bitmap_and(((dstp)->bits), ((src1p)->bits),
2295 ((src2p)->bits), 8);
/* dst = src1 | src2. */
2297 static inline __attribute__((always_inline)) void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
2298 const struct cpumask *src2p)
2300 bitmap_or(((dstp)->bits), ((src1p)->bits),
2301 ((src2p)->bits), 8);
/* dst = src1 ^ src2. */
2303 static inline __attribute__((always_inline)) void cpumask_xor(struct cpumask *dstp,
2304 const struct cpumask *src1p,
2305 const struct cpumask *src2p)
2307 bitmap_xor(((dstp)->bits), ((src1p)->bits),
2308 ((src2p)->bits), 8);
/* dst = src1 & ~src2; returns non-zero if the result is non-empty. */
2310 static inline __attribute__((always_inline)) int cpumask_andnot(struct cpumask *dstp,
2311 const struct cpumask *src1p,
2312 const struct cpumask *src2p)
2314 return bitmap_andnot(((dstp)->bits), ((src1p)->bits),
2315 ((src2p)->bits), 8);
/* dst = ~src (trailing argument line missing from dump). */
2317 static inline __attribute__((always_inline)) void cpumask_complement(struct cpumask *dstp,
2318 const struct cpumask *srcp)
2320 bitmap_complement(((dstp)->bits), ((srcp)->bits),
/* Equality / overlap / subset predicates (trailing argument lines missing). */
2323 static inline __attribute__((always_inline)) bool cpumask_equal(const struct cpumask *src1p,
2324 const struct cpumask *src2p)
2326 return bitmap_equal(((src1p)->bits), ((src2p)->bits),
2329 static inline __attribute__((always_inline)) bool cpumask_intersects(const struct cpumask *src1p,
2330 const struct cpumask *src2p)
2332 return bitmap_intersects(((src1p)->bits), ((src2p)->bits),
2335 static inline __attribute__((always_inline)) int cpumask_subset(const struct cpumask *src1p,
2336 const struct cpumask *src2p)
2338 return bitmap_subset(((src1p)->bits), ((src2p)->bits),
/* Emptiness / fullness / popcount of the 8-bit mask. */
2341 static inline __attribute__((always_inline)) bool cpumask_empty(const struct cpumask *srcp)
2343 return bitmap_empty(((srcp)->bits), 8);
2345 static inline __attribute__((always_inline)) bool cpumask_full(const struct cpumask *srcp)
2347 return bitmap_full(((srcp)->bits), 8);
2349 static inline __attribute__((always_inline)) unsigned int cpumask_weight(const struct cpumask *srcp)
2351 return bitmap_weight(((srcp)->bits), 8);
/* Shift the whole mask by n bits (trailing argument lines missing). */
2353 static inline __attribute__((always_inline)) void cpumask_shift_right(struct cpumask *dstp,
2354 const struct cpumask *srcp, int n)
2356 bitmap_shift_right(((dstp)->bits), ((srcp)->bits), n,
2359 static inline __attribute__((always_inline)) void cpumask_shift_left(struct cpumask *dstp,
2360 const struct cpumask *srcp, int n)
2362 bitmap_shift_left(((dstp)->bits), ((srcp)->bits), n,
/* Plain copy of all 8 bits. */
2365 static inline __attribute__((always_inline)) void cpumask_copy(struct cpumask *dstp,
2366 const struct cpumask *srcp)
2368 bitmap_copy(((dstp)->bits), ((srcp)->bits), 8);
/*
 * Text formatting/parsing wrappers, again delegating to the bitmap layer
 * with width 8.  NOTE(review): braces and two trailing argument lines
 * (parselist_user, cpulist_scnprintf) are missing from this preprocessed
 * dump; code left byte-identical, comments only added.
 */
/* Hex-format the mask into buf; returns characters written. */
2370 static inline __attribute__((always_inline)) int cpumask_scnprintf(char *buf, int len,
2371 const struct cpumask *srcp)
2373 return bitmap_scnprintf(buf, len, ((srcp)->bits), 8);
/* Parse a hex mask from a userspace buffer. */
2375 static inline __attribute__((always_inline)) int cpumask_parse_user(const char *buf, int len,
2376 struct cpumask *dstp)
2378 return bitmap_parse_user(buf, len, ((dstp)->bits), 8);
/* Parse a cpu-list ("0-3,5" style) from a userspace buffer. */
2380 static inline __attribute__((always_inline)) int cpumask_parselist_user(const char *buf, int len,
2381 struct cpumask *dstp)
2383 return bitmap_parselist_user(buf, len, ((dstp)->bits),
/* Format the mask as a cpu-list string. */
2386 static inline __attribute__((always_inline)) int cpulist_scnprintf(char *buf, int len,
2387 const struct cpumask *srcp)
2389 return bitmap_scnlistprintf(buf, len, ((srcp)->bits),
/* Parse a cpu-list from a kernel buffer. */
2392 static inline __attribute__((always_inline)) int cpulist_parse(const char *buf, struct cpumask *dstp)
2394 return bitmap_parselist(buf, ((dstp)->bits), 8);
/* Bytes needed to store one cpumask (array-of-long size, rounded up). */
2396 static inline __attribute__((always_inline)) size_t cpumask_size(void)
2398 return (((8) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(long);
/*
 * cpumask_var_t support, compiled here WITHOUT CONFIG_CPUMASK_OFFSTACK:
 * the "variable" mask is a one-element array (so it passes by reference and
 * decays to struct cpumask *) and the alloc/free routines are stubs.
 * NOTE(review): this dump dropped the stub bodies (e.g. the `return true;`
 * of alloc_cpumask_var, the `return 1;` of __check_is_bitmap) along with
 * the braces — presumed from the always-inline on-stack configuration;
 * confirm against the original header.  Code left byte-identical.
 */
2400 typedef struct cpumask cpumask_var_t[1];
/* Allocation stubs: on-stack config, so these presumably just succeed. */
2401 static inline __attribute__((always_inline)) bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
2405 static inline __attribute__((always_inline)) bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
/* z-variants additionally clear the mask before returning. */
2410 static inline __attribute__((always_inline)) bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
2412 cpumask_clear(*mask);
2415 static inline __attribute__((always_inline)) bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
2418 cpumask_clear(*mask);
/* No-op alloc/free pairs in the on-stack configuration. */
2421 static inline __attribute__((always_inline)) void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
2424 static inline __attribute__((always_inline)) void free_cpumask_var(cpumask_var_t mask)
2427 static inline __attribute__((always_inline)) void free_bootmem_cpumask_var(cpumask_var_t mask)
/* All-ones mask covering every possible cpu. */
2430 extern const unsigned long cpu_all_bits[(((8) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
/* Hotplug bookkeeping: flip / initialize the global state masks. */
2431 void set_cpu_possible(unsigned int cpu, bool possible);
2432 void set_cpu_present(unsigned int cpu, bool present);
2433 void set_cpu_online(unsigned int cpu, bool online);
2434 void set_cpu_active(unsigned int cpu, bool active);
2435 void init_cpu_present(const struct cpumask *src);
2436 void init_cpu_possible(const struct cpumask *src);
2437 void init_cpu_online(const struct cpumask *src);
/* Compile-time type check helper; body (a trivial return) missing from dump. */
2438 static inline __attribute__((always_inline)) int __check_is_bitmap(const unsigned long *bitmap)
/* Table of single-bit masks, one row per cpu (mod 32), used below. */
2442 extern const unsigned long
2443 cpu_bit_bitmap[32 +1][(((8) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]
/* Returns a shared, read-only mask with exactly `cpu`'s bit set, by pointing
 * into cpu_bit_bitmap; the 1 ? p : sizeof(...) dance only type-checks p. */
2444 static inline __attribute__((always_inline)) const struct cpumask *get_cpu_mask(unsigned int cpu)
2446 const unsigned long *p = cpu_bit_bitmap[1 + cpu % 32];
2448 return ((struct cpumask *)(1 ? (p) : (void *)sizeof(__check_is_bitmap(p))));
/* Legacy cpumask_t API, out-of-line pieces. */
2450 int __first_cpu(const cpumask_t *srcp);
2451 int __next_cpu(int n, const cpumask_t *srcp);
2452 int __any_online_cpu(const cpumask_t *mask);
/*
 * Legacy (pre-struct-cpumask) cpumask_t operations.  Unlike the wrappers
 * above, these take an explicit nbits and simply forward to the bit/bitmap
 * layer.  NOTE(review): braces are missing from this preprocessed dump;
 * statements appear intact.  Code left byte-identical; comments only added.
 */
/* Atomically set / clear one cpu bit. */
2453 static inline __attribute__((always_inline)) void __cpu_set(int cpu, volatile cpumask_t *dstp)
2455 set_bit(cpu, dstp->bits);
2457 static inline __attribute__((always_inline)) void __cpu_clear(int cpu, volatile cpumask_t *dstp)
2459 clear_bit(cpu, dstp->bits);
/* Fill / zero the first nbits of the mask. */
2461 static inline __attribute__((always_inline)) void __cpus_setall(cpumask_t *dstp, int nbits)
2463 bitmap_fill(dstp->bits, nbits);
2465 static inline __attribute__((always_inline)) void __cpus_clear(cpumask_t *dstp, int nbits)
2467 bitmap_zero(dstp->bits, nbits);
/* Atomic test-and-set of one cpu bit; returns the old value. */
2469 static inline __attribute__((always_inline)) int __cpu_test_and_set(int cpu, cpumask_t *addr)
2471 return test_and_set_bit(cpu, addr->bits);
/* Set algebra over nbits-wide masks, delegating to bitmap_*(). */
2473 static inline __attribute__((always_inline)) int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
2474 const cpumask_t *src2p, int nbits)
2476 return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
2478 static inline __attribute__((always_inline)) void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
2479 const cpumask_t *src2p, int nbits)
2481 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
2483 static inline __attribute__((always_inline)) void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
2484 const cpumask_t *src2p, int nbits)
2486 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
2488 static inline __attribute__((always_inline)) int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
2489 const cpumask_t *src2p, int nbits)
2491 return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
/* Predicates: equality, overlap, subset, emptiness; plus popcount. */
2493 static inline __attribute__((always_inline)) int __cpus_equal(const cpumask_t *src1p,
2494 const cpumask_t *src2p, int nbits)
2496 return bitmap_equal(src1p->bits, src2p->bits, nbits);
2498 static inline __attribute__((always_inline)) int __cpus_intersects(const cpumask_t *src1p,
2499 const cpumask_t *src2p, int nbits)
2501 return bitmap_intersects(src1p->bits, src2p->bits, nbits);
2503 static inline __attribute__((always_inline)) int __cpus_subset(const cpumask_t *src1p,
2504 const cpumask_t *src2p, int nbits)
2506 return bitmap_subset(src1p->bits, src2p->bits, nbits);
2508 static inline __attribute__((always_inline)) int __cpus_empty(const cpumask_t *srcp, int nbits)
2510 return bitmap_empty(srcp->bits, nbits);
2512 static inline __attribute__((always_inline)) int __cpus_weight(const cpumask_t *srcp, int nbits)
2514 return bitmap_weight(srcp->bits, nbits);
/* Left-shift the whole mask by n bits. */
2516 static inline __attribute__((always_inline)) void __cpus_shift_left(cpumask_t *dstp,
2517 const cpumask_t *srcp, int n, int nbits)
2519 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
2521 static inline __attribute__((always_inline)) int paravirt_enabled(void)
2523 return pv_info.paravirt_enabled;
2525 static inline __attribute__((always_inline)) void load_sp0(struct tss_struct *tss,
2526 struct thread_struct *thread)
2528 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_sp0); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_sp0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_sp0)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(tss)), "d" ((unsigned long)(thread)) : "memory", "cc" ); });
2530 static inline __attribute__((always_inline)) void __cpuid(unsigned int *eax, unsigned int *ebx,
2531 unsigned int *ecx, unsigned int *edx)
2533 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.cpuid); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.cpuid) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.cpuid)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(eax)), "1" ((u32)(ebx)), "2" ((u32)(ecx)), [_arg4] "mr" ((u32)(edx)) : "memory", "cc" ); });
2535 static inline __attribute__((always_inline)) unsigned long paravirt_get_debugreg(int reg)
2537 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.get_debugreg); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 39, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.get_debugreg) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.get_debugreg)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(reg)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.get_debugreg) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.get_debugreg)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(reg)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
2539 static inline __attribute__((always_inline)) void set_debugreg(unsigned long val, int reg)
2541 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.set_debugreg); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.set_debugreg) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.set_debugreg)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(reg)), "d" ((unsigned long)(val)) : "memory", "cc" ); });
2543 static inline __attribute__((always_inline)) void clts(void)
2545 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.clts); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.clts) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.clts)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
2547 static inline __attribute__((always_inline)) unsigned long read_cr0(void)
2549 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_cr0); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 54, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr0)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr0)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
2551 static inline __attribute__((always_inline)) void write_cr0(unsigned long x)
2553 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_cr0); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_cr0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_cr0)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(x)) : "memory", "cc" ); });
2555 static inline __attribute__((always_inline)) unsigned long read_cr2(void)
2557 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.read_cr2); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 64, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.read_cr2) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.read_cr2)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.read_cr2) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.read_cr2)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
2559 static inline __attribute__((always_inline)) void write_cr2(unsigned long x)
2561 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.write_cr2); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.write_cr2) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.write_cr2)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(x)) : "memory", "cc" ); });
2563 static inline __attribute__((always_inline)) unsigned long read_cr3(void)
2565 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.read_cr3); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 74, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.read_cr3) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.read_cr3)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.read_cr3) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.read_cr3)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
2567 static inline __attribute__((always_inline)) void write_cr3(unsigned long x)
2569 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.write_cr3); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.write_cr3) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.write_cr3)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(x)) : "memory", "cc" ); });
2571 static inline __attribute__((always_inline)) unsigned long read_cr4(void)
2573 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_cr4); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 84, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr4) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr4)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr4) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr4)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
/*
 * read_cr4_safe — expanded paravirt accessor: patchable indirect call through
 * pv_cpu_ops.read_cr4_safe, call site recorded in .parainstructions.
 * Return value comes from eax (the edx:eax path is dead: the size test
 * compares sizeof(unsigned long) with itself). The ______f machinery is
 * expanded ftrace branch profiling. Brace lines elided in this extract.
 */
2575 static inline __attribute__((always_inline)) unsigned long read_cr4_safe(void)
2577 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_cr4_safe); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 88, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr4_safe) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr4_safe)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr4_safe) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr4_safe)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
/*
 * write_cr4 — expanded paravirt void call: x is passed in eax ("a" input),
 * then an indirect call through pv_cpu_ops.write_cr4 is emitted with its
 * site recorded in .parainstructions for runtime patching. eax/edx/ecx are
 * declared as outputs so the compiler treats them as clobbered.
 * Brace lines elided in this extract.
 */
2579 static inline __attribute__((always_inline)) void write_cr4(unsigned long x)
2581 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_cr4); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_cr4) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_cr4)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(x)) : "memory", "cc" ); });
/*
 * arch_safe_halt — expanded paravirt void call through pv_irq_ops.safe_halt
 * (no arguments). Call site recorded in .parainstructions for runtime
 * patching; eax/edx/ecx treated as clobbered. Brace lines elided.
 */
2583 static inline __attribute__((always_inline)) void arch_safe_halt(void)
2585 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.safe_halt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.safe_halt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.safe_halt)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
/*
 * halt — expanded paravirt void call through pv_irq_ops.halt (no arguments).
 * Call site recorded in .parainstructions; eax/edx/ecx clobbered.
 * Brace lines elided in this extract.
 */
2587 static inline __attribute__((always_inline)) void halt(void)
2589 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.halt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.halt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.halt)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
/*
 * wbinvd — expanded paravirt void call through pv_cpu_ops.wbinvd
 * (cache write-back-and-invalidate hook; no arguments). Call site recorded
 * in .parainstructions; eax/edx/ecx clobbered. Brace lines elided.
 */
2591 static inline __attribute__((always_inline)) void wbinvd(void)
2593 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.wbinvd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.wbinvd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.wbinvd)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
/*
 * paravirt_read_msr — expanded paravirt call through pv_cpu_ops.read_msr
 * with msr in eax and the err pointer in edx. Because sizeof(u64) >
 * sizeof(unsigned long) on this build (32-bit, per the .balign 4/.long
 * patch records), the live branch assembles the 64-bit result from edx:eax.
 * The ______f blob is expanded ftrace branch profiling. Brace lines elided.
 */
2595 static inline __attribute__((always_inline)) u64 paravirt_read_msr(unsigned msr, int *err)
2597 return ({ u64 __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_msr); if (__builtin_constant_p(((sizeof(u64) > sizeof(unsigned long)))) ? !!((sizeof(u64) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 127, }; ______r = !!((sizeof(u64) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_msr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_msr)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(msr)), "d" ((unsigned long)(err)) : "memory", "cc" ); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_msr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_msr)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(msr)), "d" ((unsigned long)(err)) : "memory", "cc" ); __ret = (u64)__eax; } __ret; });
/*
 * paravirt_rdmsr_regs — expanded paravirt call through pv_cpu_ops.rdmsr_regs;
 * the regs array pointer is passed in eax and the int status is read back
 * from eax (the edx:eax branch is dead: sizeof(int) <= sizeof(unsigned long)).
 * Call site recorded in .parainstructions. Brace lines elided.
 */
2599 static inline __attribute__((always_inline)) int paravirt_rdmsr_regs(u32 *regs)
2601 return ({ int __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.rdmsr_regs); if (__builtin_constant_p(((sizeof(int) > sizeof(unsigned long)))) ? !!((sizeof(int) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 132, }; ______r = !!((sizeof(int) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.rdmsr_regs) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.rdmsr_regs)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(regs)) : "memory", "cc" ); __ret = (int)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.rdmsr_regs) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.rdmsr_regs)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(regs)) : "memory", "cc" ); __ret = (int)__eax; } __ret; });
/*
 * paravirt_write_msr — expanded paravirt call through pv_cpu_ops.write_msr;
 * three arguments in eax/edx/ecx (msr, low, high word of the MSR value),
 * int status returned in eax (edx:eax branch dead for sizeof(int)).
 * Call site recorded in .parainstructions. Brace lines elided.
 */
2603 static inline __attribute__((always_inline)) int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
2605 return ({ int __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_msr); if (__builtin_constant_p(((sizeof(int) > sizeof(unsigned long)))) ? !!((sizeof(int) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 137, }; ______r = !!((sizeof(int) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_msr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_msr)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(msr)), "d" ((unsigned long)(low)), "c" ((unsigned long)(high)) : "memory", "cc" ); __ret = (int)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_msr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_msr)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(msr)), "d" ((unsigned long)(low)), "c" ((unsigned long)(high)) : "memory", "cc" ); __ret = (int)__eax; } __ret; });
/*
 * paravirt_wrmsr_regs — expanded paravirt call through pv_cpu_ops.wrmsr_regs;
 * regs pointer in eax, int status returned in eax (edx:eax branch dead).
 * Call site recorded in .parainstructions. Brace lines elided.
 */
2607 static inline __attribute__((always_inline)) int paravirt_wrmsr_regs(u32 *regs)
2609 return ({ int __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.wrmsr_regs); if (__builtin_constant_p(((sizeof(int) > sizeof(unsigned long)))) ? !!((sizeof(int) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 142, }; ______r = !!((sizeof(int) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.wrmsr_regs) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.wrmsr_regs)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(regs)) : "memory", "cc" ); __ret = (int)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.wrmsr_regs) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.wrmsr_regs)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(regs)) : "memory", "cc" ); __ret = (int)__eax; } __ret; });
/*
 * rdmsrl_safe — reads MSR `msr` via paravirt_read_msr and stores the 64-bit
 * value into *p. NOTE(review): the declaration of `err` and the
 * `return err;` line (original lines 2612-2616) were elided from this
 * extract; `err` receives paravirt_read_msr's error status.
 */
2611 static inline __attribute__((always_inline)) int rdmsrl_safe(unsigned msr, unsigned long long *p)
2614 *p = paravirt_read_msr(msr, &err);
/*
 * rdmsrl_amd_safe — MSR read via the register-block interface
 * (paravirt_rdmsr_regs). Fills a zeroed 8-entry u32 gprs[] block:
 * gprs[7] = 0x9c5a203a (AMD pass-through magic key placed in, presumably,
 * the edi slot — TODO confirm slot mapping against the rdmsr_regs
 * implementation), then reassembles the 64-bit value from gprs[0] (low) and
 * gprs[2] (high). NOTE(review): lines setting gprs[1] = msr and declaring
 * `err` / returning it appear to be elided from this extract (original
 * line-number gaps 2620-2621, 2625-2626).
 */
2617 static inline __attribute__((always_inline)) int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
2619 u32 gprs[8] = { 0 };
2622 gprs[7] = 0x9c5a203a;
2623 err = paravirt_rdmsr_regs(gprs);
2624 *p = gprs[0] | ((u64)gprs[2] << 32);
/*
 * wrmsrl_amd_safe — MSR write via the register-block interface
 * (paravirt_wrmsr_regs). Zeroed 8-entry u32 gprs[] block: gprs[2] gets the
 * high 32 bits of val, gprs[7] the AMD magic key 0x9c5a203a; returns the
 * status from paravirt_wrmsr_regs. NOTE(review): lines filling gprs[0]
 * (low half of val) and gprs[1] (msr) appear to be elided from this
 * extract (original line-number gaps 2630-2631).
 */
2627 static inline __attribute__((always_inline)) int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
2629 u32 gprs[8] = { 0 };
2632 gprs[2] = val >> 32;
2633 gprs[7] = 0x9c5a203a;
2634 return paravirt_wrmsr_regs(gprs);
/*
 * paravirt_read_tsc — expanded paravirt call through pv_cpu_ops.read_tsc.
 * sizeof(u64) > sizeof(unsigned long) on this 32-bit build, so the live
 * branch assembles the 64-bit TSC value from edx:eax. Call site recorded
 * in .parainstructions; ______f blob is ftrace branch profiling.
 * Brace lines elided in this extract.
 */
2636 static inline __attribute__((always_inline)) u64 paravirt_read_tsc(void)
2638 return ({ u64 __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_tsc); if (__builtin_constant_p(((sizeof(u64) > sizeof(unsigned long)))) ? !!((sizeof(u64) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 217, }; ______r = !!((sizeof(u64) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_tsc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_tsc)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_tsc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_tsc)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (u64)__eax; } __ret; });
/*
 * paravirt_sched_clock — expanded paravirt call through
 * pv_time_ops.sched_clock; 64-bit result assembled from edx:eax on this
 * 32-bit build. Call site recorded in .parainstructions; ______f blob is
 * ftrace branch profiling. Brace lines elided in this extract.
 */
2640 static inline __attribute__((always_inline)) unsigned long long paravirt_sched_clock(void)
2642 return ({ unsigned long long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_time_ops.sched_clock); if (__builtin_constant_p(((sizeof(unsigned long long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 230, }; ______r = !!((sizeof(unsigned long long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_time_ops.sched_clock) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_time_ops.sched_clock)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_time_ops.sched_clock) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_time_ops.sched_clock)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long long)__eax; } __ret; });
/*
 * paravirt_read_pmc — expanded paravirt call through pv_cpu_ops.read_pmc;
 * counter index passed in eax, 64-bit counter value assembled from edx:eax
 * on this 32-bit build. Call site recorded in .parainstructions.
 * Brace lines elided in this extract.
 */
2644 static inline __attribute__((always_inline)) unsigned long long paravirt_read_pmc(int counter)
2646 return ({ u64 __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_pmc); if (__builtin_constant_p(((sizeof(u64) > sizeof(unsigned long)))) ? !!((sizeof(u64) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 235, }; ______r = !!((sizeof(u64) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_pmc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_pmc)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(counter)) : "memory", "cc" ); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_pmc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_pmc)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(counter)) : "memory", "cc" ); __ret = (u64)__eax; } __ret; });
/*
 * paravirt_rdtscp — expanded paravirt call through pv_cpu_ops.read_tscp;
 * aux output pointer passed in eax, 64-bit TSC value assembled from edx:eax
 * on this 32-bit build. Call site recorded in .parainstructions.
 * Brace lines elided in this extract.
 */
2648 static inline __attribute__((always_inline)) unsigned long long paravirt_rdtscp(unsigned int *aux)
2650 return ({ u64 __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_tscp); if (__builtin_constant_p(((sizeof(u64) > sizeof(unsigned long)))) ? !!((sizeof(u64) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 247, }; ______r = !!((sizeof(u64) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_tscp) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_tscp)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(aux)) : "memory", "cc" ); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_tscp) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_tscp)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(aux)) : "memory", "cc" ); __ret = (u64)__eax; } __ret; });
/*
 * paravirt_alloc_ldt — expanded paravirt void call through
 * pv_cpu_ops.alloc_ldt: ldt pointer in eax, entry count in edx. Call site
 * recorded in .parainstructions; eax/edx/ecx clobbered. Brace lines elided.
 */
2652 static inline __attribute__((always_inline)) void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
2654 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.alloc_ldt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.alloc_ldt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.alloc_ldt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ldt)), "d" ((unsigned long)(entries)) : "memory", "cc" ); });
/*
 * paravirt_free_ldt — expanded paravirt void call through
 * pv_cpu_ops.free_ldt: ldt pointer in eax, entry count in edx. Call site
 * recorded in .parainstructions; eax/edx/ecx clobbered. Brace lines elided.
 */
2656 static inline __attribute__((always_inline)) void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
2658 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.free_ldt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.free_ldt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.free_ldt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ldt)), "d" ((unsigned long)(entries)) : "memory", "cc" ); });
/*
 * load_TR_desc — expanded paravirt void call through pv_cpu_ops.load_tr_desc
 * (no arguments). Call site recorded in .parainstructions; eax/edx/ecx
 * clobbered. Brace lines elided in this extract.
 */
2660 static inline __attribute__((always_inline)) void load_TR_desc(void)
2662 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_tr_desc); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_tr_desc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_tr_desc)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
/*
 * load_gdt — expanded paravirt void call through pv_cpu_ops.load_gdt; the
 * descriptor-table pointer is passed in eax. Call site recorded in
 * .parainstructions; eax/edx/ecx clobbered. Brace lines elided.
 */
2664 static inline __attribute__((always_inline)) void load_gdt(const struct desc_ptr *dtr)
2666 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_gdt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_gdt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_gdt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dtr)) : "memory", "cc" ); });
/*
 * load_idt — expanded paravirt void call through pv_cpu_ops.load_idt; the
 * descriptor-table pointer is passed in eax. Call site recorded in
 * .parainstructions; eax/edx/ecx clobbered. Brace lines elided.
 */
2668 static inline __attribute__((always_inline)) void load_idt(const struct desc_ptr *dtr)
2670 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_idt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_idt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_idt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dtr)) : "memory", "cc" ); });
/*
 * set_ldt — expanded paravirt void call through pv_cpu_ops.set_ldt: LDT base
 * address in eax, entry count in edx. Call site recorded in
 * .parainstructions; eax/edx/ecx clobbered. Brace lines elided.
 */
2672 static inline __attribute__((always_inline)) void set_ldt(const void *addr, unsigned entries)
2674 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.set_ldt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.set_ldt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.set_ldt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(addr)), "d" ((unsigned long)(entries)) : "memory", "cc" ); });
/*
 * store_gdt — expanded paravirt void call through pv_cpu_ops.store_gdt; the
 * output desc_ptr is passed by pointer in eax (the hook writes through it —
 * covered by the "memory" clobber). Call site recorded in .parainstructions.
 * Brace lines elided in this extract.
 */
2676 static inline __attribute__((always_inline)) void store_gdt(struct desc_ptr *dtr)
2678 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.store_gdt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.store_gdt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.store_gdt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dtr)) : "memory", "cc" ); });
/*
 * store_idt — expanded paravirt void call through pv_cpu_ops.store_idt; the
 * output desc_ptr is passed by pointer in eax. Call site recorded in
 * .parainstructions; "memory" clobber covers the write-through.
 * Brace lines elided in this extract.
 */
2680 static inline __attribute__((always_inline)) void store_idt(struct desc_ptr *dtr)
2682 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.store_idt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.store_idt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.store_idt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dtr)) : "memory", "cc" ); });
/*
 * paravirt_store_tr — expanded paravirt call through pv_cpu_ops.store_tr;
 * returns the task-register selector from eax (edx:eax branch is dead:
 * sizeof(unsigned long) compared with itself). Call site recorded in
 * .parainstructions. Brace lines elided in this extract.
 */
2684 static inline __attribute__((always_inline)) unsigned long paravirt_store_tr(void)
2686 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.store_tr); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 302, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.store_tr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.store_tr)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.store_tr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.store_tr)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
/*
 * load_TLS — expanded paravirt void call through pv_cpu_ops.load_tls:
 * thread_struct pointer in eax, cpu number in edx. Call site recorded in
 * .parainstructions; eax/edx/ecx clobbered. Brace lines elided.
 */
2688 static inline __attribute__((always_inline)) void load_TLS(struct thread_struct *t, unsigned cpu)
2690 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_tls); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_tls) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_tls)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(t)), "d" ((unsigned long)(cpu)) : "memory", "cc" ); });
/*
 * write_ldt_entry — expanded paravirt void call through
 * pv_cpu_ops.write_ldt_entry: table pointer in eax, entry index in edx,
 * descriptor pointer (`desc`) in ecx. NOTE(review): the remainder of the
 * signature declaring `desc` (original lines 2693-2694) was elided from
 * this extract. Call site recorded in .parainstructions.
 */
2692 static inline __attribute__((always_inline)) void write_ldt_entry(struct desc_struct *dt, int entry,
2695 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_ldt_entry); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_ldt_entry) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_ldt_entry)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dt)), "d" ((unsigned long)(entry)), "c" ((unsigned long)(desc)) : "memory", "cc" ); });
/*
 * write_gdt_entry — expanded paravirt void call through
 * pv_cpu_ops.write_gdt_entry with FOUR arguments: dt/entry/desc go in
 * eax/edx/ecx via the "0"/"1"/"2" matching constraints, and the 4th
 * argument (type) is pushed on the stack before the call ("push %[_arg4]")
 * and popped afterwards without touching flags ("lea 4(%%esp),%%esp") —
 * this is the 32-bit convention for calls with more than 3 register slots.
 * Call site recorded in .parainstructions. Brace lines elided.
 */
2697 static inline __attribute__((always_inline)) void write_gdt_entry(struct desc_struct *dt, int entry,
2698 void *desc, int type)
2700 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_gdt_entry); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_gdt_entry) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_gdt_entry)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(dt)), "1" ((u32)(entry)), "2" ((u32)(desc)), [_arg4] "mr" ((u32)(type)) : "memory", "cc" ); });
/*
 * write_idt_entry — expanded paravirt void call through
 * pv_cpu_ops.write_idt_entry: IDT pointer in eax, entry index in edx, gate
 * descriptor pointer in ecx. Call site recorded in .parainstructions.
 * Brace lines elided in this extract.
 */
2702 static inline __attribute__((always_inline)) void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
2704 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_idt_entry); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_idt_entry) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_idt_entry)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dt)), "d" ((unsigned long)(entry)), "c" ((unsigned long)(g)) : "memory", "cc" ); });
/*
 * set_iopl_mask(): set the I/O privilege-level mask via
 * pv_cpu_ops.set_iopl_mask.  Expanded PVOP_VCALL1: mask in eax,
 * call site recorded in .parainstructions for boot-time patching.
 */
2706 static inline __attribute__((always_inline)) void set_iopl_mask(unsigned mask)
2708 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.set_iopl_mask); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.set_iopl_mask) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.set_iopl_mask)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mask)) : "memory", "cc" ); });
/*
 * slow_down_io(): legacy I/O port delay.  Unlike its neighbours this is a
 * plain indirect C call through pv_cpu_ops.io_delay, not a patchable
 * PVOP asm wrapper.
 */
2710 static inline __attribute__((always_inline)) void slow_down_io(void)
2712 pv_cpu_ops.io_delay();
/*
 * startup_ipi_hook(): hypervisor hook around sending the SMP startup IPI.
 * Expanded PVOP_VCALL3 through pv_apic_ops.startup_ipi_hook with
 * phys_apicid/start_eip/start_esp in eax/edx/ecx; call site recorded in
 * .parainstructions.  (Note: the extraction shows no trailing ';' on the
 * statement expression here — a brace/semicolon line was elided.)
 */
2714 static inline __attribute__((always_inline)) void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
2715 unsigned long start_esp)
2717 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_apic_ops.startup_ipi_hook); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_apic_ops.startup_ipi_hook) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_apic_ops.startup_ipi_hook)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(phys_apicid)), "d" ((unsigned long)(start_eip)), "c" ((unsigned long)(start_esp)) : "memory", "cc" ); })
/*
 * paravirt_activate_mm(): notify the hypervisor layer of an mm switch.
 * Expanded PVOP_VCALL2 through pv_mmu_ops.activate_mm with prev/next
 * in eax/edx; patchable call site recorded in .parainstructions.
 */
2720 static inline __attribute__((always_inline)) void paravirt_activate_mm(struct mm_struct *prev,
2721 struct mm_struct *next)
2723 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.activate_mm); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.activate_mm) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.activate_mm)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(prev)), "d" ((unsigned long)(next)) : "memory", "cc" ); });
/*
 * arch_dup_mmap(): hook called when an mm is duplicated (fork).
 * Expanded PVOP_VCALL2 through pv_mmu_ops.dup_mmap with oldmm/mm in
 * eax/edx; patchable call site recorded in .parainstructions.
 */
2725 static inline __attribute__((always_inline)) void arch_dup_mmap(struct mm_struct *oldmm,
2726 struct mm_struct *mm)
2728 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.dup_mmap); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.dup_mmap) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.dup_mmap)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(oldmm)), "d" ((unsigned long)(mm)) : "memory", "cc" ); });
/*
 * arch_exit_mmap(): hook called when an mm is torn down.
 * Expanded PVOP_VCALL1 through pv_mmu_ops.exit_mmap with mm in eax;
 * patchable call site recorded in .parainstructions.
 */
2730 static inline __attribute__((always_inline)) void arch_exit_mmap(struct mm_struct *mm)
2732 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.exit_mmap); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.exit_mmap) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.exit_mmap)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)) : "memory", "cc" ); });
/*
 * __flush_tlb(): flush user TLB entries.  Expanded PVOP_VCALL0 through
 * pv_mmu_ops.flush_tlb_user — no arguments, but eax/edx/ecx are still
 * declared clobbered; patchable call site recorded in .parainstructions.
 */
2734 static inline __attribute__((always_inline)) void __flush_tlb(void)
2736 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.flush_tlb_user); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.flush_tlb_user) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.flush_tlb_user)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
/*
 * __flush_tlb_global(): flush all TLB entries including globals.
 * Expanded PVOP_VCALL0 through pv_mmu_ops.flush_tlb_kernel; patchable
 * call site recorded in .parainstructions.
 */
2738 static inline __attribute__((always_inline)) void __flush_tlb_global(void)
2740 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.flush_tlb_kernel); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.flush_tlb_kernel) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.flush_tlb_kernel)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
/*
 * __flush_tlb_single(): flush the TLB entry for one virtual address.
 * Expanded PVOP_VCALL1 through pv_mmu_ops.flush_tlb_single with addr in
 * eax; patchable call site recorded in .parainstructions.
 */
2742 static inline __attribute__((always_inline)) void __flush_tlb_single(unsigned long addr)
2744 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.flush_tlb_single); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.flush_tlb_single) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.flush_tlb_single)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(addr)) : "memory", "cc" ); });
/*
 * flush_tlb_others(): request a TLB flush on other CPUs.
 * Expanded PVOP_VCALL3 through pv_mmu_ops.flush_tlb_others with
 * cpumask/mm/va in eax/edx/ecx; patchable call site recorded in
 * .parainstructions.  (The third parameter declaration line, "va", was
 * elided by the extraction — only the expansion references it.)
 */
2746 static inline __attribute__((always_inline)) void flush_tlb_others(const struct cpumask *cpumask,
2747 struct mm_struct *mm,
2750 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.flush_tlb_others); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.flush_tlb_others) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.flush_tlb_others)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(cpumask)), "d" ((unsigned long)(mm)), "c" ((unsigned long)(va)) : "memory", "cc" ); });
/*
 * paravirt_pgd_alloc(): ask the hypervisor layer to prepare a new pgd;
 * returns an int status.  Expanded PVOP_CALL1 through pv_mmu_ops.pgd_alloc
 * with mm in eax.  The expansion branches on sizeof(int) >
 * sizeof(unsigned long): if true it would reassemble a 64-bit result
 * from edx:eax, otherwise (always, on this 32-bit build) the result is
 * taken from eax alone.  The __builtin_constant_p(...) ? ... :
 * ({ ftrace_branch_data ... }) wrapper is the CONFIG_PROFILE_ALL_BRANCHES
 * expansion of the if(), counting miss/hit per branch.
 */
2752 static inline __attribute__((always_inline)) int paravirt_pgd_alloc(struct mm_struct *mm)
2754 return ({ int __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pgd_alloc); if (__builtin_constant_p(((sizeof(int) > sizeof(unsigned long)))) ? !!((sizeof(int) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 397, }; ______r = !!((sizeof(int) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_alloc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_alloc)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)) : "memory", "cc" ); __ret = (int)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_alloc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_alloc)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)) : "memory", "cc" ); __ret = (int)__eax; } __ret; });
/*
 * paravirt_pgd_free(): hypervisor hook before a pgd is freed.
 * Expanded PVOP_VCALL2 through pv_mmu_ops.pgd_free with mm/pgd in
 * eax/edx; patchable call site recorded in .parainstructions.
 */
2756 static inline __attribute__((always_inline)) void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
2758 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pgd_free); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_free) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_free)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(pgd)) : "memory", "cc" ); });
/*
 * paravirt_alloc_pte(): notify the hypervisor that the page at pfn will
 * be used as a PTE page.  Expanded PVOP_VCALL2 through
 * pv_mmu_ops.alloc_pte with mm/pfn in eax/edx; patchable call site
 * recorded in .parainstructions.
 */
2760 static inline __attribute__((always_inline)) void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
2762 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.alloc_pte); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.alloc_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.alloc_pte)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(pfn)) : "memory", "cc" ); });
/*
 * paravirt_release_pte(): notify the hypervisor that the PTE page at pfn
 * is being released.  Expanded PVOP_VCALL1 through pv_mmu_ops.release_pte
 * with pfn in eax; patchable call site recorded in .parainstructions.
 */
2764 static inline __attribute__((always_inline)) void paravirt_release_pte(unsigned long pfn)
2766 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.release_pte); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.release_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.release_pte)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pfn)) : "memory", "cc" ); });
/*
 * paravirt_alloc_pmd(): notify the hypervisor that the page at pfn will
 * be used as a PMD page.  Expanded PVOP_VCALL2 through
 * pv_mmu_ops.alloc_pmd with mm/pfn in eax/edx; patchable call site
 * recorded in .parainstructions.
 */
2768 static inline __attribute__((always_inline)) void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
2770 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.alloc_pmd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.alloc_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.alloc_pmd)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(pfn)) : "memory", "cc" ); });
/*
 * paravirt_release_pmd(): notify the hypervisor that the PMD page at pfn
 * is being released.  Expanded PVOP_VCALL1 through pv_mmu_ops.release_pmd
 * with pfn in eax; patchable call site recorded in .parainstructions.
 */
2772 static inline __attribute__((always_inline)) void paravirt_release_pmd(unsigned long pfn)
2774 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.release_pmd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.release_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.release_pmd)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pfn)) : "memory", "cc" ); });
/*
 * paravirt_alloc_pud(): notify the hypervisor that the page at pfn will
 * be used as a PUD page.  Expanded PVOP_VCALL2 through
 * pv_mmu_ops.alloc_pud with mm/pfn in eax/edx; patchable call site
 * recorded in .parainstructions.
 */
2776 static inline __attribute__((always_inline)) void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
2778 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.alloc_pud); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.alloc_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.alloc_pud)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(pfn)) : "memory", "cc" ); });
/*
 * paravirt_release_pud(): notify the hypervisor that the PUD page at pfn
 * is being released.  Expanded PVOP_VCALL1 through pv_mmu_ops.release_pud
 * with pfn in eax; patchable call site recorded in .parainstructions.
 */
2780 static inline __attribute__((always_inline)) void paravirt_release_pud(unsigned long pfn)
2782 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.release_pud); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.release_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.release_pud)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pfn)) : "memory", "cc" ); });
/*
 * pte_update(): hypervisor hook after a PTE has been modified.
 * Expanded PVOP_VCALL3 through pv_mmu_ops.pte_update with mm/addr/ptep
 * in eax/edx/ecx; patchable call site recorded in .parainstructions.
 * (The "pte_t *ptep" parameter line was elided by the extraction.)
 */
2784 static inline __attribute__((always_inline)) void pte_update(struct mm_struct *mm, unsigned long addr,
2787 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_update); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_update) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_update)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); });
/*
 * pmd_update(): hypervisor hook after a PMD has been modified.
 * Expanded PVOP_VCALL3 through pv_mmu_ops.pmd_update with mm/addr/pmdp
 * in eax/edx/ecx; patchable call site recorded in .parainstructions.
 * (The "pmd_t *pmdp" parameter line was elided by the extraction.)
 */
2789 static inline __attribute__((always_inline)) void pmd_update(struct mm_struct *mm, unsigned long addr,
2792 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_update); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_update) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_update)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(pmdp)) : "memory", "cc" ); });
/*
 * pte_update_defer(): deferred variant of the PTE-update hook.
 * Expanded PVOP_VCALL3 through pv_mmu_ops.pte_update_defer with
 * mm/addr/ptep in eax/edx/ecx; patchable call site recorded in
 * .parainstructions.  (The "pte_t *ptep" parameter line was elided.)
 */
2794 static inline __attribute__((always_inline)) void pte_update_defer(struct mm_struct *mm, unsigned long addr,
2797 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_update_defer); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_update_defer) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_update_defer)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); });
/*
 * pmd_update_defer(): deferred variant of the PMD-update hook.
 * Expanded PVOP_VCALL3 through pv_mmu_ops.pmd_update_defer with
 * mm/addr/pmdp in eax/edx/ecx; patchable call site recorded in
 * .parainstructions.  (The "pmd_t *pmdp" parameter line was elided.)
 */
2799 static inline __attribute__((always_inline)) void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
2802 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_update_defer); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_update_defer) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_update_defer)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(pmdp)) : "memory", "cc" ); });
/*
 * __pte(): build a pte_t from a raw pteval_t via pv_mmu_ops.make_pte
 * (a PVOP_CALLEE — note the reduced clobber mask (1<<0)|(1<<2) and only
 * eax/edx outputs, unlike the VCALL wrappers above).  Two expansions are
 * selected on sizeof(pteval_t) > sizeof(long): the first (PAE-style)
 * passes the 64-bit val split across eax ("a") and edx ("d", val >> 32);
 * the second passes val in eax only.  Within each, a nested size test
 * decides whether the result is reassembled from edx:eax or read from
 * eax.  The __builtin_constant_p(...) ? ... : ({ ftrace_branch_data ... })
 * wrappers are CONFIG_PROFILE_ALL_BRANCHES instrumentation of the ifs.
 * (Declaration of "ret" and some brace lines were elided by the
 * extraction; .line fields between .file/.func were also dropped.)
 */
2804 static inline __attribute__((always_inline)) pte_t __pte(pteval_t val)
2807 if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 460, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2808 ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pte.func); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2809 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2812 , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pte.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pte.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; })
2815 ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pte.func); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2816 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2819 , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pte.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pte.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; })
2821 return (pte_t) { .pte = ret };
/*
 * pte_val(): extract the raw pteval_t from a pte_t via
 * pv_mmu_ops.pte_val (PVOP_CALLEE form: clobber mask (1<<0)|(1<<2),
 * outputs eax/edx only).  Mirrors __pte(): the outer size test on
 * sizeof(pteval_t) > sizeof(long) picks the expansion that passes
 * pte.pte split across eax/edx (64-bit/PAE) or in eax alone; the inner
 * test picks edx:eax vs eax for the result.  The ftrace_branch_data
 * statement expressions are branch-profiling instrumentation.
 * (Declaration of "ret", the return line, and brace lines were elided
 * by the extraction.)
 */
2823 static inline __attribute__((always_inline)) pteval_t pte_val(pte_t pte)
2826 if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 476, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2827 ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_val.func); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2828 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2831 , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pte.pte)), "d" ((unsigned long)((u64)pte.pte >> 32)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pte.pte)), "d" ((unsigned long)((u64)pte.pte >> 32)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; })
2834 ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_val.func); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2835 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2838 , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pte.pte)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pte.pte)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; })
/*
 * __pgd(): build a pgd_t from a raw pgdval_t via pv_mmu_ops.make_pgd
 * (PVOP_CALLEE form: clobber mask (1<<0)|(1<<2), outputs eax/edx only).
 * Same two-level size dispatch as __pte(): the outer test on
 * sizeof(pgdval_t) > sizeof(long) chooses whether val is passed split
 * across eax/edx or in eax alone; the inner test chooses edx:eax vs eax
 * for the result.  ftrace_branch_data statement expressions are
 * branch-profiling instrumentation.  (Declaration of "ret" and brace
 * lines were elided by the extraction.)
 */
2842 static inline __attribute__((always_inline)) pgd_t __pgd(pgdval_t val)
2845 if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(long)))) ? !!((sizeof(pgdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 490, }; ______r = !!((sizeof(pgdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2846 ret = ({ pgdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pgd.func); if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pgdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2847 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2850 , }; ______r = !!((sizeof(pgdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pgdval_t)__eax; } __ret; })
2853 ret = ({ pgdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pgd.func); if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pgdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2854 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2857 , }; ______r = !!((sizeof(pgdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pgdval_t)__eax; } __ret; })
2859 return (pgd_t) { ret };
2861 static inline __attribute__((always_inline)) pgdval_t pgd_val(pgd_t pgd)
2864 if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(long)))) ? !!((sizeof(pgdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 504, }; ______r = !!((sizeof(pgdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2865 ret = ({ pgdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pgd_val.func); if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pgdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2866 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2869 , }; ______r = !!((sizeof(pgdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pgd.pgd)), "d" ((unsigned long)((u64)pgd.pgd >> 32)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pgd.pgd)), "d" ((unsigned long)((u64)pgd.pgd >> 32)) : "memory", "cc" ); __ret = (pgdval_t)__eax; } __ret; })
2872 ret = ({ pgdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pgd_val.func); if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pgdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2873 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2876 , }; ______r = !!((sizeof(pgdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pgd.pgd)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pgd.pgd)) : "memory", "cc" ); __ret = (pgdval_t)__eax; } __ret; })
2880 static inline __attribute__((always_inline)) pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
2884 ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.ptep_modify_prot_start); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2885 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2888 , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.ptep_modify_prot_start) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.ptep_modify_prot_start)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.ptep_modify_prot_start) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.ptep_modify_prot_start)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; })
2890 return (pte_t) { .pte = ret };
2892 static inline __attribute__((always_inline)) void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
2893 pte_t *ptep, pte_t pte)
2895 if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 529, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2896 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
2898 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.ptep_modify_prot_commit); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.ptep_modify_prot_commit) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.ptep_modify_prot_commit)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(mm)), "1" ((u32)(addr)), "2" ((u32)(ptep)), [_arg4] "mr" ((u32)(pte.pte)) : "memory", "cc" ); })
2901 static inline __attribute__((always_inline)) void set_pte(pte_t *ptep, pte_t pte)
2903 if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 539, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2904 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pte); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pte)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ptep)), "d" ((unsigned long)(pte.pte)), "c" ((unsigned long)((u64)pte.pte >> 32)) : "memory", "cc" ); })
2907 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pte); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pte)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ptep)), "d" ((unsigned long)(pte.pte)) : "memory", "cc" ); })
2910 static inline __attribute__((always_inline)) void set_pte_at(struct mm_struct *mm, unsigned long addr,
2911 pte_t *ptep, pte_t pte)
2913 if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 550, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2914 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
2916 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pte_at); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pte_at) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pte_at)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(mm)), "1" ((u32)(addr)), "2" ((u32)(ptep)), [_arg4] "mr" ((u32)(pte.pte)) : "memory", "cc" ); });
2918 static inline __attribute__((always_inline)) void set_pmd_at(struct mm_struct *mm, unsigned long addr,
2919 pmd_t *pmdp, pmd_t pmd)
2921 if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(long)))) ? !!((sizeof(pmdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 561, }; ______r = !!((sizeof(pmdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2922 pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
2924 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pmd_at); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pmd_at) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pmd_at)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(mm)), "1" ((u32)(addr)), "2" ((u32)(pmdp)), [_arg4] "mr" ((u32)(native_pmd_val(pmd))) : "memory", "cc" ); })
2927 static inline __attribute__((always_inline)) void set_pmd(pmd_t *pmdp, pmd_t pmd)
2929 pmdval_t val = native_pmd_val(pmd);
2930 if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(long)))) ? !!((sizeof(pmdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 574, }; ______r = !!((sizeof(pmdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2931 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pmd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pmd)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pmdp)), "d" ((unsigned long)(val)), "c" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); });
2933 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pmd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pmd)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pmdp)), "d" ((unsigned long)(val)) : "memory", "cc" ); });
2935 static inline __attribute__((always_inline)) pmd_t __pmd(pmdval_t val)
2938 if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(long)))) ? !!((sizeof(pmdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 585, }; ______r = !!((sizeof(pmdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2939 ret = ({ pmdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pmd.func); if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pmdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2940 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2943 , }; ______r = !!((sizeof(pmdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pmdval_t)__eax; } __ret; })
2946 ret = ({ pmdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pmd.func); if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pmdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2947 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2950 , }; ______r = !!((sizeof(pmdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pmdval_t)__eax; } __ret; })
2952 return (pmd_t) { ret };
2954 static inline __attribute__((always_inline)) pmdval_t pmd_val(pmd_t pmd)
2957 if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(long)))) ? !!((sizeof(pmdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 599, }; ______r = !!((sizeof(pmdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2958 ret = ({ pmdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_val.func); if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pmdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2959 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2962 , }; ______r = !!((sizeof(pmdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pmd.pmd)), "d" ((unsigned long)((u64)pmd.pmd >> 32)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pmd.pmd)), "d" ((unsigned long)((u64)pmd.pmd >> 32)) : "memory", "cc" ); __ret = (pmdval_t)__eax; } __ret; })
2965 ret = ({ pmdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_val.func); if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pmdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2966 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2969 , }; ______r = !!((sizeof(pmdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pmd.pmd)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pmd.pmd)) : "memory", "cc" ); __ret = (pmdval_t)__eax; } __ret; })
2973 static inline __attribute__((always_inline)) void set_pud(pud_t *pudp, pud_t pud)
2975 pudval_t val = native_pud_val(pud);
2976 if (__builtin_constant_p(((sizeof(pudval_t) > sizeof(long)))) ? !!((sizeof(pudval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 613, }; ______r = !!((sizeof(pudval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2977 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pud); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pud)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pudp)), "d" ((unsigned long)(val)), "c" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); })
2980 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pud); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pud)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pudp)), "d" ((unsigned long)(val)) : "memory", "cc" ); })
2983 static inline __attribute__((always_inline)) void set_pte_atomic(pte_t *ptep, pte_t pte)
2985 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pte_atomic); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pte_atomic) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pte_atomic)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ptep)), "d" ((unsigned long)(pte.pte)), "c" ((unsigned long)(pte.pte >> 32)) : "memory", "cc" ); })
2988 static inline __attribute__((always_inline)) void pte_clear(struct mm_struct *mm, unsigned long addr,
2991 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_clear); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_clear) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_clear)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); });
2993 static inline __attribute__((always_inline)) void pmd_clear(pmd_t *pmdp)
2995 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_clear); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_clear) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_clear)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pmdp)) : "memory", "cc" ); });
2997 static inline __attribute__((always_inline)) void arch_start_context_switch(struct task_struct *prev)
2999 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.start_context_switch); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.start_context_switch) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.start_context_switch)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(prev)) : "memory", "cc" ); });
3001 static inline __attribute__((always_inline)) void arch_end_context_switch(struct task_struct *next)
3003 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.end_context_switch); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.end_context_switch) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.end_context_switch)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(next)) : "memory", "cc" ); });
3005 static inline __attribute__((always_inline)) void arch_enter_lazy_mmu_mode(void)
3007 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.lazy_mode.enter); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.lazy_mode.enter) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.lazy_mode.enter)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
3009 static inline __attribute__((always_inline)) void arch_leave_lazy_mmu_mode(void)
3011 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.lazy_mode.leave); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.lazy_mode.leave) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.lazy_mode.leave)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
3013 void arch_flush_lazy_mmu_mode(void);
3014 static inline __attribute__((always_inline)) void __set_fixmap(unsigned idx,
3015 phys_addr_t phys, pgprot_t flags)
3017 pv_mmu_ops.set_fixmap(idx, phys, flags);
3019 static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) unsigned long arch_local_save_flags(void)
3021 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.save_fl.func); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 853, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.save_fl.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.save_fl.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.save_fl.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.save_fl.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
3023 static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void arch_local_irq_restore(unsigned long f)
3025 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.restore_fl.func); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.restore_fl.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.restore_fl.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(f)) : "memory", "cc" ); });
3027 static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void arch_local_irq_disable(void)
3029 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.irq_disable.func); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.irq_disable.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.irq_disable.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))) : "memory", "cc" ); });
3031 static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void arch_local_irq_enable(void)
3033 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.irq_enable.func); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.irq_enable.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.irq_enable.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))) : "memory", "cc" ); });
3035 static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) unsigned long arch_local_irq_save(void)
3038 f = arch_local_save_flags();
3039 arch_local_irq_disable();
3042 extern void default_banner(void);
/* NOTE(review): preprocessed kernel-header output; brace lines were elided
 * by the extraction (embedded line numbers skip), so bodies appear bare. */
/* Nonzero when interrupts are disabled in a saved flags word: tests bit
 * 0x200 — presumably the x86 EFLAGS.IF bit, TODO confirm. */
3043 static inline __attribute__((always_inline)) int arch_irqs_disabled_flags(unsigned long flags)
3045 return !(flags & 0x00000200);
/* Convenience form: sample the current flags, then test as above. */
3047 static inline __attribute__((always_inline)) int arch_irqs_disabled(void)
3049 unsigned long flags = arch_local_save_flags();
3050 return arch_irqs_disabled_flags(flags);
/* Instrumentation hooks for irq/softirq state tracing (defined elsewhere). */
3052 extern void trace_softirqs_on(unsigned long ip);
3053 extern void trace_softirqs_off(unsigned long ip);
3054 extern void trace_hardirqs_on(void);
3055 extern void trace_hardirqs_off(void);
3056 extern void stop_critical_timings(void);
3057 extern void start_critical_timings(void);
/* Context-switch entry points implemented in arch assembly/C elsewhere. */
3059 struct task_struct *__switch_to(struct task_struct *prev,
3060 struct task_struct *next);
3062 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
3063 struct tss_struct *tss);
3064 extern void show_regs_common(void);
3065 extern void native_load_gs_index(unsigned);
/* Load a segment's limit via the LSL instruction into __limit.
 * NOTE(review): the return statement is not visible here — the extraction
 * dropped lines (embedded numbering jumps 3069 -> 3072). */
3066 static inline __attribute__((always_inline)) unsigned long get_limit(unsigned long segment)
3068 unsigned long __limit;
3069 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
/* Execute CLTS — clears the CR0 task-switched flag per the ISA. */
3072 static inline __attribute__((always_inline)) void native_clts(void)
3074 asm volatile("clts");
/* Dummy variable used as an asm memory operand to order CR accesses:
 * each accessor lists __force_order as a clobbered/read memory location so
 * the compiler cannot reorder them against each other. */
3076 static unsigned long __force_order;
/* Read/write accessors for control registers CR0/CR2/CR3/CR4.
 * NOTE(review): preprocessed output with brace and `val` declaration /
 * return lines elided by the extraction; code kept byte-identical. */
3077 static inline __attribute__((always_inline)) unsigned long native_read_cr0(void)
3080 asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
3083 static inline __attribute__((always_inline)) void native_write_cr0(unsigned long val)
3085 asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
3087 static inline __attribute__((always_inline)) unsigned long native_read_cr2(void)
3090 asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
3093 static inline __attribute__((always_inline)) void native_write_cr2(unsigned long val)
3095 asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
3097 static inline __attribute__((always_inline)) unsigned long native_read_cr3(void)
3100 asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
3103 static inline __attribute__((always_inline)) void native_write_cr3(unsigned long val)
3105 asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
3107 static inline __attribute__((always_inline)) unsigned long native_read_cr4(void)
3110 asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
/* "Safe" CR4 read: the __ex_table entry maps a fault at label 1 to label 2,
 * and "0" (0) pre-loads val with 0 — so CPUs without CR4 yield 0 instead
 * of faulting. */
3113 static inline __attribute__((always_inline)) unsigned long native_read_cr4_safe(void)
3116 asm volatile("1: mov %%cr4, %0\n"
3118 " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "2b" "\n" " .previous\n"
3119 : "=r" (val), "=m" (__force_order) : "0" (0));
3122 static inline __attribute__((always_inline)) void native_write_cr4(unsigned long val)
3124 asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
/* WBINVD: write-back and invalidate caches; "memory" clobber keeps the
 * compiler from caching values across it. */
3126 static inline __attribute__((always_inline)) void native_wbinvd(void)
3128 asm volatile("wbinvd": : :"memory");
/* Flush one cache line containing *__p; "+m" ties the operand to the
 * pointed-to byte so the access is not optimized away. */
3130 static inline __attribute__((always_inline)) void clflush(volatile void *__p)
3132 asm volatile("clflush %0" : "+m" (*(volatile char *)__p));
/* Idle/halt management entry points defined in arch code elsewhere. */
3134 void disable_hlt(void);
3135 void enable_hlt(void);
3136 void cpu_idle_wait(void);
3137 extern unsigned long arch_align_stack(unsigned long sp);
3138 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
3139 void default_idle(void);
3140 void stop_this_cpu(void *dummy);
3141 static inline __attribute__((always_inline)) __attribute__((always_inline)) void rdtsc_barrier(void)
3143 asm volatile ("661:\n\t" ".byte " "0x8d,0x76,0x00" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(3*32+17)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "mfence" "\n664:\n" ".previous" : : : "memory");
3144 asm volatile ("661:\n\t" ".byte " "0x8d,0x76,0x00" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(3*32+18)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "lfence" "\n664:\n" ".previous" : : : "memory");
/* Link-error sentinel used by the _IOC size-checking macros. */
3146 extern unsigned int __invalid_size_argument_for_IOC;
/* SMP bring-up CPU mask variables, defined in arch setup code. */
3147 extern cpumask_var_t cpu_callin_mask;
3148 extern cpumask_var_t cpu_callout_mask;
3149 extern cpumask_var_t cpu_initialized_mask;
3150 extern cpumask_var_t cpu_sibling_setup_mask;
3151 extern void setup_cpu_local_masks(void);
/* NOTE(review): struct body and most brace lines below were elided by the
 * extraction; code lines are kept byte-identical. */
3167 struct msr_regs_info {
/* RDTSCP (emitted as raw opcode bytes 0f 01 f9): returns the 64-bit TSC
 * assembled from edx:eax and stores the processor id (ecx) through *aux. */
3171 static inline __attribute__((always_inline)) unsigned long long native_read_tscp(unsigned int *aux)
3173 unsigned long low, high;
3174 asm volatile(".byte 0x0f,0x01,0xf9"
3175 : "=a" (low), "=d" (high), "=c" (*aux));
3176 return low | ((u64)high << 32);
/* Plain RDMSR: "=A" binds the 64-bit result to edx:eax (32-bit ABI). */
3178 static inline __attribute__((always_inline)) unsigned long long native_read_msr(unsigned int msr)
3180 unsigned long long val;
3181 asm volatile("rdmsr" : "=A" (val) : "c" (msr))
/* Fault-tolerant RDMSR: on success err is xor'ed to 0; a fault is routed
 * by the __ex_table entry to the fixup at label 3, which stores -5
 * (presumably -EIO — confirm) into *err and jumps back. */
3184 static inline __attribute__((always_inline)) unsigned long long native_read_msr_safe(unsigned int msr,
3187 unsigned long long val;
3188 asm volatile("2: rdmsr ; xor %[err],%[err]\n"
3190 ".section .fixup,\"ax\"\n\t"
3191 "3: mov %[fault],%[err] ; jmp 1b\n\t"
3193 " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "3b" "\n" " .previous\n"
3194 : [err] "=r" (*err), "=A" (val)
3195 : "c" (msr), [fault] "i" (-5));
/* Plain WRMSR from the low/high 32-bit halves in eax/edx. */
3198 static inline __attribute__((always_inline)) void native_write_msr(unsigned int msr,
3199 unsigned low, unsigned high)
3201 asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
/* Fault-tolerant WRMSR, same fixup/__ex_table pattern as the safe read. */
3203 __attribute__((no_instrument_function)) static inline __attribute__((always_inline)) int native_write_msr_safe(unsigned int msr,
3204 unsigned low, unsigned high)
3207 asm volatile("2: wrmsr ; xor %[err],%[err]\n"
3209 ".section .fixup,\"ax\"\n\t"
3210 "3: mov %[fault],%[err] ; jmp 1b\n\t"
3212 " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "3b" "\n" " .previous\n"
3214 : "c" (msr), "0" (low), "d" (high),
3219 extern unsigned long long native_read_tsc(void);
3220 extern int native_rdmsr_safe_regs(u32 regs[8]);
3221 extern int native_wrmsr_safe_regs(u32 regs[8]);
/* RDTSC: 64-bit time-stamp counter in edx:eax ("=A" on 32-bit).
 * NOTE(review): return line elided by the extraction. */
3222 static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long long __native_read_tsc(void)
3224 unsigned long long val;
3225 asm volatile("rdtsc" : "=A" (val));
/* RDPMC: read performance-monitoring counter selected by ecx. */
3228 static inline __attribute__((always_inline)) unsigned long long native_read_pmc(int counter)
3230 unsigned long long val;
3231 asm volatile("rdpmc" : "=A" (val) : "c" (counter));
/* Cross-CPU MSR access API (implemented in arch/x86/lib/msr*.c). */
3234 struct msr *msrs_alloc(void);
3235 void msrs_free(struct msr *msrs);
3236 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
3237 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
3238 void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
3239 void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
3240 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
3241 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
3242 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
3243 int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
3246 extern int register_exec_domain(struct exec_domain *);
3247 extern int unregister_exec_domain(struct exec_domain *);
3248 extern int __set_personality(unsigned int);
/* Personality flag bits (high bits of the personality word); enum header
 * line elided by the extraction. */
3250 UNAME26 = 0x0020000,
3251 ADDR_NO_RANDOMIZE = 0x0040000,
3252 FDPIC_FUNCPTRS = 0x0080000,
3253 MMAP_PAGE_ZERO = 0x0100000,
3254 ADDR_COMPAT_LAYOUT = 0x0200000,
3255 READ_IMPLIES_EXEC = 0x0400000,
3256 ADDR_LIMIT_32BIT = 0x0800000,
3257 SHORT_INODE = 0x1000000,
3258 WHOLE_SECONDS = 0x2000000,
3259 STICKY_TIMEOUTS = 0x4000000,
3260 ADDR_LIMIT_3GB = 0x8000000,
/* Personality identifiers: a low-byte OS id OR'ed with flag bits above. */
3264 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
3265 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
3266 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
3267 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
3268 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
3269 WHOLE_SECONDS | SHORT_INODE,
3270 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
3271 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
3272 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
3274 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
3275 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
3276 PER_LINUX32 = 0x0008,
3277 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
3278 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,
3279 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,
3280 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,
3281 PER_RISCOS = 0x000c,
3282 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
3283 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
3288 typedef void (*handler_t)(int, struct pt_regs *);
/* Execution-domain descriptor: per-personality signal and error-code
 * translation tables, kept in a singly linked list via `next`.
 * NOTE(review): first fields and the closing brace were elided by the
 * extraction (numbering jumps 3289 -> 3292). */
3289 struct exec_domain {
3292 unsigned char pers_low;
3293 unsigned char pers_high;
3294 unsigned long *signal_map;
3295 unsigned long *signal_invmap;
3296 struct map_segment *err_map;
3297 struct map_segment *socktype_map;
3298 struct map_segment *sockopt_map;
3299 struct map_segment *af_map;
3300 struct module *module;
3301 struct exec_domain *next;
/* 64-bit division helpers (lib/div64.c on 32-bit arches). */
3303 extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
3304 extern u64 div64_u64(u64 dividend, u64 divisor);
3305 extern s64 div64_s64(s64 dividend, s64 divisor);
/* u64/u32 quotient, remainder discarded; `remainder` declaration elided
 * by the extraction. */
3306 static inline __attribute__((always_inline)) u64 div_u64(u64 dividend, u32 divisor)
3309 return div_u64_rem(dividend, divisor, &remainder);
/* Signed counterpart of div_u64. */
3311 static inline __attribute__((always_inline)) s64 div_s64(s64 dividend, s32 divisor)
3314 return div_s64_rem(dividend, divisor, &remainder);
3316 u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
/* Division by repeated subtraction — intended for small quotients; the
 * empty asm on `dividend` stops the compiler turning the loop back into a
 * division. Return of the iteration count is not visible (lines elided). */
3317 static inline __attribute__((always_inline)) __attribute__((always_inline)) u32
3318 __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
3321 while (dividend >= divisor) {
3322 asm("" : "+rm"(dividend));
3323 dividend -= divisor;
3326 *remainder = dividend;
3329 static inline __attribute__((always_inline)) void * __attribute__((warn_unused_result)) ERR_PTR(long error)
3331 return (void *) error;
3333 static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) PTR_ERR(const void *ptr)
3337 static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) IS_ERR(const void *ptr)
3339 return (__builtin_constant_p(((unsigned long)ptr) >= (unsigned long)-4095) ? !!(((unsigned long)ptr) >= (unsigned long)-4095) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/err.h", .line = 34, }; ______r = __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
3341 static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) IS_ERR_OR_NULL(const void *ptr)
3343 return !ptr || (__builtin_constant_p(((unsigned long)ptr) >= (unsigned long)-4095) ? !!(((unsigned long)ptr) >= (unsigned long)-4095) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/err.h", .line = 39, }; ______r = __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
3345 static inline __attribute__((always_inline)) void * __attribute__((warn_unused_result)) ERR_CAST(const void *ptr)
3347 return (void *) ptr;
3349 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) PTR_RET(const void *ptr)
3351 if (__builtin_constant_p(((IS_ERR(ptr)))) ? !!((IS_ERR(ptr))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/err.h", .line = 57, }; ______r = !!((IS_ERR(ptr))); ______f.miss_hit[______r]++; ______r; }))
3352 return PTR_ERR(ptr);
/* Return the address of the instruction after this one, via a local label.
 * NOTE(review): `pc` declaration and return elided by the extraction. */
3356 static inline __attribute__((always_inline)) void *current_text_addr(void)
3359 asm volatile("mov $1f, %0; 1:":"=r" (pc));
/* Per-CPU identification data; many fields elided by the extraction
 * (numbering jumps 3362 -> 3377).  Aligned to 1<<6 = 64 bytes —
 * presumably cache-line size; confirm. */
3362 struct cpuinfo_x86 {
3377 __u8 x86_coreid_bits;
3378 __u32 extended_cpuid_level;
3380 __u32 x86_capability[10];
3381 char x86_vendor_id[16];
3382 char x86_model_id[64];
3384 int x86_cache_alignment;
3386 unsigned long loops_per_jiffy;
3390 u16 x86_clflush_size;
3396 } __attribute__((__aligned__((1 << (6)))));
3397 extern struct cpuinfo_x86 boot_cpu_data;
3398 extern struct cpuinfo_x86 new_cpu_data;
3399 extern struct tss_struct doublefault_tss;
3400 extern __u32 cpu_caps_cleared[10];
3401 extern __u32 cpu_caps_set[10];
/* Per-CPU copy of cpuinfo_x86 (expanded DEFINE_PER_CPU declaration). */
3402 extern __attribute__((section(".data..percpu" ""))) __typeof__(struct cpuinfo_x86) cpu_info __attribute__((__aligned__((1 << (6)))));
3403 extern const struct seq_operations cpuinfo_op;
3404 static inline __attribute__((always_inline)) int hlt_works(int cpu)
3406 return (*({ do { const void *__vpp_verify = (typeof((&(cpu_info))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_info))) *)(&(cpu_info)))); (typeof((typeof(*(&(cpu_info))) *)(&(cpu_info)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).hlt_works_ok;
/* CPU identification/bring-up entry points defined in arch/x86/kernel. */
3408 extern void cpu_detect(struct cpuinfo_x86 *c);
3409 extern struct pt_regs *idle_regs(struct pt_regs *);
3410 extern void early_cpu_init(void);
3411 extern void identify_boot_cpu(void);
3412 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
3413 extern void print_cpu_info(struct cpuinfo_x86 *);
3414 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
3415 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
3416 extern unsigned short num_cache_leaves;
3417 extern void detect_extended_topology(struct cpuinfo_x86 *c);
3418 extern void detect_ht(struct cpuinfo_x86 *c);
/* Raw CPUID: *eax/*ecx select leaf/subleaf on input, all four registers
 * written back on output (output operand lines elided by extraction). */
3419 static inline __attribute__((always_inline)) void native_cpuid(unsigned int *eax, unsigned int *ebx,
3420 unsigned int *ecx, unsigned int *edx)
3422 asm volatile("cpuid"
3427 : "0" (*eax), "2" (*ecx));
/* Load CR3 with the physical address of pgdir: subtracts 0xC0000000UL,
 * the visible kernel virtual base (i.e. expanded __pa()). */
3429 static inline __attribute__((always_inline)) void load_cr3(pgd_t *pgdir)
3431 write_cr3((((unsigned long)(pgdir)) - ((unsigned long)(0xC0000000UL))));
/* Hardware TSS layout fragment: the struct header line and several fields
 * were elided by the extraction (numbering 3434/3436/3438... skips).
 * The __xxh halves pad 16-bit selectors to 32 bits; layout is packed. */
3434 unsigned short back_link, __blh;
3436 unsigned short ss0, __ss0h;
3438 unsigned short ss1, __ss1h;
3440 unsigned short ss2, __ss2h;
3441 unsigned long __cr3;
3443 unsigned long flags;
3452 unsigned short es, __esh;
3453 unsigned short cs, __csh;
3454 unsigned short ss, __ssh;
3455 unsigned short ds, __dsh;
3456 unsigned short fs, __fsh;
3457 unsigned short gs, __gsh;
3458 unsigned short ldt, __ldth;
3459 unsigned short trace;
3460 unsigned short io_bitmap_base;
3461 } __attribute__((packed));
/* Software tss_struct: hardware part + I/O permission bitmap sized for
 * all 65536 ports plus one terminator long, + a spare stack area. */
3463 struct x86_hw_tss x86_tss;
3464 unsigned long io_bitmap[((65536/8)/sizeof(long)) + 1];
3465 unsigned long stack[64];
3466 } __attribute__((__aligned__((1 << (6)))));
/* Per-CPU TSS instance (expanded DEFINE_PER_CPU declaration). */
3467 extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tss_struct) init_tss __attribute__((__aligned__((1 << (6)))));
/* FPU/extended-state save formats.  NOTE(review): most struct bodies were
 * elided by the extraction — only headers, trailing fields and closing
 * braces remain; code kept byte-identical. */
3469 unsigned long ist[7];
3471 struct i387_fsave_struct {
3482 struct i387_fxsave_struct {
3506 u32 sw_reserved[12];
3508 } __attribute__((aligned(16)));
/* Software FPU emulation state (math-emu). */
3509 struct i387_soft_struct {
3524 struct math_emu_info *info;
/* Upper YMM halves saved by XSAVE. */
3527 struct ymmh_struct {
3530 struct xsave_hdr_struct {
3534 } __attribute__((packed));
/* Full XSAVE area: legacy fxsave region, header, then YMM state;
 * 64-byte alignment as the instruction requires. */
3535 struct xsave_struct {
3536 struct i387_fxsave_struct i387;
3537 struct xsave_hdr_struct xsave_hdr;
3538 struct ymmh_struct ymmh;
3539 } __attribute__ ((packed, aligned (64)));
/* One task's FPU state, in whichever format the CPU uses. */
3540 union thread_xstate {
3541 struct i387_fsave_struct fsave;
3542 struct i387_fxsave_struct fxsave;
3543 struct i387_soft_struct soft;
3544 struct xsave_struct xsave;
3547 union thread_xstate *state;
/* Per-CPU stack-protector canary storage. */
3549 struct stack_canary {
3551 unsigned long canary;
3553 extern __attribute__((section(".data..percpu" ""))) __typeof__(struct stack_canary) stack_canary __attribute__((__aligned__((1 << (6)))));
3554 extern unsigned int xstate_size;
3555 extern void free_thread_xstate(struct task_struct *);
3556 extern struct kmem_cache *task_xstate_cachep;
/* Per-task arch state fragment; several fields elided by the extraction
 * (numbering skips).  Includes TLS descriptors, ptrace hw-breakpoint
 * bookkeeping, vm86 state and the I/O permission bitmap pointer. */
3558 struct thread_struct {
3559 struct desc_struct tls_array[3];
3562 unsigned long sysenter_cs;
3565 struct perf_event *ptrace_bps[4];
3566 unsigned long debugreg6;
3567 unsigned long ptrace_dr7;
3569 unsigned long trap_no;
3570 unsigned long error_code;
3572 struct vm86_struct *vm86_info;
3573 unsigned long screen_bitmap;
3574 unsigned long v86flags;
3575 unsigned long v86mask;
3576 unsigned long saved_sp0;
3577 unsigned int saved_fs;
3578 unsigned int saved_gs;
3579 unsigned long *io_bitmap_ptr;
3581 unsigned io_bitmap_max;
/* Read debug register db0-db7 selected by regno; the switch/case and
 * return lines were elided by the extraction — only the per-register asm
 * statements and the default BUG() expansion remain. */
3583 static inline __attribute__((always_inline)) unsigned long native_get_debugreg(int regno)
3585 unsigned long val = 0;
3588 asm("mov %%db0, %0" :"=r" (val));
3591 asm("mov %%db1, %0" :"=r" (val));
3594 asm("mov %%db2, %0" :"=r" (val));
3597 asm("mov %%db3, %0" :"=r" (val));
3600 asm("mov %%db6, %0" :"=r" (val));
3603 asm("mov %%db7, %0" :"=r" (val));
/* Expanded BUG() for an invalid regno: ud2 plus a __bug_table record. */
3606 do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h"), "i" (499), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0);
/* Write counterpart: store `value` into the selected debug register. */
3610 static inline __attribute__((always_inline)) void native_set_debugreg(int regno, unsigned long value)
3614 asm("mov %0, %%db0" ::"r" (value));
3617 asm("mov %0, %%db1" ::"r" (value));
3620 asm("mov %0, %%db2" ::"r" (value));
3623 asm("mov %0, %%db3" ::"r" (value));
3626 asm("mov %0, %%db6" ::"r" (value));
3629 asm("mov %0, %%db7" ::"r" (value));
3632 do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h"), "i" (526), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0);
/* Rewrite the IOPL field of EFLAGS (mask ~0x3000 selects bits 12-13);
 * interior pushfl/popfl sequence lines elided by the extraction. */
3635 static inline __attribute__((always_inline)) void native_set_iopl_mask(unsigned mask)
3638 asm volatile ("pushfl;"
3645 : "i" (~0x00003000), "r" (mask));
3647 static inline __attribute__((always_inline)) void
3648 native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
3650 tss->x86_tss.sp0 = thread->sp0;
3651 if (__builtin_constant_p((((__builtin_constant_p(tss->x86_tss.ss1 != thread->sysenter_cs) ? !!(tss->x86_tss.ss1 != thread->sysenter_cs) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 555, }; ______r = __builtin_expect(!!(tss->x86_tss.ss1 != thread->sysenter_cs), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(tss->x86_tss.ss1 != thread->sysenter_cs) ? !!(tss->x86_tss.ss1 != thread->sysenter_cs) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 555, }; ______r = __builtin_expect(!!(tss->x86_tss.ss1 != thread->sysenter_cs), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 555, }; ______r = !!(((__builtin_constant_p(tss->x86_tss.ss1 != thread->sysenter_cs) ? !!(tss->x86_tss.ss1 != thread->sysenter_cs) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 555, }; ______r = __builtin_expect(!!(tss->x86_tss.ss1 != thread->sysenter_cs), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
3652 tss->x86_tss.ss1 = thread->sysenter_cs;
3653 do { paravirt_write_msr(0x00000174, thread->sysenter_cs, 0); } while (0);
/* No-op on 32-bit builds: the swapgs body is empty/elided here. */
3656 static inline __attribute__((always_inline)) void native_swapgs(void)
3659 extern unsigned long mmu_cr4_features;
/* Record a CR4 feature bit in the shadow mask; the actual read_cr4()/
 * write_cr4() lines are not visible (elided by the extraction). */
3660 static inline __attribute__((always_inline)) void set_in_cr4(unsigned long mask)
3663 mmu_cr4_features |= mask;
3668 static inline __attribute__((always_inline)) void clear_in_cr4(unsigned long mask)
3671 mmu_cr4_features &= ~mask;
3679 extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
3680 extern void release_thread(struct task_struct *);
3681 extern void prepare_to_copy(struct task_struct *tsk);
3682 unsigned long get_wchan(struct task_struct *p);
/* CPUID convenience wrappers.  The lines initializing *eax = op (and
 * *ecx = count for cpuid_count) were elided by the extraction; each
 * wrapper then delegates to __cpuid(). */
3683 static inline __attribute__((always_inline)) void cpuid(unsigned int op,
3684 unsigned int *eax, unsigned int *ebx,
3685 unsigned int *ecx, unsigned int *edx)
3689 __cpuid(eax, ebx, ecx, edx);
3691 static inline __attribute__((always_inline)) void cpuid_count(unsigned int op, int count,
3692 unsigned int *eax, unsigned int *ebx,
3693 unsigned int *ecx, unsigned int *edx)
3697 __cpuid(eax, ebx, ecx, edx);
/* Single-register accessors; the `return exx;` lines are elided. */
3699 static inline __attribute__((always_inline)) unsigned int cpuid_eax(unsigned int op)
3701 unsigned int eax, ebx, ecx, edx;
3702 cpuid(op, &eax, &ebx, &ecx, &edx);
3705 static inline __attribute__((always_inline)) unsigned int cpuid_ebx(unsigned int op)
3707 unsigned int eax, ebx, ecx, edx;
3708 cpuid(op, &eax, &ebx, &ecx, &edx);
3711 static inline __attribute__((always_inline)) unsigned int cpuid_ecx(unsigned int op)
3713 unsigned int eax, ebx, ecx, edx;
3714 cpuid(op, &eax, &ebx, &ecx, &edx);
3717 static inline __attribute__((always_inline)) unsigned int cpuid_edx(unsigned int op)
3719 unsigned int eax, ebx, ecx, edx;
3720 cpuid(op, &eax, &ebx, &ecx, &edx);
/* PAUSE instruction ("rep; nop"): spin-wait hint to the CPU. */
3723 static inline __attribute__((always_inline)) void rep_nop(void)
3725 asm volatile("rep; nop" ::: "memory");
3727 static inline __attribute__((always_inline)) void cpu_relax(void)
/* CPUID used purely as a serializing instruction; result discarded. */
3731 static inline __attribute__((always_inline)) void sync_core(void)
3734 asm volatile("cpuid" : "=a" (tmp) : "0" (1)
3735 : "ebx", "ecx", "edx", "memory");
/* MONITOR (opcode 0f 01 c8) on address eax with hints ecx/edx;
 * third parameter line elided by the extraction. */
3737 static inline __attribute__((always_inline)) void __monitor(const void *eax, unsigned long ecx,
3740 asm volatile(".byte 0x0f, 0x01, 0xc8;"
3741 :: "a" (eax), "c" (ecx), "d"(edx));
/* MWAIT (opcode 0f 01 c9) with the given hint registers. */
3743 static inline __attribute__((always_inline)) void __mwait(unsigned long eax, unsigned long ecx)
3745 asm volatile(".byte 0x0f, 0x01, 0xc9;"
3746 :: "a" (eax), "c" (ecx));
/* STI immediately before MWAIT so a pending interrupt can wake us;
 * the trace call keeps irq-state tracing consistent. */
3748 static inline __attribute__((always_inline)) void __sti_mwait(unsigned long eax, unsigned long ecx)
3750 trace_hardirqs_on();
3751 asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
3752 :: "a" (eax), "c" (ecx));
/* Idle-routine selection and GDT/per-cpu bring-up entry points. */
3754 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
3755 extern void select_idle_routine(const struct cpuinfo_x86 *c);
3756 extern void init_amd_e400_c1e_mask(void);
3757 extern unsigned long boot_option_idle_override;
3758 extern bool amd_e400_c1e_detected;
3759 enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
3760 IDLE_POLL, IDLE_FORCE_MWAIT};
3761 extern void enable_sep_cpu(void);
3762 extern int sysenter_setup(void);
3763 extern void early_trap_init(void);
3764 extern struct desc_ptr early_gdt_descr;
3765 extern void cpu_set_gdt(int);
3766 extern void switch_to_new_gdt(int);
3767 extern void load_percpu_segment(int);
3768 extern void cpu_init(void);
/* Read MSR 0x1d9 — presumably IA32_DEBUGCTL, confirm — through the
 * paravirt layer (expanded rdmsrl(); _err is discarded).  The return
 * line is elided by the extraction. */
3769 static inline __attribute__((always_inline)) unsigned long get_debugctlmsr(void)
3771 unsigned long debugctlmsr = 0;
3772 do { int _err; debugctlmsr = paravirt_read_msr(0x000001d9, &_err); } while (0);
/* Write MSR 0x1d9, splitting the value into low/high 32-bit halves
 * (expanded wrmsrl()). */
3775 static inline __attribute__((always_inline)) void update_debugctlmsr(unsigned long debugctlmsr)
3777 do { paravirt_write_msr(0x000001d9, (u32)((u64)(debugctlmsr)), ((u64)(debugctlmsr))>>32); } while (0);
/* Boot-time machine identification exported by setup code. */
3779 extern unsigned int machine_id;
3780 extern unsigned int machine_submodel_id;
3781 extern unsigned int BIOS_revision;
3782 extern int bootloader_type;
3783 extern int bootloader_version;
3784 extern char ignore_fpu_irq;
3785 static inline __attribute__((always_inline)) void prefetch(const void *x)
3787 asm volatile ("661:\n\t" ".byte " "0x8d,0x74,0x26,0x00" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(0*32+25)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "prefetchnta (%1)" "\n664:\n" ".previous" : : "i" (0), "r" (x))
3790 static inline __attribute__((always_inline)) void prefetchw(const void *x)
3792 asm volatile ("661:\n\t" ".byte " "0x8d,0x74,0x26,0x00" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(1*32+31)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "prefetchw (%1)" "\n664:\n" ".previous" : : "i" (0), "r" (x))
/* Body elided by the extraction — presumably forwards to prefetchw(x). */
3795 static inline __attribute__((always_inline)) void spin_lock_prefetch(const void *x)
/* Thread start/inspection entry points defined in arch process code. */
3799 extern unsigned long thread_saved_pc(struct task_struct *tsk);
3800 extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
3801 unsigned long new_sp);
3802 extern int get_tsc_mode(unsigned long adr);
3803 extern int set_tsc_mode(unsigned int val);
3804 extern int amd_get_nb_id(int cpu);
3808 static inline __attribute__((always_inline)) void get_aperfmperf(struct aperfmperf *am)
3810 ({ static bool __warned; int __ret_warn_once = !!(!(__builtin_constant_p((3*32+28)) && ( ((((3*32+28))>>5)==0 && (1UL<<(((3*32+28))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((3*32+28))>>5)==1 && (1UL<<(((3*32+28))&31) & (0|0))) || ((((3*32+28))>>5)==2 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==3 && (1UL<<(((3*32+28))&31) & (0))) || ((((3*32+28))>>5)==4 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==5 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==6 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==7 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==8 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==9 && (1UL<<(((3*32+28))&31) & 0)) ) ? 1 : (__builtin_constant_p(((3*32+28))) ? constant_test_bit(((3*32+28)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((3*32+28)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_once) ? !!(__ret_warn_once) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_once), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_once) ? 
!!(__ret_warn_once) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_once), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!(((__builtin_constant_p(__ret_warn_once) ? !!(__ret_warn_once) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_once), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) if (__builtin_constant_p(((({ int __ret_warn_on = !!(!__warned); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", 981); (__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); })))) ? !!((({ int __ret_warn_on = !!(!__warned); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", 981); (__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!((({ int __ret_warn_on = !!(!__warned); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", 981); (__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); }))); ______f.miss_hit[______r]++; ______r; })) __warned = true; (__builtin_constant_p(__ret_warn_once) ? !!(__ret_warn_once) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_once), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); });
3811 do { int _err; am->aperf = paravirt_read_msr(0x000000e8, &_err); } while (0);
3812 do { int _err; am->mperf = paravirt_read_msr(0x000000e7, &_err); } while (0);
3814 static inline __attribute__((always_inline))
3815 unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
3816 struct aperfmperf *new)
3818 u64 aperf = new->aperf - old->aperf;
3819 u64 mperf = new->mperf - old->mperf;
3820 unsigned long ratio = aperf;
3822 if (__builtin_constant_p(((mperf))) ? !!((mperf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 998, }; ______r = !!((mperf)); ______f.miss_hit[______r]++; ______r; }))
3823 ratio = div64_u64(aperf, mperf);
3826 extern const int amd_erratum_383[];
3827 extern const int amd_erratum_400[];
3828 extern bool cpu_has_amd_erratum(const int *);
3829 extern void mcount(void);
3830 static inline __attribute__((always_inline)) unsigned long ftrace_call_adjust(unsigned long addr)
3834 struct dyn_arch_ftrace {
3836 static inline __attribute__((always_inline)) int atomic_read(const atomic_t *v)
3838 return (*(volatile int *)&(v)->counter);
3840 static inline __attribute__((always_inline)) void atomic_set(atomic_t *v, int i)
3844 static inline __attribute__((always_inline)) void atomic_add(int i, atomic_t *v)
3846 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "addl %1,%0"
3850 static inline __attribute__((always_inline)) void atomic_sub(int i, atomic_t *v)
3852 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "subl %1,%0"
3856 static inline __attribute__((always_inline)) int atomic_sub_and_test(int i, atomic_t *v)
3859 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "subl %2,%0; sete %1"
3860 : "+m" (v->counter), "=qm" (c)
3861 : "ir" (i) : "memory");
3864 static inline __attribute__((always_inline)) void atomic_inc(atomic_t *v)
3866 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "incl %0"
3867 : "+m" (v->counter));
3869 static inline __attribute__((always_inline)) void atomic_dec(atomic_t *v)
3871 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "decl %0"
3872 : "+m" (v->counter));
3874 static inline __attribute__((always_inline)) int atomic_dec_and_test(atomic_t *v)
3877 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "decl %0; sete %1"
3878 : "+m" (v->counter), "=qm" (c)
3882 static inline __attribute__((always_inline)) int atomic_inc_and_test(atomic_t *v)
3885 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "incl %0; sete %1"
3886 : "+m" (v->counter), "=qm" (c)
3890 static inline __attribute__((always_inline)) int atomic_add_negative(int i, atomic_t *v)
3893 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "addl %2,%0; sets %1"
3894 : "+m" (v->counter), "=qm" (c)
3895 : "ir" (i) : "memory");
3898 static inline __attribute__((always_inline)) int atomic_add_return(int i, atomic_t *v)
3902 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "xaddl %0, %1"
3903 : "+r" (i), "+m" (v->counter)
3907 static inline __attribute__((always_inline)) int atomic_sub_return(int i, atomic_t *v)
3909 return atomic_add_return(-i, v);
3911 static inline __attribute__((always_inline)) int atomic_cmpxchg(atomic_t *v, int old, int new)
3913 return ({ __typeof__(*(((&v->counter)))) __ret; __typeof__(*(((&v->counter)))) __old = (((old))); __typeof__(*(((&v->counter)))) __new = (((new))); switch ((sizeof(*&v->counter))) { case 1: { volatile u8 *__ptr = (volatile u8 *)(((&v->counter))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)(((&v->counter))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)(((&v->counter))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; });
3915 static inline __attribute__((always_inline)) int atomic_xchg(atomic_t *v, int new)
3917 return ({ __typeof(*((&v->counter))) __x = ((new)); switch (sizeof(*&v->counter)) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&v->counter)); asm volatile("xchgb %0,%1" : "=q" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&v->counter)); asm volatile("xchgw %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&v->counter)); asm volatile("xchgl %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } default: __xchg_wrong_size(); } __x; });
3919 static inline __attribute__((always_inline)) int atomic_add_unless(atomic_t *v, int a, int u)
3924 if (__builtin_constant_p((((__builtin_constant_p(c == (u)) ? !!(c == (u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 237, }; ______r = __builtin_expect(!!(c == (u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(c == (u)) ? !!(c == (u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 237, }; ______r = __builtin_expect(!!(c == (u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 237, }; ______r = !!(((__builtin_constant_p(c == (u)) ? !!(c == (u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 237, }; ______r = __builtin_expect(!!(c == (u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
3926 old = atomic_cmpxchg((v), c, c + (a));
3927 if (__builtin_constant_p((((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 240, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 240, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 240, }; ______r = !!(((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 240, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
3933 static inline __attribute__((always_inline)) int atomic_dec_if_positive(atomic_t *v)
3939 if (__builtin_constant_p((((__builtin_constant_p(dec < 0) ? !!(dec < 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 262, }; ______r = __builtin_expect(!!(dec < 0), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(dec < 0) ? !!(dec < 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 262, }; ______r = __builtin_expect(!!(dec < 0), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 262, }; ______r = !!(((__builtin_constant_p(dec < 0) ? !!(dec < 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 262, }; ______r = __builtin_expect(!!(dec < 0), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
3941 old = atomic_cmpxchg((v), c, dec);
3942 if (__builtin_constant_p((((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 265, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 265, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 265, }; ______r = !!(((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 265, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
3948 static inline __attribute__((always_inline)) short int atomic_inc_short(short int *v)
3950 asm(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "addw $1, %0" : "+m" (*v));
3954 u64 __attribute__((aligned(8))) counter;
3956 static inline __attribute__((always_inline)) long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
3958 return ((__typeof__(*(&v->counter)))__cmpxchg64((&v->counter), (unsigned long long)(o), (unsigned long long)(n)));
3960 static inline __attribute__((always_inline)) long long atomic64_xchg(atomic64_t *v, long long n)
3963 unsigned high = (unsigned)(n >> 32);
3964 unsigned low = (unsigned)n;
3965 asm volatile("call atomic64_" "xchg" "_cx8"
3966 : "=A" (o), "+b" (low), "+c" (high)
3972 static inline __attribute__((always_inline)) void atomic64_set(atomic64_t *v, long long i)
3974 unsigned high = (unsigned)(i >> 32);
3975 unsigned low = (unsigned)i;
3976 asm volatile("call atomic64_" "set" "_cx8"
3977 : "+b" (low), "+c" (high)
3979 : "eax", "edx", "memory"
3982 static inline __attribute__((always_inline)) long long atomic64_read(atomic64_t *v)
3985 asm volatile("call atomic64_" "read" "_cx8"
3986 : "=A" (r), "+c" (v)
3991 static inline __attribute__((always_inline)) long long atomic64_add_return(long long i, atomic64_t *v)
3993 asm volatile("call atomic64_" "add_return" "_cx8"
3994 : "+A" (i), "+c" (v)
3999 static inline __attribute__((always_inline)) long long atomic64_sub_return(long long i, atomic64_t *v)
4001 asm volatile("call atomic64_" "sub_return" "_cx8"
4002 : "+A" (i), "+c" (v)
4007 static inline __attribute__((always_inline)) long long atomic64_inc_return(atomic64_t *v)
4010 asm volatile("call atomic64_" "inc_return" "_cx8"
4017 static inline __attribute__((always_inline)) long long atomic64_dec_return(atomic64_t *v)
4020 asm volatile("call atomic64_" "dec_return" "_cx8"
4027 static inline __attribute__((always_inline)) long long atomic64_add(long long i, atomic64_t *v)
4029 asm volatile("call atomic64_" "add_return" "_cx8"
4030 : "+A" (i), "+c" (v)
4035 static inline __attribute__((always_inline)) long long atomic64_sub(long long i, atomic64_t *v)
4037 asm volatile("call atomic64_" "sub_return" "_cx8"
4038 : "+A" (i), "+c" (v)
4043 static inline __attribute__((always_inline)) int atomic64_sub_and_test(long long i, atomic64_t *v)
4045 return atomic64_sub_return(i, v) == 0;
4047 static inline __attribute__((always_inline)) void atomic64_inc(atomic64_t *v)
4049 asm volatile("call atomic64_" "inc_return" "_cx8"
4051 : "memory", "eax", "ecx", "edx"
4054 static inline __attribute__((always_inline)) void atomic64_dec(atomic64_t *v)
4056 asm volatile("call atomic64_" "dec_return" "_cx8"
4058 : "memory", "eax", "ecx", "edx"
4061 static inline __attribute__((always_inline)) int atomic64_dec_and_test(atomic64_t *v)
4063 return atomic64_dec_return(v) == 0;
4065 static inline __attribute__((always_inline)) int atomic64_inc_and_test(atomic64_t *v)
4067 return atomic64_inc_return(v) == 0;
4069 static inline __attribute__((always_inline)) int atomic64_add_negative(long long i, atomic64_t *v)
4071 return atomic64_add_return(i, v) < 0;
4073 static inline __attribute__((always_inline)) int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4075 unsigned low = (unsigned)u;
4076 unsigned high = (unsigned)(u >> 32);
4077 asm volatile("call atomic64_" "add_unless" "_cx8" "\n\t"
4078 : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
4082 static inline __attribute__((always_inline)) int atomic64_inc_not_zero(atomic64_t *v)
4085 asm volatile("call atomic64_" "inc_not_zero" "_cx8"
4088 : "ecx", "edx", "memory"
4092 static inline __attribute__((always_inline)) long long atomic64_dec_if_positive(atomic64_t *v)
4095 asm volatile("call atomic64_" "dec_if_positive" "_cx8"
4102 typedef atomic_t atomic_long_t;
4103 static inline __attribute__((always_inline)) long atomic_long_read(atomic_long_t *l)
4105 atomic_t *v = (atomic_t *)l;
4106 return (long)atomic_read(v);
4108 static inline __attribute__((always_inline)) void atomic_long_set(atomic_long_t *l, long i)
4110 atomic_t *v = (atomic_t *)l;
4113 static inline __attribute__((always_inline)) void atomic_long_inc(atomic_long_t *l)
4115 atomic_t *v = (atomic_t *)l;
4118 static inline __attribute__((always_inline)) void atomic_long_dec(atomic_long_t *l)
4120 atomic_t *v = (atomic_t *)l;
4123 static inline __attribute__((always_inline)) void atomic_long_add(long i, atomic_long_t *l)
4125 atomic_t *v = (atomic_t *)l;
4128 static inline __attribute__((always_inline)) void atomic_long_sub(long i, atomic_long_t *l)
4130 atomic_t *v = (atomic_t *)l;
4133 static inline __attribute__((always_inline)) int atomic_long_sub_and_test(long i, atomic_long_t *l)
4135 atomic_t *v = (atomic_t *)l;
4136 return atomic_sub_and_test(i, v);
4138 static inline __attribute__((always_inline)) int atomic_long_dec_and_test(atomic_long_t *l)
4140 atomic_t *v = (atomic_t *)l;
4141 return atomic_dec_and_test(v);
4143 static inline __attribute__((always_inline)) int atomic_long_inc_and_test(atomic_long_t *l)
4145 atomic_t *v = (atomic_t *)l;
4146 return atomic_inc_and_test(v);
4148 static inline __attribute__((always_inline)) int atomic_long_add_negative(long i, atomic_long_t *l)
4150 atomic_t *v = (atomic_t *)l;
4151 return atomic_add_negative(i, v);
4153 static inline __attribute__((always_inline)) long atomic_long_add_return(long i, atomic_long_t *l)
4155 atomic_t *v = (atomic_t *)l;
4156 return (long)atomic_add_return(i, v);
4158 static inline __attribute__((always_inline)) long atomic_long_sub_return(long i, atomic_long_t *l)
4160 atomic_t *v = (atomic_t *)l;
4161 return (long)atomic_sub_return(i, v);
4163 static inline __attribute__((always_inline)) long atomic_long_inc_return(atomic_long_t *l)
4165 atomic_t *v = (atomic_t *)l;
4166 return (long)(atomic_add_return(1, v));
4168 static inline __attribute__((always_inline)) long atomic_long_dec_return(atomic_long_t *l)
4170 atomic_t *v = (atomic_t *)l;
4171 return (long)(atomic_sub_return(1, v));
4173 static inline __attribute__((always_inline)) long atomic_long_add_unless(atomic_long_t *l, long a, long u)
4175 atomic_t *v = (atomic_t *)l;
4176 return (long)atomic_add_unless(v, a, u);
4178 struct thread_info {
4179 struct task_struct *task;
4180 struct exec_domain *exec_domain;
4185 mm_segment_t addr_limit;
4186 struct restart_block restart_block;
4187 void *sysenter_return;
4188 unsigned long previous_esp;
4189 __u8 supervisor_stack[0];
4192 register unsigned long current_stack_pointer asm("esp") __attribute__((__used__));
4193 static inline __attribute__((always_inline)) struct thread_info *current_thread_info(void)
4195 return (struct thread_info *)
4196 (current_stack_pointer & ~((((1UL) << 12) << 1) - 1));
4198 static inline __attribute__((always_inline)) void set_restore_sigmask(void)
4200 struct thread_info *ti = current_thread_info();
4201 ti->status |= 0x0008;
4202 set_bit(2, (unsigned long *)&ti->flags);
4204 extern void arch_task_cache_init(void);
4205 extern void free_thread_info(struct thread_info *ti);
4206 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
4207 static inline __attribute__((always_inline)) void set_ti_thread_flag(struct thread_info *ti, int flag)
4209 set_bit(flag, (unsigned long *)&ti->flags);
4211 static inline __attribute__((always_inline)) void clear_ti_thread_flag(struct thread_info *ti, int flag)
4213 clear_bit(flag, (unsigned long *)&ti->flags);
4215 static inline __attribute__((always_inline)) int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
4217 return test_and_set_bit(flag, (unsigned long *)&ti->flags);
4219 static inline __attribute__((always_inline)) int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
4221 return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
4223 static inline __attribute__((always_inline)) int test_ti_thread_flag(struct thread_info *ti, int flag)
4225 return (__builtin_constant_p((flag)) ? constant_test_bit((flag), ((unsigned long *)&ti->flags)) : variable_test_bit((flag), ((unsigned long *)&ti->flags)));
4227 static inline __attribute__((always_inline)) void INIT_LIST_HEAD(struct list_head *list)
4232 static inline __attribute__((always_inline)) void __list_add(struct list_head *new,
4233 struct list_head *prev,
4234 struct list_head *next)
4241 static inline __attribute__((always_inline)) void list_add(struct list_head *new, struct list_head *head)
4243 __list_add(new, head, head->next);
4245 static inline __attribute__((always_inline)) void list_add_tail(struct list_head *new, struct list_head *head)
4247 __list_add(new, head->prev, head);
4249 static inline __attribute__((always_inline)) void __list_del(struct list_head * prev, struct list_head * next)
4254 static inline __attribute__((always_inline)) void __list_del_entry(struct list_head *entry)
4256 __list_del(entry->prev, entry->next);
4258 static inline __attribute__((always_inline)) void list_del(struct list_head *entry)
4260 __list_del(entry->prev, entry->next);
4261 entry->next = ((void *) 0x00100100 + (0x0UL));
4262 entry->prev = ((void *) 0x00200200 + (0x0UL));
4264 static inline __attribute__((always_inline)) void list_replace(struct list_head *old,
4265 struct list_head *new)
4267 new->next = old->next;
4268 new->next->prev = new;
4269 new->prev = old->prev;
4270 new->prev->next = new;
4272 static inline __attribute__((always_inline)) void list_replace_init(struct list_head *old,
4273 struct list_head *new)
4275 list_replace(old, new);
4276 INIT_LIST_HEAD(old);
4278 static inline __attribute__((always_inline)) void list_del_init(struct list_head *entry)
4280 __list_del_entry(entry);
4281 INIT_LIST_HEAD(entry);
4283 static inline __attribute__((always_inline)) void list_move(struct list_head *list, struct list_head *head)
4285 __list_del_entry(list);
4286 list_add(list, head);
4288 static inline __attribute__((always_inline)) void list_move_tail(struct list_head *list,
4289 struct list_head *head)
4291 __list_del_entry(list);
4292 list_add_tail(list, head);
4294 static inline __attribute__((always_inline)) int list_is_last(const struct list_head *list,
4295 const struct list_head *head)
4297 return list->next == head;
4299 static inline __attribute__((always_inline)) int list_empty(const struct list_head *head)
4301 return head->next == head;
4303 static inline __attribute__((always_inline)) int list_empty_careful(const struct list_head *head)
4305 struct list_head *next = head->next;
4306 return (next == head) && (next == head->prev);
4308 static inline __attribute__((always_inline)) void list_rotate_left(struct list_head *head)
4310 struct list_head *first;
4311 if (__builtin_constant_p(((!list_empty(head)))) ? !!((!list_empty(head))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 218, }; ______r = !!((!list_empty(head))); ______f.miss_hit[______r]++; ______r; })) {
4313 list_move_tail(first, head);
4316 static inline __attribute__((always_inline)) int list_is_singular(const struct list_head *head)
4318 return !list_empty(head) && (head->next == head->prev);
4320 static inline __attribute__((always_inline)) void __list_cut_position(struct list_head *list,
4321 struct list_head *head, struct list_head *entry)
4323 struct list_head *new_first = entry->next;
4324 list->next = head->next;
4325 list->next->prev = list;
4328 head->next = new_first;
4329 new_first->prev = head;
4331 static inline __attribute__((always_inline)) void list_cut_position(struct list_head *list,
4332 struct list_head *head, struct list_head *entry)
4334 if (__builtin_constant_p(((list_empty(head)))) ? !!((list_empty(head))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 262, }; ______r = !!((list_empty(head))); ______f.miss_hit[______r]++; ______r; }))
4336 if (__builtin_constant_p(((list_is_singular(head) && (head->next != entry && head != entry)))) ? !!((list_is_singular(head) && (head->next != entry && head != entry))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
4337 "include/linux/list.h"
4340 , }; ______r = !!((list_is_singular(head) && (head->next != entry && head != entry))); ______f.miss_hit[______r]++; ______r; }))
4342 if (__builtin_constant_p(((entry == head))) ? !!((entry == head)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 267, }; ______r = !!((entry == head)); ______f.miss_hit[______r]++; ______r; }))
4343 INIT_LIST_HEAD(list);
4345 __list_cut_position(list, head, entry);
4347 static inline __attribute__((always_inline)) void __list_splice(const struct list_head *list,
4348 struct list_head *prev,
4349 struct list_head *next)
4351 struct list_head *first = list->next;
4352 struct list_head *last = list->prev;
4358 static inline __attribute__((always_inline)) void list_splice(const struct list_head *list,
4359 struct list_head *head)
4361 if (__builtin_constant_p(((!list_empty(list)))) ? !!((!list_empty(list))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 295, }; ______r = !!((!list_empty(list))); ______f.miss_hit[______r]++; ______r; }))
4362 __list_splice(list, head, head->next);
4364 static inline __attribute__((always_inline)) void list_splice_tail(struct list_head *list,
4365 struct list_head *head)
4367 if (__builtin_constant_p(((!list_empty(list)))) ? !!((!list_empty(list))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 307, }; ______r = !!((!list_empty(list))); ______f.miss_hit[______r]++; ______r; }))
4368 __list_splice(list, head->prev, head);
4370 static inline __attribute__((always_inline)) void list_splice_init(struct list_head *list,
4371 struct list_head *head)
4373 if (__builtin_constant_p(((!list_empty(list)))) ? !!((!list_empty(list))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 321, }; ______r = !!((!list_empty(list))); ______f.miss_hit[______r]++; ______r; })) {
4374 __list_splice(list, head, head->next);
4375 INIT_LIST_HEAD(list);
4378 static inline __attribute__((always_inline)) void list_splice_tail_init(struct list_head *list,
4379 struct list_head *head)
4381 if (__builtin_constant_p(((!list_empty(list)))) ? !!((!list_empty(list))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 338, }; ______r = !!((!list_empty(list))); ______f.miss_hit[______r]++; ______r; })) {
4382 __list_splice(list, head->prev, head);
4383 INIT_LIST_HEAD(list);
4386 static inline __attribute__((always_inline)) void INIT_HLIST_NODE(struct hlist_node *h)
4388 h->next = ((void *)0);
4389 h->pprev = ((void *)0);
4391 static inline __attribute__((always_inline)) int hlist_unhashed(const struct hlist_node *h)
4395 static inline __attribute__((always_inline)) int hlist_empty(const struct hlist_head *h)
4399 static inline __attribute__((always_inline)) void __hlist_del(struct hlist_node *n)
4401 struct hlist_node *next = n->next;
4402 struct hlist_node **pprev = n->pprev;
4404 if (__builtin_constant_p(((next))) ? !!((next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 591, }; ______r = !!((next)); ______f.miss_hit[______r]++; ______r; }))
4405 next->pprev = pprev;
4407 static inline __attribute__((always_inline)) void hlist_del(struct hlist_node *n)
4410 n->next = ((void *) 0x00100100 + (0x0UL));
4411 n->pprev = ((void *) 0x00200200 + (0x0UL));
4413 static inline __attribute__((always_inline)) void hlist_del_init(struct hlist_node *n)
4415 if (__builtin_constant_p(((!hlist_unhashed(n)))) ? !!((!hlist_unhashed(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 604, }; ______r = !!((!hlist_unhashed(n))); ______f.miss_hit[______r]++; ______r; })) {
4420 static inline __attribute__((always_inline)) void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
4422 struct hlist_node *first = h->first;
4424 if (__builtin_constant_p(((first))) ? !!((first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 614, }; ______r = !!((first)); ______f.miss_hit[______r]++; ______r; }))
4425 first->pprev = &n->next;
4427 n->pprev = &h->first;
4429 static inline __attribute__((always_inline)) void hlist_add_before(struct hlist_node *n,
4430 struct hlist_node *next)
4432 n->pprev = next->pprev;
4434 next->pprev = &n->next;
4437 static inline __attribute__((always_inline)) void hlist_add_after(struct hlist_node *n,
4438 struct hlist_node *next)
4440 next->next = n->next;
4442 next->pprev = &n->next;
4443 if (__builtin_constant_p(((next->next))) ? !!((next->next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 637, }; ______r = !!((next->next)); ______f.miss_hit[______r]++; ______r; }))
4444 next->next->pprev = &next->next;
4446 static inline __attribute__((always_inline)) void hlist_add_fake(struct hlist_node *n)
4448 n->pprev = &n->next;
4450 static inline __attribute__((always_inline)) void hlist_move_list(struct hlist_head *old,
4451 struct hlist_head *new)
4453 new->first = old->first;
4454 if (__builtin_constant_p(((new->first))) ? !!((new->first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 655, }; ______r = !!((new->first)); ______f.miss_hit[______r]++; ______r; }))
4455 new->first->pprev = &new->first;
4456 old->first = ((void *)0);
4458 extern void add_preempt_count(int val);
4459 extern void sub_preempt_count(int val);
4460 __attribute__((regparm(0))) void preempt_schedule(void);
4461 struct preempt_notifier;
4462 struct preempt_ops {
4463 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
4464 void (*sched_out)(struct preempt_notifier *notifier,
4465 struct task_struct *next);
4467 struct preempt_notifier {
4468 struct hlist_node link;
4469 struct preempt_ops *ops;
4471 void preempt_notifier_register(struct preempt_notifier *notifier);
4472 void preempt_notifier_unregister(struct preempt_notifier *notifier);
4473 static inline __attribute__((always_inline)) void preempt_notifier_init(struct preempt_notifier *notifier,
4474 struct preempt_ops *ops)
4476 INIT_HLIST_NODE(¬ifier->link);
4477 notifier->ops = ops;
4481 extern int prove_locking;
4482 extern int lock_stat;
4484 extern int debug_locks;
4485 extern int debug_locks_silent;
4486 static inline __attribute__((always_inline)) int __debug_locks_off(void)
4488 return ({ __typeof(*((&debug_locks))) __x = ((0)); switch (sizeof(*&debug_locks)) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&debug_locks)); asm volatile("xchgb %0,%1" : "=q" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&debug_locks)); asm volatile("xchgw %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&debug_locks)); asm volatile("xchgl %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } default: __xchg_wrong_size(); } __x; });
4490 extern int debug_locks_off(void);
4492 extern void debug_show_all_locks(void);
4493 extern void debug_show_held_locks(struct task_struct *task);
4494 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
4495 extern void debug_check_no_locks_held(struct task_struct *task);
4499 struct stack_trace {
4500 unsigned int nr_entries, max_entries;
4501 unsigned long *entries;
4504 extern void save_stack_trace(struct stack_trace *trace);
4505 extern void save_stack_trace_regs(struct stack_trace *trace,
4506 struct pt_regs *regs);
4507 extern void save_stack_trace_tsk(struct task_struct *tsk,
4508 struct stack_trace *trace);
4509 extern void print_stack_trace(struct stack_trace *trace, int spaces);
4510 extern void save_stack_trace_user(struct stack_trace *trace);
4511 struct lockdep_subclass_key {
4513 } __attribute__ ((__packed__));
4514 struct lock_class_key {
4515 struct lockdep_subclass_key subkeys[8UL];
4517 extern struct lock_class_key __lockdep_no_validate__;
4519 struct list_head hash_entry;
4520 struct list_head lock_entry;
4521 struct lockdep_subclass_key *key;
4522 unsigned int subclass;
4523 unsigned int dep_gen_id;
4524 unsigned long usage_mask;
4525 struct stack_trace usage_traces[(1+3*4)];
4526 struct list_head locks_after, locks_before;
4527 unsigned int version;
4532 struct lockdep_map {
4533 struct lock_class_key *key;
4534 struct lock_class *class_cache[2];
4538 struct list_head entry;
4539 struct lock_class *class;
4540 struct stack_trace trace;
4542 struct lock_list *parent;
4548 struct list_head entry;
4553 unsigned long acquire_ip;
4554 struct lockdep_map *instance;
4555 struct lockdep_map *nest_lock;
4556 unsigned int class_idx:13;
4557 unsigned int irq_context:2;
4558 unsigned int trylock:1;
4559 unsigned int read:2;
4560 unsigned int check:2;
4561 unsigned int hardirqs_off:1;
4562 unsigned int references:11;
4564 extern void lockdep_init(void);
4565 extern void lockdep_info(void);
4566 extern void lockdep_reset(void);
4567 extern void lockdep_reset_lock(struct lockdep_map *lock);
4568 extern void lockdep_free_key_range(void *start, unsigned long size);
4569 extern void lockdep_sys_exit(void);
4570 extern void lockdep_off(void);
4571 extern void lockdep_on(void);
4572 extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
4573 struct lock_class_key *key, int subclass);
4574 static inline __attribute__((always_inline)) int lockdep_match_key(struct lockdep_map *lock,
4575 struct lock_class_key *key)
4577 return lock->key == key;
4579 extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
4580 int trylock, int read, int check,
4581 struct lockdep_map *nest_lock, unsigned long ip);
4582 extern void lock_release(struct lockdep_map *lock, int nested,
4584 extern int lock_is_held(struct lockdep_map *lock);
4585 extern void lock_set_class(struct lockdep_map *lock, const char *name,
4586 struct lock_class_key *key, unsigned int subclass,
4588 static inline __attribute__((always_inline)) void lock_set_subclass(struct lockdep_map *lock,
4589 unsigned int subclass, unsigned long ip)
4591 lock_set_class(lock, lock->name, lock->key, subclass, ip);
4593 extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
4594 extern void lockdep_clear_current_reclaim_state(void);
4595 extern void lockdep_trace_alloc(gfp_t mask);
4596 extern void print_irqtrace_events(struct task_struct *curr);
4597 extern void ftrace_nmi_enter(void);
4598 extern void ftrace_nmi_exit(void);
4599 extern void cpu_idle(void);
4600 typedef void (*smp_call_func_t)(void *info);
4601 struct call_single_data {
4602 struct list_head list;
4603 smp_call_func_t func;
4608 extern unsigned int total_cpus;
4609 int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
4613 unsigned int physptr;
4614 unsigned char length;
4615 unsigned char specification;
4616 unsigned char checksum;
4617 unsigned char feature1;
4618 unsigned char feature2;
4619 unsigned char feature3;
4620 unsigned char feature4;
4621 unsigned char feature5;
4625 unsigned short length;
4630 unsigned int oemptr;
4631 unsigned short oemsize;
4632 unsigned short oemcount;
4634 unsigned int reserved;
4638 unsigned char apicid;
4639 unsigned char apicver;
4640 unsigned char cpuflag;
4641 unsigned int cpufeature;
4642 unsigned int featureflag;
4643 unsigned int reserved[2];
4647 unsigned char busid;
4648 unsigned char bustype[6];
4652 unsigned char apicid;
4653 unsigned char apicver;
4654 unsigned char flags;
4655 unsigned int apicaddr;
4659 unsigned char irqtype;
4660 unsigned short irqflag;
4661 unsigned char srcbus;
4662 unsigned char srcbusirq;
4663 unsigned char dstapic;
4664 unsigned char dstirq;
4666 enum mp_irq_source_types {
4672 struct mpc_lintsrc {
4674 unsigned char irqtype;
4675 unsigned short irqflag;
4676 unsigned char srcbusid;
4677 unsigned char srcbusirq;
4678 unsigned char destapic;
4679 unsigned char destapiclint;
4681 struct mpc_oemtable {
4683 unsigned short length;
4694 struct screen_info {
4698 __u16 orig_video_page;
4699 __u8 orig_video_mode;
4700 __u8 orig_video_cols;
4703 __u16 orig_video_ega_bx;
4705 __u8 orig_video_lines;
4706 __u8 orig_video_isVGA;
4707 __u16 orig_video_points;
4713 __u16 cl_magic, cl_offset;
4714 __u16 lfb_linelength;
4726 __u16 vesa_attributes;
4729 } __attribute__((packed));
4730 extern struct screen_info screen_info;
4731 typedef unsigned short apm_event_t;
4732 typedef unsigned short apm_eventinfo_t;
4733 struct apm_bios_info {
4745 struct apm_bios_info bios;
4746 unsigned short connection_version;
4747 int get_power_status_broken;
4748 int get_power_status_swabinminutes;
4751 int realmode_power_off;
4754 extern struct apm_info apm_info;
4755 struct edd_device_params {
4758 __u32 num_default_cylinders;
4759 __u32 num_default_heads;
4760 __u32 sectors_per_track;
4761 __u64 number_of_sectors;
4762 __u16 bytes_per_sector;
4765 __u8 device_path_info_length;
4768 __u8 host_bus_type[4];
4769 __u8 interface_type[8];
4775 } __attribute__ ((packed)) isa;
4782 } __attribute__ ((packed)) pci;
4785 } __attribute__ ((packed)) ibnd;
4788 } __attribute__ ((packed)) xprs;
4791 } __attribute__ ((packed)) htpt;
4794 } __attribute__ ((packed)) unknown;
4803 } __attribute__ ((packed)) ata;
4811 } __attribute__ ((packed)) atapi;
4817 } __attribute__ ((packed)) scsi;
4819 __u64 serial_number;
4821 } __attribute__ ((packed)) usb;
4825 } __attribute__ ((packed)) i1394;
4829 } __attribute__ ((packed)) fibre;
4833 } __attribute__ ((packed)) i2o;
4838 } __attribute__ ((packed)) raid;
4845 } __attribute__ ((packed)) sata;
4849 } __attribute__ ((packed)) unknown;
4853 } __attribute__ ((packed));
4857 __u16 interface_support;
4858 __u16 legacy_max_cylinder;
4859 __u8 legacy_max_head;
4860 __u8 legacy_sectors_per_track;
4861 struct edd_device_params params;
4862 } __attribute__ ((packed));
4864 unsigned int mbr_signature[16];
4865 struct edd_info edd_info[6];
4866 unsigned char mbr_signature_nr;
4867 unsigned char edd_info_nr;
4869 extern struct edd edd;
4874 } __attribute__((packed));
4877 struct e820entry map[128];
4879 extern struct e820map e820;
4880 extern struct e820map e820_saved;
4881 extern unsigned long pci_mem_start;
4882 extern int e820_any_mapped(u64 start, u64 end, unsigned type);
4883 extern int e820_all_mapped(u64 start, u64 end, unsigned type);
4884 extern void e820_add_region(u64 start, u64 size, int type);
4885 extern void e820_print_map(char *who);
4887 sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map);
4888 extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
4890 extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
4892 extern void update_e820(void);
4893 extern void e820_setup_gap(void);
4894 extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
4895 unsigned long start_addr, unsigned long long end_addr);
4897 extern void parse_e820_ext(struct setup_data *data);
4898 extern void e820_mark_nosave_regions(unsigned long limit_pfn);
4899 static inline __attribute__((always_inline)) void early_memtest(unsigned long start, unsigned long end)
4902 extern unsigned long e820_end_of_ram_pfn(void);
4903 extern unsigned long e820_end_of_low_ram_pfn(void);
4904 extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
4905 void memblock_x86_fill(void);
4906 void memblock_find_dma_reserve(void);
4907 extern void finish_e820_parsing(void);
4908 extern void e820_reserve_resources(void);
4909 extern void e820_reserve_resources_late(void);
4910 extern void setup_memory_map(void);
4911 extern char *default_machine_specific_memory_setup(void);
4912 static inline __attribute__((always_inline)) bool is_ISA_range(u64 s, u64 e)
4914 return s >= 0xa0000 && e <= 0x100000;
4917 resource_size_t start;
4918 resource_size_t end;
4920 unsigned long flags;
4921 struct resource *parent, *sibling, *child;
4923 struct resource_list {
4924 struct resource_list *next;
4925 struct resource *res;
4926 struct pci_dev *dev;
4928 extern struct resource ioport_resource;
4929 extern struct resource iomem_resource;
4930 extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
4931 extern int request_resource(struct resource *root, struct resource *new);
4932 extern int release_resource(struct resource *new);
4933 void release_child_resources(struct resource *new);
4934 extern void reserve_region_with_split(struct resource *root,
4935 resource_size_t start, resource_size_t end,
4937 extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
4938 extern int insert_resource(struct resource *parent, struct resource *new);
4939 extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
4940 extern void arch_remove_reservations(struct resource *avail);
4941 extern int allocate_resource(struct resource *root, struct resource *new,
4942 resource_size_t size, resource_size_t min,
4943 resource_size_t max, resource_size_t align,
4944 resource_size_t (*alignf)(void *,
4945 const struct resource *,
4949 int adjust_resource(struct resource *res, resource_size_t start,
4950 resource_size_t size);
4951 resource_size_t resource_alignment(struct resource *res);
4952 static inline __attribute__((always_inline)) resource_size_t resource_size(const struct resource *res)
4954 return res->end - res->start + 1;
4956 static inline __attribute__((always_inline)) unsigned long resource_type(const struct resource *res)
4958 return res->flags & 0x00001f00;
4960 extern struct resource * __request_region(struct resource *,
4961 resource_size_t start,
4963 const char *name, int flags);
4964 extern int __check_region(struct resource *, resource_size_t, resource_size_t);
4965 extern void __release_region(struct resource *, resource_size_t,
4967 static inline __attribute__((always_inline)) int __attribute__((deprecated)) check_region(resource_size_t s,
4970 return __check_region(&ioport_resource, s, n);
4973 extern struct resource * __devm_request_region(struct device *dev,
4974 struct resource *parent, resource_size_t start,
4975 resource_size_t n, const char *name);
4976 extern void __devm_release_region(struct device *dev, struct resource *parent,
4977 resource_size_t start, resource_size_t n);
4978 extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
4979 extern int iomem_is_exclusive(u64 addr);
4981 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
4982 void *arg, int (*func)(unsigned long, unsigned long, void *));
4989 extern struct ist_info ist_info;
4991 unsigned char dummy[128];
4993 extern struct edid_info edid_info;
5000 struct setup_header {
5011 __u32 realmode_swtch;
5013 __u16 kernel_version;
5014 __u8 type_of_loader;
5016 __u16 setup_move_size;
5018 __u32 ramdisk_image;
5020 __u32 bootsect_kludge;
5022 __u8 ext_loader_ver;
5023 __u8 ext_loader_type;
5025 __u32 initrd_addr_max;
5026 __u32 kernel_alignment;
5027 __u8 relocatable_kernel;
5030 __u32 hardware_subarch;
5031 __u64 hardware_subarch_data;
5032 __u32 payload_offset;
5033 __u32 payload_length;
5035 } __attribute__((packed));
5036 struct sys_desc_table {
5040 struct olpc_ofw_header {
5044 __u32 irq_desc_table;
5045 } __attribute__((packed));
5047 __u32 efi_loader_signature;
5049 __u32 efi_memdesc_size;
5050 __u32 efi_memdesc_version;
5052 __u32 efi_memmap_size;
5053 __u32 efi_systab_hi;
5054 __u32 efi_memmap_hi;
5056 struct boot_params {
5057 struct screen_info screen_info;
5058 struct apm_bios_info apm_bios_info;
5061 struct ist_info ist_info;
5065 struct sys_desc_table sys_desc_table;
5066 struct olpc_ofw_header olpc_ofw_header;
5068 struct edid_info edid_info;
5069 struct efi_info efi_info;
5073 __u8 eddbuf_entries;
5074 __u8 edd_mbr_sig_buf_entries;
5076 struct setup_header hdr;
5077 __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
5078 __u32 edd_mbr_sig_buffer[16];
5079 struct e820entry e820_map[128];
5081 struct edd_info eddbuf[6];
5083 } __attribute__((packed));
5095 struct x86_init_mpparse {
5096 void (*mpc_record)(unsigned int mode);
5097 void (*setup_ioapic_ids)(void);
5098 int (*mpc_apic_id)(struct mpc_cpu *m);
5099 void (*smp_read_mpc_oem)(struct mpc_table *mpc);
5100 void (*mpc_oem_pci_bus)(struct mpc_bus *m);
5101 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
5102 void (*find_smp_config)(void);
5103 void (*get_smp_config)(unsigned int early);
5105 struct x86_init_resources {
5106 void (*probe_roms)(void);
5107 void (*reserve_resources)(void);
5108 char *(*memory_setup)(void);
5110 struct x86_init_irqs {
5111 void (*pre_vector_init)(void);
5112 void (*intr_init)(void);
5113 void (*trap_init)(void);
5115 struct x86_init_oem {
5116 void (*arch_setup)(void);
5117 void (*banner)(void);
5119 struct x86_init_mapping {
5120 void (*pagetable_reserve)(u64 start, u64 end);
5122 struct x86_init_paging {
5123 void (*pagetable_setup_start)(pgd_t *base);
5124 void (*pagetable_setup_done)(pgd_t *base);
5126 struct x86_init_timers {
5127 void (*setup_percpu_clockev)(void);
5128 void (*tsc_pre_init)(void);
5129 void (*timer_init)(void);
5130 void (*wallclock_init)(void);
5132 struct x86_init_iommu {
5133 int (*iommu_init)(void);
5135 struct x86_init_pci {
5136 int (*arch_init)(void);
5138 void (*init_irq)(void);
5139 void (*fixup_irqs)(void);
5141 struct x86_init_ops {
5142 struct x86_init_resources resources;
5143 struct x86_init_mpparse mpparse;
5144 struct x86_init_irqs irqs;
5145 struct x86_init_oem oem;
5146 struct x86_init_mapping mapping;
5147 struct x86_init_paging paging;
5148 struct x86_init_timers timers;
5149 struct x86_init_iommu iommu;
5150 struct x86_init_pci pci;
5152 struct x86_cpuinit_ops {
5153 void (*setup_percpu_clockev)(void);
5155 struct x86_platform_ops {
5156 unsigned long (*calibrate_tsc)(void);
5157 unsigned long (*get_wallclock)(void);
5158 int (*set_wallclock)(unsigned long nowtime);
5159 void (*iommu_shutdown)(void);
5160 bool (*is_untracked_pat_range)(u64 start, u64 end);
5161 void (*nmi_init)(void);
5162 int (*i8042_detect)(void);
5165 struct x86_msi_ops {
5166 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
5167 void (*teardown_msi_irq)(unsigned int irq);
5168 void (*teardown_msi_irqs)(struct pci_dev *dev);
5170 extern struct x86_init_ops x86_init;
5171 extern struct x86_cpuinit_ops x86_cpuinit;
5172 extern struct x86_platform_ops x86_platform;
5173 extern struct x86_msi_ops x86_msi;
5174 extern void x86_init_noop(void);
5175 extern void x86_init_uint_noop(unsigned int unused);
5177 struct { unsigned int __reserved[4]; } __reserved_01;
5178 struct { unsigned int __reserved[4]; } __reserved_02;
5180 unsigned int __reserved_1 : 24,
5183 unsigned int __reserved[3];
5187 unsigned int version : 8,
5191 unsigned int __reserved[3];
5193 struct { unsigned int __reserved[4]; } __reserved_03;
5194 struct { unsigned int __reserved[4]; } __reserved_04;
5195 struct { unsigned int __reserved[4]; } __reserved_05;
5196 struct { unsigned int __reserved[4]; } __reserved_06;
5198 unsigned int priority : 8,
5200 unsigned int __reserved_2[3];
5204 unsigned int priority : 8,
5206 unsigned int __reserved_2[3];
5210 unsigned int priority : 8,
5212 unsigned int __reserved_2[3];
5216 unsigned int __reserved[3];
5218 struct { unsigned int __reserved[4]; } __reserved_07;
5220 unsigned int __reserved_1 : 24,
5222 unsigned int __reserved_2[3];
5225 unsigned int __reserved_1 : 28,
5227 unsigned int __reserved_2[3];
5230 unsigned int spurious_vector : 8,
5234 unsigned int __reserved_3[3];
5237 unsigned int bitfield;
5238 unsigned int __reserved[3];
5241 unsigned int bitfield;
5242 unsigned int __reserved[3];
5245 unsigned int bitfield;
5246 unsigned int __reserved[3];
5250 unsigned int send_cs_error : 1,
5251 receive_cs_error : 1,
5252 send_accept_error : 1,
5253 receive_accept_error : 1,
5255 send_illegal_vector : 1,
5256 receive_illegal_vector : 1,
5257 illegal_register_address : 1,
5259 unsigned int __reserved_3[3];
5262 unsigned int errors;
5263 unsigned int __reserved_3[3];
5266 struct { unsigned int __reserved[4]; } __reserved_08;
5267 struct { unsigned int __reserved[4]; } __reserved_09;
5268 struct { unsigned int __reserved[4]; } __reserved_10;
5269 struct { unsigned int __reserved[4]; } __reserved_11;
5270 struct { unsigned int __reserved[4]; } __reserved_12;
5271 struct { unsigned int __reserved[4]; } __reserved_13;
5272 struct { unsigned int __reserved[4]; } __reserved_14;
5274 unsigned int vector : 8,
5276 destination_mode : 1,
5277 delivery_status : 1,
5284 unsigned int __reserved_4[3];
5288 unsigned int __reserved_1 : 24,
5291 unsigned int __reserved_3 : 24,
5294 unsigned int __reserved_4[3];
5297 unsigned int vector : 8,
5299 delivery_status : 1,
5304 unsigned int __reserved_4[3];
5307 unsigned int vector : 8,
5310 delivery_status : 1,
5314 unsigned int __reserved_4[3];
5317 unsigned int vector : 8,
5320 delivery_status : 1,
5324 unsigned int __reserved_4[3];
5327 unsigned int vector : 8,
5330 delivery_status : 1,
5336 unsigned int __reserved_3[3];
5339 unsigned int vector : 8,
5342 delivery_status : 1,
5348 unsigned int __reserved_3[3];
5351 unsigned int vector : 8,
5353 delivery_status : 1,
5357 unsigned int __reserved_4[3];
5360 unsigned int initial_count;
5361 unsigned int __reserved_2[3];
5365 unsigned int curr_count;
5366 unsigned int __reserved_2[3];
5368 struct { unsigned int __reserved[4]; } __reserved_16;
5369 struct { unsigned int __reserved[4]; } __reserved_17;
5370 struct { unsigned int __reserved[4]; } __reserved_18;
5371 struct { unsigned int __reserved[4]; } __reserved_19;
5373 unsigned int divisor : 4,
5375 unsigned int __reserved_2[3];
5377 struct { unsigned int __reserved[4]; } __reserved_20;
5378 } __attribute__ ((packed));
5379 enum ioapic_irq_destination_types {
5381 dest_LowestPrio = 1,
5383 dest__reserved_1 = 3,
5386 dest__reserved_2 = 6,
5389 extern int apic_version[];
5390 extern int pic_mode;
5391 extern unsigned int def_to_bigsmp;
5392 extern unsigned long mp_bus_not_pci[(((260) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
5393 extern unsigned int boot_cpu_physical_apicid;
5394 extern unsigned int max_physical_apicid;
5395 extern int mpc_default_type;
5396 extern unsigned long mp_lapic_addr;
5397 extern int smp_found_config;
5398 static inline __attribute__((always_inline)) void get_smp_config(void)
5400 x86_init.mpparse.get_smp_config(0);
5402 static inline __attribute__((always_inline)) void early_get_smp_config(void)
5404 x86_init.mpparse.get_smp_config(1);
5406 static inline __attribute__((always_inline)) void find_smp_config(void)
5408 x86_init.mpparse.find_smp_config();
5410 extern void early_reserve_e820_mpc_new(void);
5411 extern int enable_update_mptable;
5412 extern int default_mpc_apic_id(struct mpc_cpu *m);
5413 extern void default_smp_read_mpc_oem(struct mpc_table *mpc);
5414 extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str);
5415 extern void default_find_smp_config(void);
5416 extern void default_get_smp_config(unsigned int early);
5417 void __attribute__ ((__section__(".cpuinit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) generic_processor_info(int apicid, int version);
5418 extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
5419 extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
5421 extern void mp_config_acpi_legacy_irqs(void);
5423 extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
5424 int active_high_low);
5425 struct physid_mask {
5426 unsigned long mask[(((256) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
5428 typedef struct physid_mask physid_mask_t;
5429 static inline __attribute__((always_inline)) unsigned long physids_coerce(physid_mask_t *map)
5431 return map->mask[0];
5433 static inline __attribute__((always_inline)) void physids_promote(unsigned long physids, physid_mask_t *map)
5435 bitmap_zero((*map).mask, 256);
5436 map->mask[0] = physids;
5438 static inline __attribute__((always_inline)) void physid_set_mask_of_physid(int physid, physid_mask_t *map)
5440 bitmap_zero((*map).mask, 256);
5441 set_bit(physid, (*map).mask);
5443 extern physid_mask_t phys_cpu_present_map;
5444 extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
5445 extern int default_acpi_madt_oem_check(char *, char *);
5446 extern void local_bh_disable(void);
5447 extern void _local_bh_enable(void);
5448 extern void local_bh_enable(void);
5449 extern void local_bh_enable_ip(unsigned long ip);
5450 typedef struct arch_spinlock {
5456 typedef struct raw_spinlock {
5457 arch_spinlock_t raw_lock;
5458 unsigned int magic, owner_cpu;
5460 struct lockdep_map dep_map;
5462 typedef struct spinlock {
5464 struct raw_spinlock rlock;
5466 u8 __padding[(__builtin_offsetof(struct raw_spinlock,dep_map))];
5467 struct lockdep_map dep_map;
5472 arch_rwlock_t raw_lock;
5473 unsigned int magic, owner_cpu;
5475 struct lockdep_map dep_map;
5477 static inline __attribute__((always_inline)) __attribute__((always_inline)) void __ticket_spin_lock(arch_spinlock_t *lock)
5481 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "xaddw %w0, %1\n"
5489 : "+Q" (inc), "+m" (lock->slock)
5493 static inline __attribute__((always_inline)) __attribute__((always_inline)) int __ticket_spin_trylock(arch_spinlock_t *lock)
5496 asm volatile("movzwl %2, %0\n\t"
5498 "leal 0x100(%" "k" "0), %1\n\t"
5500 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgw %w1,%2\n\t"
5504 : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
5509 static inline __attribute__((always_inline)) __attribute__((always_inline)) void __ticket_spin_unlock(arch_spinlock_t *lock)
5511 asm volatile( "incb %0"
5512 : "+m" (lock->slock)
5516 static inline __attribute__((always_inline)) int __ticket_spin_is_locked(arch_spinlock_t *lock)
5518 int tmp = (*(volatile typeof(lock->slock) *)&(lock->slock));
5519 return !!(((tmp >> 8) ^ tmp) & ((1 << 8) - 1));
5521 static inline __attribute__((always_inline)) int __ticket_spin_is_contended(arch_spinlock_t *lock)
5523 int tmp = (*(volatile typeof(lock->slock) *)&(lock->slock));
5524 return (((tmp >> 8) - tmp) & ((1 << 8) - 1)) > 1;
5526 static inline __attribute__((always_inline)) int arch_spin_is_locked(arch_spinlock_t *lock)
5528 return __ticket_spin_is_locked(lock);
5530 static inline __attribute__((always_inline)) int arch_spin_is_contended(arch_spinlock_t *lock)
5532 return __ticket_spin_is_contended(lock);
5534 static inline __attribute__((always_inline)) __attribute__((always_inline)) void arch_spin_lock(arch_spinlock_t *lock)
5536 __ticket_spin_lock(lock);
5538 static inline __attribute__((always_inline)) __attribute__((always_inline)) int arch_spin_trylock(arch_spinlock_t *lock)
5540 return __ticket_spin_trylock(lock);
5542 static inline __attribute__((always_inline)) __attribute__((always_inline)) void arch_spin_unlock(arch_spinlock_t *lock)
5544 __ticket_spin_unlock(lock);
5546 static inline __attribute__((always_inline)) __attribute__((always_inline)) void arch_spin_lock_flags(arch_spinlock_t *lock,
5547 unsigned long flags)
5549 arch_spin_lock(lock);
5551 static inline __attribute__((always_inline)) void arch_spin_unlock_wait(arch_spinlock_t *lock)
5553 while (arch_spin_is_locked(lock))
5556 static inline __attribute__((always_inline)) int arch_read_can_lock(arch_rwlock_t *lock)
5558 return (int)(lock)->lock > 0;
5560 static inline __attribute__((always_inline)) int arch_write_can_lock(arch_rwlock_t *lock)
5562 return (lock)->lock == 0x01000000;
5564 static inline __attribute__((always_inline)) void arch_read_lock(arch_rwlock_t *rw)
5566 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " subl $1,(%0)\n\t"
5568 "call __read_lock_failed\n\t"
5570 ::"a" (rw) : "memory");
5572 static inline __attribute__((always_inline)) void arch_write_lock(arch_rwlock_t *rw)
5574 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " subl %1,(%0)\n\t"
5576 "call __write_lock_failed\n\t"
5578 ::"a" (rw), "i" (0x01000000) : "memory");
5580 static inline __attribute__((always_inline)) int arch_read_trylock(arch_rwlock_t *lock)
5582 atomic_t *count = (atomic_t *)lock;
5583 if (__builtin_constant_p((((atomic_sub_return(1, count)) >= 0))) ? !!(((atomic_sub_return(1, count)) >= 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/spinlock.h", .line = 271, }; ______r = !!(((atomic_sub_return(1, count)) >= 0)); ______f.miss_hit[______r]++; ______r; }))
5588 static inline __attribute__((always_inline)) int arch_write_trylock(arch_rwlock_t *lock)
5590 atomic_t *count = (atomic_t *)lock;
5591 if (__builtin_constant_p(((atomic_sub_and_test(0x01000000, count)))) ? !!((atomic_sub_and_test(0x01000000, count))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/spinlock.h", .line = 281, }; ______r = !!((atomic_sub_and_test(0x01000000, count))); ______f.miss_hit[______r]++; ______r; }))
5593 atomic_add(0x01000000, count);
5596 static inline __attribute__((always_inline)) void arch_read_unlock(arch_rwlock_t *rw)
5598 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "incl %0" :"+m" (rw->lock) : : "memory");
5600 static inline __attribute__((always_inline)) void arch_write_unlock(arch_rwlock_t *rw)
5602 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "addl %1, %0"
5603 : "+m" (rw->lock) : "i" (0x01000000) : "memory");
/* No-op on x86: the locked instruction taking the lock is already a full
 * memory barrier, so no extra barrier is needed after acquiring a lock. */
5605 static inline __attribute__((always_inline)) void smp_mb__after_lock(void) { }
/* Prototypes for the lockdep/debug lock implementations (do_raw_*) and the
 * out-of-line spinlock API entry points (_raw_spin_*), all placed in the
 * dedicated .spinlock.text section so in_lock_functions() can recognise
 * lock-acquisition PCs by address range.
 * NOTE(review): several declarations below are missing their trailing
 * semicolons/annotations in this sampled view (original numbering jumps). */
5606 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
5607 struct lock_class_key *key);
5608 extern void do_raw_spin_lock(raw_spinlock_t *lock) ;
5609 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
5610 extern void do_raw_spin_unlock(raw_spinlock_t *lock) ;
5611 extern void __rwlock_init(rwlock_t *lock, const char *name,
5612 struct lock_class_key *key);
5613 extern void do_raw_read_lock(rwlock_t *lock) ;
5614 extern int do_raw_read_trylock(rwlock_t *lock);
5615 extern void do_raw_read_unlock(rwlock_t *lock) ;
5616 extern void do_raw_write_lock(rwlock_t *lock) ;
5617 extern int do_raw_write_trylock(rwlock_t *lock);
5618 extern void do_raw_write_unlock(rwlock_t *lock) ;
5619 int in_lock_functions(unsigned long addr);
5620 void __attribute__((section(".spinlock.text"))) _raw_spin_lock(raw_spinlock_t *lock) ;
5621 void __attribute__((section(".spinlock.text"))) _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
5623 void __attribute__((section(".spinlock.text")))
5624 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
5626 void __attribute__((section(".spinlock.text"))) _raw_spin_lock_bh(raw_spinlock_t *lock) ;
5627 void __attribute__((section(".spinlock.text"))) _raw_spin_lock_irq(raw_spinlock_t *lock)
5629 unsigned long __attribute__((section(".spinlock.text"))) _raw_spin_lock_irqsave(raw_spinlock_t *lock)
5631 unsigned long __attribute__((section(".spinlock.text")))
5632 _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
5634 int __attribute__((section(".spinlock.text"))) _raw_spin_trylock(raw_spinlock_t *lock);
5635 int __attribute__((section(".spinlock.text"))) _raw_spin_trylock_bh(raw_spinlock_t *lock);
5636 void __attribute__((section(".spinlock.text"))) _raw_spin_unlock(raw_spinlock_t *lock) ;
5637 void __attribute__((section(".spinlock.text"))) _raw_spin_unlock_bh(raw_spinlock_t *lock) ;
5638 void __attribute__((section(".spinlock.text"))) _raw_spin_unlock_irq(raw_spinlock_t *lock) ;
5639 void __attribute__((section(".spinlock.text")))
5640 _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
/* SMP spin_trylock core: preempt_disable() (expanded add_preempt_count +
 * compiler barrier), then do_raw_spin_trylock(); on success it records the
 * acquisition with lockdep (lock_acquire, trylock=1) -- presumably returning
 * 1. The final giant line is the expansion of preempt_enable() for the
 * failure path: barrier, sub_preempt_count(1), and preempt_schedule() if
 * TIF_NEED_RESCHED (thread flag 3) is set, all wrapped in ftrace
 * branch-profiling bookkeeping.
 * NOTE(review): braces and the return statements are not visible here. */
5642 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5645 if (__builtin_constant_p(((do_raw_spin_trylock(lock)))) ? !!((do_raw_spin_trylock(lock))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 89, }; ______r = !!((do_raw_spin_trylock(lock))); ______f.miss_hit[______r]++; ______r; })) {
5646 lock_acquire(&lock->dep_map, 0, 1, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5649 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 93, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 93, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 93, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 93, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
/* spin_lock_irqsave core: local_irq_save(flags) (expanded: type-check dummy,
 * arch_local_irq_save, trace_hardirqs_off), preempt_disable, lockdep
 * lock_acquire, then do_raw_spin_lock. Presumably returns flags -- the
 * return statement is outside this sampled view. */
5652 static inline __attribute__((always_inline)) unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
5654 unsigned long flags;
5655 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
5656 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5657 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5658 do_raw_spin_lock(lock);
/* spin_lock_irq core: local_irq_disable (arch_local_irq_disable +
 * trace_hardirqs_off), preempt_disable, lockdep acquire, then the actual
 * do_raw_spin_lock. Does not save flags -- caller must know IRQs were on. */
5661 static inline __attribute__((always_inline)) void __raw_spin_lock_irq(raw_spinlock_t *lock)
5663 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
5664 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5665 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5666 do_raw_spin_lock(lock);
/* spin_lock_bh core: the local_bh_disable() call is missing from this
 * sampled view (original line 5670 absent); what remains is preempt_disable,
 * lockdep acquire, and do_raw_spin_lock. */
5668 static inline __attribute__((always_inline)) void __raw_spin_lock_bh(raw_spinlock_t *lock)
5671 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5672 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5673 do_raw_spin_lock(lock);
/* Plain spin_lock core: preempt_disable, lockdep lock_acquire (read=0,
 * trylock=0), then do_raw_spin_lock which spins until acquired. */
5675 static inline __attribute__((always_inline)) void __raw_spin_lock(raw_spinlock_t *lock)
5677 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5678 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5679 do_raw_spin_lock(lock);
/* Plain spin_unlock core: lockdep lock_release, do_raw_spin_unlock, then
 * the expanded preempt_enable() (sub_preempt_count + conditional
 * preempt_schedule on TIF_NEED_RESCHED, flag 3, instrumented for ftrace
 * branch profiling). */
5681 static inline __attribute__((always_inline)) void __raw_spin_unlock(raw_spinlock_t *lock)
5683 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5684 do_raw_spin_unlock(lock);
5685 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 153, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 153, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 153, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 153, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
/* spin_unlock_irqrestore core: lockdep release, do_raw_spin_unlock, then
 * local_irq_restore(flags) -- expanded to check arch_irqs_disabled_flags()
 * so trace_hardirqs_off/on is called in the right order around
 * arch_local_irq_restore -- and finally the expanded preempt_enable(). */
5687 static inline __attribute__((always_inline)) void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
5688 unsigned long flags)
5690 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5691 do_raw_spin_unlock(lock);
5692 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 161, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
5693 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 162, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 162, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 162, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 162, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
/* spin_unlock_irq core: lockdep release, do_raw_spin_unlock,
 * local_irq_enable (trace_hardirqs_on + arch_local_irq_enable), then the
 * expanded preempt_enable(). Unconditionally enables IRQs. */
5695 static inline __attribute__((always_inline)) void __raw_spin_unlock_irq(raw_spinlock_t *lock)
5697 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5698 do_raw_spin_unlock(lock);
5699 do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);
5700 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 170, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 170, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 170, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 170, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
/* spin_unlock_bh core: lockdep release, do_raw_spin_unlock, a plain
 * preempt count decrement (no reschedule check here), then
 * local_bh_enable_ip() which re-enables softirqs and may run them. */
5702 static inline __attribute__((always_inline)) void __raw_spin_unlock_bh(raw_spinlock_t *lock)
5704 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5705 do_raw_spin_unlock(lock);
5706 do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0);
5707 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
/* spin_trylock_bh core: (the initial local_bh_disable is missing from this
 * sampled view) preempt_disable, attempt do_raw_spin_trylock; on success,
 * lockdep acquire (trylock=1). The tail lines undo preempt count and
 * re-enable softirqs on the failure path.
 * NOTE(review): braces and return statements are not visible here. */
5709 static inline __attribute__((always_inline)) int __raw_spin_trylock_bh(raw_spinlock_t *lock)
5712 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5713 if (__builtin_constant_p(((do_raw_spin_trylock(lock)))) ? !!((do_raw_spin_trylock(lock))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 185, }; ______r = !!((do_raw_spin_trylock(lock))); ______f.miss_hit[______r]++; ______r; })) {
5714 lock_acquire(&lock->dep_map, 0, 1, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5717 do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0);
5718 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
5721 void __attribute__((section(".spinlock.text"))) _raw_read_lock(rwlock_t *lock) ;
5722 void __attribute__((section(".spinlock.text"))) _raw_write_lock(rwlock_t *lock) ;
5723 void __attribute__((section(".spinlock.text"))) _raw_read_lock_bh(rwlock_t *lock) ;
5724 void __attribute__((section(".spinlock.text"))) _raw_write_lock_bh(rwlock_t *lock) ;
5725 void __attribute__((section(".spinlock.text"))) _raw_read_lock_irq(rwlock_t *lock) ;
5726 void __attribute__((section(".spinlock.text"))) _raw_write_lock_irq(rwlock_t *lock) ;
5727 unsigned long __attribute__((section(".spinlock.text"))) _raw_read_lock_irqsave(rwlock_t *lock)
5729 unsigned long __attribute__((section(".spinlock.text"))) _raw_write_lock_irqsave(rwlock_t *lock)
5731 int __attribute__((section(".spinlock.text"))) _raw_read_trylock(rwlock_t *lock);
5732 int __attribute__((section(".spinlock.text"))) _raw_write_trylock(rwlock_t *lock);
5733 void __attribute__((section(".spinlock.text"))) _raw_read_unlock(rwlock_t *lock) ;
5734 void __attribute__((section(".spinlock.text"))) _raw_write_unlock(rwlock_t *lock) ;
5735 void __attribute__((section(".spinlock.text"))) _raw_read_unlock_bh(rwlock_t *lock) ;
5736 void __attribute__((section(".spinlock.text"))) _raw_write_unlock_bh(rwlock_t *lock) ;
5737 void __attribute__((section(".spinlock.text"))) _raw_read_unlock_irq(rwlock_t *lock) ;
5738 void __attribute__((section(".spinlock.text"))) _raw_write_unlock_irq(rwlock_t *lock) ;
5739 void __attribute__((section(".spinlock.text")))
5740 _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
5742 void __attribute__((section(".spinlock.text")))
5743 _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
/* read_trylock core: preempt_disable, do_raw_read_trylock; on success a
 * lockdep acquire with read=2 (recursive read). Tail line is the expanded
 * preempt_enable() for the failure path.
 * NOTE(review): braces/returns are missing from this sampled view. */
5745 static inline __attribute__((always_inline)) int __raw_read_trylock(rwlock_t *lock)
5747 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5748 if (__builtin_constant_p(((do_raw_read_trylock(lock)))) ? !!((do_raw_read_trylock(lock))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 120, }; ______r = !!((do_raw_read_trylock(lock))); ______f.miss_hit[______r]++; ______r; })) {
5749 lock_acquire(&lock->dep_map, 0, 1, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5752 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 124, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 124, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 124, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 124, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
/* write_trylock core: preempt_disable, do_raw_write_trylock; on success a
 * lockdep acquire with read=0 (exclusive). Tail line is the expanded
 * preempt_enable() for the failure path.
 * NOTE(review): braces/returns are missing from this sampled view. */
5755 static inline __attribute__((always_inline)) int __raw_write_trylock(rwlock_t *lock)
5757 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5758 if (__builtin_constant_p(((do_raw_write_trylock(lock)))) ? !!((do_raw_write_trylock(lock))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 131, }; ______r = !!((do_raw_write_trylock(lock))); ______f.miss_hit[______r]++; ______r; })) {
5759 lock_acquire(&lock->dep_map, 0, 1, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5762 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 135, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 135, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 135, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 135, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
/* read_lock core: preempt_disable, lockdep acquire (read=2, recursive
 * read), then do_raw_read_lock which spins until a read lock is granted. */
5765 static inline __attribute__((always_inline)) void __raw_read_lock(rwlock_t *lock)
5767 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5768 lock_acquire(&lock->dep_map, 0, 0, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5769 do_raw_read_lock(lock);
/* read_lock_irqsave core: local_irq_save(flags), preempt_disable, lockdep
 * acquire (read=2), then the lock op written as (do_raw_read_lock)((lock))
 * -- the parenthesized form comes from the LOCK_CONTENDED macro expansion.
 * Presumably returns flags; the return line is outside this sampled view. */
5771 static inline __attribute__((always_inline)) unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
5773 unsigned long flags;
5774 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
5775 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5776 lock_acquire(&lock->dep_map, 0, 0, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5777 (do_raw_read_lock)((lock))
/* read_lock_irq core: local_irq_disable, preempt_disable, lockdep acquire
 * (read=2), then do_raw_read_lock. Does not save flags. */
5781 static inline __attribute__((always_inline)) void __raw_read_lock_irq(rwlock_t *lock)
5783 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
5784 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5785 lock_acquire(&lock->dep_map, 0, 0, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5786 do_raw_read_lock(lock);
/* read_lock_bh core: the local_bh_disable() line is missing from this
 * sampled view; remaining steps are preempt_disable, lockdep acquire
 * (read=2), and do_raw_read_lock. */
5788 static inline __attribute__((always_inline)) void __raw_read_lock_bh(rwlock_t *lock)
5791 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5792 lock_acquire(&lock->dep_map, 0, 0, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5793 do_raw_read_lock(lock);
/* write_lock_irqsave core: local_irq_save(flags), preempt_disable, lockdep
 * acquire (read=0, exclusive), then (do_raw_write_lock)((lock)) from the
 * LOCK_CONTENDED expansion. Presumably returns flags; return line is
 * outside this sampled view. */
5795 static inline __attribute__((always_inline)) unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
5797 unsigned long flags;
5798 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
5799 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5800 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5801 (do_raw_write_lock)((lock))
/* write_lock_irq core: local_irq_disable, preempt_disable, lockdep acquire
 * (exclusive), then do_raw_write_lock. Does not save flags. */
5805 static inline __attribute__((always_inline)) void __raw_write_lock_irq(rwlock_t *lock)
5807 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
5808 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5809 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5810 do_raw_write_lock(lock);
/* write_lock_bh core: the local_bh_disable() line is missing from this
 * sampled view; remaining steps are preempt_disable, lockdep acquire
 * (exclusive), and do_raw_write_lock. */
5812 static inline __attribute__((always_inline)) void __raw_write_lock_bh(rwlock_t *lock)
5815 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5816 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5817 do_raw_write_lock(lock);
/* Plain write_lock core: preempt_disable, lockdep acquire (exclusive),
 * then do_raw_write_lock which spins until the writer owns the lock. */
5819 static inline __attribute__((always_inline)) void __raw_write_lock(rwlock_t *lock)
5821 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
5822 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
5823 do_raw_write_lock(lock);
/* write_unlock core: lockdep release, do_raw_write_unlock, then expanded
 * preempt_enable() (conditional preempt_schedule on TIF_NEED_RESCHED). */
5825 static inline __attribute__((always_inline)) void __raw_write_unlock(rwlock_t *lock)
5827 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5828 do_raw_write_unlock(lock);
5829 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 222, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 222, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 222, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 222, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
/* read_unlock core: lockdep release, do_raw_read_unlock, then expanded
 * preempt_enable() (conditional preempt_schedule on TIF_NEED_RESCHED). */
5831 static inline __attribute__((always_inline)) void __raw_read_unlock(rwlock_t *lock)
5833 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5834 do_raw_read_unlock(lock);
5835 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 229, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 229, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 229, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 229, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
/* read_unlock_irqrestore core: lockdep release, do_raw_read_unlock, then
 * local_irq_restore(flags) (branching on arch_irqs_disabled_flags so the
 * hardirq tracer is updated in the correct order), and finally the
 * expanded preempt_enable(). */
5837 static inline __attribute__((always_inline)) void
5838 __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
5840 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5841 do_raw_read_unlock(lock);
5842 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 237, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
5843 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 238, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 238, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 238, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 238, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
5845 static inline __attribute__((always_inline)) void __raw_read_unlock_irq(rwlock_t *lock)
5847 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5848 do_raw_read_unlock(lock);
5849 do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);
5850 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 246, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 246, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 246, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 246, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
/*
 * Release a read-held rwlock and re-enable bottom halves.
 * NOTE(review): preprocessed extraction — the leading "58xx" numbers are
 * fused original line numbers and the function's brace lines were elided.
 */
5852 static inline __attribute__((always_inline)) void __raw_read_unlock_bh(rwlock_t *lock)
/* Tell lockdep the read lock (nested release = 1) is being dropped. */
5854 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5855 do_raw_read_unlock(lock);
/* Expanded preempt count decrement (compiler barrier + sub_preempt_count). */
5856 do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0);
/* Re-enable softirq processing, attributing the event to our caller. */
5857 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
5859 static inline __attribute__((always_inline)) void __raw_write_unlock_irqrestore(rwlock_t *lock,
5860 unsigned long flags)
5862 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5863 do_raw_write_unlock(lock);
5864 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 262, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
5865 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 263, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 263, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 263, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 263, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
5867 static inline __attribute__((always_inline)) void __raw_write_unlock_irq(rwlock_t *lock)
5869 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5870 do_raw_write_unlock(lock);
5871 do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);
5872 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 271, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 271, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 271, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 271, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
/*
 * Release a write-held rwlock and re-enable bottom halves.
 * NOTE(review): preprocessed extraction — fused line numbers, brace lines elided.
 */
5874 static inline __attribute__((always_inline)) void __raw_write_unlock_bh(rwlock_t *lock)
/* Lockdep release of the write lock. */
5876 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5877 do_raw_write_unlock(lock);
/* Expanded preempt count decrement (compiler barrier + sub_preempt_count). */
5878 do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0);
/* Re-enable softirq processing, attributing the event to our caller. */
5879 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
/*
 * spinlock_t wrapper API: each spin_* helper forwards to the matching
 * _raw_spin_* / arch_spin_* operation on the embedded raw lock
 * (&lock->rlock).  NOTE(review): preprocessed extraction — the leading
 * "59xx" numbers are fused original line numbers and all brace-only
 * lines were elided; code left byte-identical.
 */
/* Return the raw_spinlock_t embedded inside a spinlock_t. */
5881 static inline __attribute__((always_inline)) raw_spinlock_t *spinlock_check(spinlock_t *lock)
5883 return &lock->rlock;
5885 static inline __attribute__((always_inline)) void spin_lock(spinlock_t *lock)
5887 _raw_spin_lock(&lock->rlock);
/* Lock variant that also disables bottom halves. */
5889 static inline __attribute__((always_inline)) void spin_lock_bh(spinlock_t *lock)
5891 _raw_spin_lock_bh(&lock->rlock);
/* Non-blocking acquire; nonzero on success. */
5893 static inline __attribute__((always_inline)) int spin_trylock(spinlock_t *lock)
5895 return (_raw_spin_trylock(&lock->rlock));
/* Lock variant that also disables local interrupts. */
5897 static inline __attribute__((always_inline)) void spin_lock_irq(spinlock_t *lock)
5899 _raw_spin_lock_irq(&lock->rlock);
5901 static inline __attribute__((always_inline)) void spin_unlock(spinlock_t *lock)
5903 _raw_spin_unlock(&lock->rlock);
5905 static inline __attribute__((always_inline)) void spin_unlock_bh(spinlock_t *lock)
5907 _raw_spin_unlock_bh(&lock->rlock);
5909 static inline __attribute__((always_inline)) void spin_unlock_irq(spinlock_t *lock)
5911 _raw_spin_unlock_irq(&lock->rlock);
/*
 * Unlock and restore the saved IRQ state; the ({ ... }) statement
 * expression is the expanded typecheck() verifying flags is unsigned long.
 */
5913 static inline __attribute__((always_inline)) void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
5915 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _raw_spin_unlock_irqrestore(&lock->rlock, flags); } while (0);
5917 static inline __attribute__((always_inline)) int spin_trylock_bh(spinlock_t *lock)
5919 return (_raw_spin_trylock_bh(&lock->rlock));
/*
 * Try-lock with IRQs disabled: disable interrupts, attempt the raw
 * trylock, and re-enable interrupts on failure (returning 0).
 */
5921 static inline __attribute__((always_inline)) int spin_trylock_irq(spinlock_t *lock)
5923 return ({ do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0); (_raw_spin_trylock(&lock->rlock)) ? 1 : ({ do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0); 0; }); });
/* Spin until the lock is observed unlocked (does not acquire it). */
5925 static inline __attribute__((always_inline)) void spin_unlock_wait(spinlock_t *lock)
5927 arch_spin_unlock_wait(&(&lock->rlock)->raw_lock);
5929 static inline __attribute__((always_inline)) int spin_is_locked(spinlock_t *lock)
5931 return arch_spin_is_locked(&(&lock->rlock)->raw_lock);
5933 static inline __attribute__((always_inline)) int spin_is_contended(spinlock_t *lock)
5935 return arch_spin_is_contended(&(&lock->rlock)->raw_lock);
/* Nonzero when the lock could be taken right now. */
5937 static inline __attribute__((always_inline)) int spin_can_lock(spinlock_t *lock)
5939 return (!arch_spin_is_locked(&(&lock->rlock)->raw_lock));
5941 static inline __attribute__((always_inline)) void assert_spin_locked(spinlock_t *lock)
5943 do { if (__builtin_constant_p((((__builtin_constant_p(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) ? !!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock.h", .line = 380, }; ______r = __builtin_expect(!!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) ? !!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock.h", .line = 380, }; ______r = __builtin_expect(!!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock.h", .line = 380, }; ______r = !!(((__builtin_constant_p(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) ? 
!!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock.h", .line = 380, }; ______r = __builtin_expect(!!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/spinlock.h"), "i" (380), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
5945 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
/*
 * seqlock writer side: take the spinlock, bump the sequence, and pair the
 * update with a compiler barrier.  NOTE(review): preprocessed extraction —
 * the "++sl->sequence" lines (orig. 5953/5959) appear to have been elided
 * between the lock call and the barrier; confirm against seqlock.h.
 */
5950 static inline __attribute__((always_inline)) void write_seqlock(seqlock_t *sl)
5952 spin_lock(&sl->lock);
5954 __asm__ __volatile__("": : :"memory");
5956 static inline __attribute__((always_inline)) void write_sequnlock(seqlock_t *sl)
5958 __asm__ __volatile__("": : :"memory");
5960 spin_unlock(&sl->lock);
5962 static inline __attribute__((always_inline)) int write_tryseqlock(seqlock_t *sl)
5964 int ret = spin_trylock(&sl->lock);
5965 if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 76, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; })) {
5967 __asm__ __volatile__("": : :"memory");
5971 static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned read_seqbegin(const seqlock_t *sl)
5975 ret = (*(volatile typeof(sl->sequence) *)&(sl->sequence));
5976 if (__builtin_constant_p((((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 90, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 90, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 90, }; ______r = !!(((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 90, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
5980 __asm__ __volatile__("": : :"memory");
5983 static inline __attribute__((always_inline)) __attribute__((always_inline)) int read_seqretry(const seqlock_t *sl, unsigned start)
5985 __asm__ __volatile__("": : :"memory");
5986 return (__builtin_constant_p(sl->sequence != start) ? !!(sl->sequence != start) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 108, }; ______r = __builtin_expect(!!(sl->sequence != start), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
5988 typedef struct seqcount {
5991 static inline __attribute__((always_inline)) unsigned __read_seqcount_begin(const seqcount_t *s)
5996 if (__builtin_constant_p((((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 145, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 145, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 145, }; ______r = !!(((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 145, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
/*
 * Begin a seqcount read section: fetch an even sequence snapshot via
 * __read_seqcount_begin(), then order subsequent reads with a barrier.
 * NOTE(review): the closing "return ret;" line was elided by extraction.
 */
6002 static inline __attribute__((always_inline)) unsigned read_seqcount_begin(const seqcount_t *s)
6004 unsigned ret = __read_seqcount_begin(s);
6005 __asm__ __volatile__("": : :"memory");
6008 static inline __attribute__((always_inline)) int __read_seqcount_retry(const seqcount_t *s, unsigned start)
6010 return (__builtin_constant_p(s->sequence != start) ? !!(s->sequence != start) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 184, }; ______r = __builtin_expect(!!(s->sequence != start), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
/*
 * seqcount reader-retry and writer-side helpers.  NOTE(review):
 * preprocessed extraction — the "s->sequence++" lines inside
 * write_seqcount_begin/end/barrier (orig. 6019/6023/6028) were elided;
 * only the compiler barriers remain visible here.
 */
/* Barrier, then check whether the sequence moved since 'start'. */
6012 static inline __attribute__((always_inline)) int read_seqcount_retry(const seqcount_t *s, unsigned start)
6014 __asm__ __volatile__("": : :"memory");
6015 return __read_seqcount_retry(s, start);
6017 static inline __attribute__((always_inline)) void write_seqcount_begin(seqcount_t *s)
6020 __asm__ __volatile__("": : :"memory");
6022 static inline __attribute__((always_inline)) void write_seqcount_end(seqcount_t *s)
6024 __asm__ __volatile__("": : :"memory");
/* Invalidate in-flight readers without a paired begin/end. */
6027 static inline __attribute__((always_inline)) void write_seqcount_barrier(seqcount_t *s)
6029 __asm__ __volatile__("": : :"memory");
6033 __kernel_time_t tv_sec;
6037 __kernel_time_t tv_sec;
6038 __kernel_suseconds_t tv_usec;
6044 extern struct timezone sys_tz;
/*
 * timespec / timeval comparison helpers, restored from the garbled
 * preprocessed extraction: the fused leading line numbers, elided brace
 * lines, and elided "return -1;" / "return 1;" arms have been
 * reconstructed to the canonical kernel implementation.  The ftrace
 * branch-profiling wrappers that surrounded each condition were
 * instrumentation only and are dropped; the computed results are
 * unchanged.
 */

/* Return nonzero iff *a and *b are field-wise equal. */
static inline __attribute__((always_inline)) int timespec_equal(const struct timespec *a,
        const struct timespec *b)
{
	return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}

/*
 * lhs < rhs:  return < 0
 * lhs == rhs: return 0
 * lhs > rhs:  return > 0
 * When tv_sec ties, the result is the (int-truncated) tv_nsec difference.
 */
static inline __attribute__((always_inline)) int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
{
	if (lhs->tv_sec < rhs->tv_sec)
		return -1;
	if (lhs->tv_sec > rhs->tv_sec)
		return 1;
	return lhs->tv_nsec - rhs->tv_nsec;
}

/* Same contract as timespec_compare(), with tv_usec breaking tv_sec ties. */
static inline __attribute__((always_inline)) int timeval_compare(const struct timeval *lhs, const struct timeval *rhs)
{
	if (lhs->tv_sec < rhs->tv_sec)
		return -1;
	if (lhs->tv_sec > rhs->tv_sec)
		return 1;
	return lhs->tv_usec - rhs->tv_usec;
}
6066 extern unsigned long mktime(const unsigned int year, const unsigned int mon,
6067 const unsigned int day, const unsigned int hour,
6068 const unsigned int min, const unsigned int sec);
6069 extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
6070 extern struct timespec timespec_add_safe(const struct timespec lhs,
6071 const struct timespec rhs);
/*
 * timespec_add() / timespec_sub(), restored from the garbled preprocessed
 * extraction (fused line numbers, elided braces, and the elided
 * "return ts_delta;" statements reconstructed).  Both delegate to
 * set_normalized_timespec() — declared earlier in this file — which folds
 * the raw second/nanosecond sums back into canonical form.
 */
static inline __attribute__((always_inline)) struct timespec timespec_add(struct timespec lhs,
        struct timespec rhs)
{
	struct timespec ts_delta;

	set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec,
				lhs.tv_nsec + rhs.tv_nsec);
	return ts_delta;
}

static inline __attribute__((always_inline)) struct timespec timespec_sub(struct timespec lhs,
        struct timespec rhs)
{
	struct timespec ts_delta;

	set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
				lhs.tv_nsec - rhs.tv_nsec);
	return ts_delta;
}
6088 extern void read_persistent_clock(struct timespec *ts);
6089 extern void read_boot_clock(struct timespec *ts);
6090 extern int update_persistent_clock(struct timespec now);
6091 extern int no_sync_cmos_clock __attribute__((__section__(".data..read_mostly")));
6092 void timekeeping_init(void);
6093 extern int timekeeping_suspended;
6094 unsigned long get_seconds(void);
6095 struct timespec current_kernel_time(void);
6096 struct timespec __current_kernel_time(void);
6097 struct timespec get_monotonic_coarse(void);
6098 void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
6099 struct timespec *wtom, struct timespec *sleep);
6100 void timekeeping_inject_sleeptime(struct timespec *delta);
6101 static inline __attribute__((always_inline)) u32 arch_gettimeoffset(void) { return 0; }
6102 extern void do_gettimeofday(struct timeval *tv);
6103 extern int do_settimeofday(const struct timespec *tv);
6104 extern int do_sys_settimeofday(const struct timespec *tv,
6105 const struct timezone *tz);
6106 extern long do_utimes(int dfd, const char *filename, struct timespec *times, int flags);
6108 extern int do_setitimer(int which, struct itimerval *value,
6109 struct itimerval *ovalue);
6110 extern unsigned int alarm_setitimer(unsigned int seconds);
6111 extern int do_getitimer(int which, struct itimerval *value);
6112 extern void getnstimeofday(struct timespec *tv);
6113 extern void getrawmonotonic(struct timespec *ts);
6114 extern void getnstime_raw_and_real(struct timespec *ts_raw,
6115 struct timespec *ts_real);
6116 extern void getboottime(struct timespec *ts);
6117 extern void monotonic_to_bootbased(struct timespec *ts);
6118 extern void get_monotonic_boottime(struct timespec *ts);
6119 extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
6120 extern int timekeeping_valid_for_hres(void);
6121 extern u64 timekeeping_max_deferment(void);
6122 extern void timekeeping_leap_insert(int leapsecond);
6123 extern int timekeeping_inject_offset(struct timespec *ts);
6125 extern void do_sys_times(struct tms *);
6136 void time_to_tm(time_t totalsecs, int offset, struct tm *result);
/*
 * Time-unit conversion helpers.  NOTE(review): preprocessed extraction —
 * fused line numbers and brace lines elided; code left byte-identical.
 */
/* Convert a timespec to signed 64-bit nanoseconds. */
6137 static inline __attribute__((always_inline)) s64 timespec_to_ns(const struct timespec *ts)
6139 return ((s64) ts->tv_sec * 1000000000L) + ts->tv_nsec;
/* Convert a timeval to signed 64-bit nanoseconds (usec scaled by 1000). */
6141 static inline __attribute__((always_inline)) s64 timeval_to_ns(const struct timeval *tv)
6143 return ((s64) tv->tv_sec * 1000000000L) +
6144 tv->tv_usec * 1000L;
6146 extern struct timespec ns_to_timespec(const s64 nsec);
6147 extern struct timeval ns_to_timeval(const s64 nsec);
/*
 * Add ns nanoseconds to *a, carrying whole seconds via __iter_div_u64_rem.
 * NOTE(review): the trailing "a->tv_nsec = ns;" (orig. 6151) appears to
 * have been elided by extraction — confirm against include/linux/time.h.
 */
6148 static inline __attribute__((always_inline)) __attribute__((always_inline)) void timespec_add_ns(struct timespec *a, u64 ns)
6150 a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, 1000000000L, &ns);
6154 struct timespec it_interval;
6155 struct timespec it_value;
6158 struct timeval it_interval;
6159 struct timeval it_value;
6171 struct timeval time;
6182 int :32; int :32; int :32; int :32;
6183 int :32; int :32; int :32; int :32;
6184 int :32; int :32; int :32;
6186 typedef unsigned long long cycles_t;
6187 extern unsigned int cpu_khz;
6188 extern unsigned int tsc_khz;
6189 extern void disable_TSC(void);
/*
 * TSC cycle-counter readers.  NOTE(review): preprocessed extraction —
 * the "return ret;" closing get_cycles() (orig. 6194) was elided.
 */
/* Read the TSC through the paravirt ops indirection. */
6190 static inline __attribute__((always_inline)) cycles_t get_cycles(void)
6192 unsigned long long ret = 0;
6193 (ret = paravirt_read_tsc());
/* Direct (non-paravirt) TSC read, for vsyscall/vdso use. */
6196 static inline __attribute__((always_inline)) __attribute__((always_inline)) cycles_t vget_cycles(void)
6198 return (cycles_t)__native_read_tsc();
6200 extern void tsc_init(void);
6201 extern void mark_tsc_unstable(char *reason);
6202 extern int unsynchronized_tsc(void);
6203 extern int check_tsc_unstable(void);
6204 extern unsigned long native_calibrate_tsc(void);
6205 extern void check_tsc_sync_source(int cpu);
6206 extern void check_tsc_sync_target(void);
6207 extern int notsc_setup(char *);
6208 extern void save_sched_clock_state(void);
6209 extern void restore_sched_clock_state(void);
6210 extern unsigned long tick_usec;
6211 extern unsigned long tick_nsec;
6212 extern int time_status;
6213 extern void ntp_init(void);
6214 extern void ntp_clear(void);
/*
 * Nonzero when the clock is NTP-synchronized: 0x0040 masks the
 * "unsynchronized" status bit in time_status (STA_UNSYNC — presumably;
 * verify against include/linux/timex.h).
 */
6215 static inline __attribute__((always_inline)) int ntp_synced(void)
6217 return !(time_status & 0x0040);
6219 extern u64 tick_length;
6220 extern void second_overflow(void);
6221 extern void update_ntp_one_tick(void);
6222 extern int do_adjtimex(struct timex *);
6223 extern void hardpps(const struct timespec *, const struct timespec *);
6224 int read_current_timer(unsigned long *timer_val);
6225 extern u64 __attribute__((section(".data"))) jiffies_64;
6226 extern unsigned long volatile __attribute__((section(".data"))) jiffies;
6227 u64 get_jiffies_64(void);
6228 extern unsigned long preset_lpj;
6229 extern unsigned int jiffies_to_msecs(const unsigned long j);
6230 extern unsigned int jiffies_to_usecs(const unsigned long j);
6231 extern unsigned long msecs_to_jiffies(const unsigned int m);
6232 extern unsigned long usecs_to_jiffies(const unsigned int u);
6233 extern unsigned long timespec_to_jiffies(const struct timespec *value);
6234 extern void jiffies_to_timespec(const unsigned long jiffies,
6235 struct timespec *value);
6236 extern unsigned long timeval_to_jiffies(const struct timeval *value);
6237 extern void jiffies_to_timeval(const unsigned long jiffies,
6238 struct timeval *value);
6239 extern clock_t jiffies_to_clock_t(long x);
6240 extern unsigned long clock_t_to_jiffies(unsigned long x);
6241 extern u64 jiffies_64_to_clock_t(u64 x);
6242 extern u64 nsec_to_clock_t(u64 x);
6243 extern u64 nsecs_to_jiffies64(u64 n);
6244 extern unsigned long nsecs_to_jiffies(u64 n);
/*
 * ktime_t helpers: scalar 64-bit-nanosecond (.tv64) representation.
 * NOTE(review): preprocessed extraction — fused line numbers and brace
 * lines elided; the union ktime body is not visible in this chunk.
 */
6248 typedef union ktime ktime_t;
/* Build a ktime_t from seconds + nanoseconds. */
6249 static inline __attribute__((always_inline)) ktime_t ktime_set(const long secs, const unsigned long nsecs)
6251 return (ktime_t) { .tv64 = (s64)secs * 1000000000L + (s64)nsecs };
6253 static inline __attribute__((always_inline)) ktime_t timespec_to_ktime(struct timespec ts)
6255 return ktime_set(ts.tv_sec, ts.tv_nsec);
6257 static inline __attribute__((always_inline)) ktime_t timeval_to_ktime(struct timeval tv)
6259 return ktime_set(tv.tv_sec, tv.tv_usec * 1000L);
/* Scalar equality on the raw 64-bit nanosecond value. */
6261 static inline __attribute__((always_inline)) int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
6263 return cmp1.tv64 == cmp2.tv64;
/* Convert to whole microseconds via an intermediate timeval. */
6265 static inline __attribute__((always_inline)) s64 ktime_to_us(const ktime_t kt)
6267 struct timeval tv = ns_to_timeval((kt).tv64);
6268 return (s64) tv.tv_sec * 1000000L + tv.tv_usec;
/* Convert to whole milliseconds via an intermediate timeval. */
6270 static inline __attribute__((always_inline)) s64 ktime_to_ms(const ktime_t kt)
6272 struct timeval tv = ns_to_timeval((kt).tv64);
6273 return (s64) tv.tv_sec * 1000L + tv.tv_usec / 1000L;
/* Microsecond difference later - earlier (expanded ktime_sub()). */
6275 static inline __attribute__((always_inline)) s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
6277 return ktime_to_us(({ (ktime_t){ .tv64 = (later).tv64 - (earlier).tv64 }; }));
6279 static inline __attribute__((always_inline)) ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
6281 return ({ (ktime_t){ .tv64 = (kt).tv64 + (usec * 1000) }; });
6283 static inline __attribute__((always_inline)) ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
6285 return ({ (ktime_t){ .tv64 = (kt).tv64 - (usec * 1000) }; });
6287 extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
6288 extern void ktime_get_ts(struct timespec *ts);
/* Raw-nanosecond constructor (expanded ktime_add_ns(zero, ns)). */
6289 static inline __attribute__((always_inline)) ktime_t ns_to_ktime(u64 ns)
6291 static const ktime_t ktime_zero = { .tv64 = 0 };
6292 return ({ (ktime_t){ .tv64 = (ktime_zero).tv64 + (ns) }; });
6294 enum debug_obj_state {
6297 ODEBUG_STATE_INACTIVE,
6298 ODEBUG_STATE_ACTIVE,
6299 ODEBUG_STATE_DESTROYED,
6300 ODEBUG_STATE_NOTAVAILABLE,
6303 struct debug_obj_descr;
6305 struct hlist_node node;
6306 enum debug_obj_state state;
6307 unsigned int astate;
6309 struct debug_obj_descr *descr;
6311 struct debug_obj_descr {
6313 void *(*debug_hint) (void *addr);
6314 int (*fixup_init) (void *addr, enum debug_obj_state state);
6315 int (*fixup_activate) (void *addr, enum debug_obj_state state);
6316 int (*fixup_destroy) (void *addr, enum debug_obj_state state);
6317 int (*fixup_free) (void *addr, enum debug_obj_state state);
/*
 * debug-objects API compiled out: every hook is an empty inline stub, so
 * object life-time tracking costs nothing in this configuration.
 */
6319 static inline __attribute__((always_inline)) void
6320 debug_object_init (void *addr, struct debug_obj_descr *descr) { }
6321 static inline __attribute__((always_inline)) void
6322 debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
6323 static inline __attribute__((always_inline)) void
6324 debug_object_activate (void *addr, struct debug_obj_descr *descr) { }
6325 static inline __attribute__((always_inline)) void
6326 debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
6327 static inline __attribute__((always_inline)) void
6328 debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
6329 static inline __attribute__((always_inline)) void
6330 debug_object_free (void *addr, struct debug_obj_descr *descr) { }
6331 static inline __attribute__((always_inline)) void debug_objects_early_init(void) { }
6332 static inline __attribute__((always_inline)) void debug_objects_mem_init(void) { }
6333 static inline __attribute__((always_inline)) void
6334 debug_check_no_obj_freed(const void *address, unsigned long size) { }
6337 struct list_head entry;
6338 unsigned long expires;
6339 struct tvec_base *base;
6340 void (*function)(unsigned long);
6345 char start_comm[16];
6346 struct lockdep_map lockdep_map;
6348 extern struct tvec_base boot_tvec_bases;
6349 void init_timer_key(struct timer_list *timer,
6351 struct lock_class_key *key);
6352 void init_timer_deferrable_key(struct timer_list *timer,
6354 struct lock_class_key *key);
/*
 * timer_list initialization helpers.  NOTE(review): preprocessed
 * extraction — fused line numbers, brace lines, and several parameter /
 * assignment lines (e.g. the "const char *name" parameters and the
 * "timer->data = data;" stores, orig. 6367/6377) were elided; code left
 * byte-identical.  Confirm against include/linux/timer.h.
 */
/* On-stack destructor is a no-op when debug-objects is compiled out. */
6355 static inline __attribute__((always_inline)) void destroy_timer_on_stack(struct timer_list *timer) { }
6356 static inline __attribute__((always_inline)) void init_timer_on_stack_key(struct timer_list *timer,
6358 struct lock_class_key *key)
6360 init_timer_key(timer, name, key);
/* Initialize and set callback in one step. */
6362 static inline __attribute__((always_inline)) void setup_timer_key(struct timer_list * timer,
6364 struct lock_class_key *key,
6365 void (*function)(unsigned long),
6368 timer->function = function;
6370 init_timer_key(timer, name, key);
/* On-stack variant of setup_timer_key(). */
6372 static inline __attribute__((always_inline)) void setup_timer_on_stack_key(struct timer_list *timer,
6374 struct lock_class_key *key,
6375 void (*function)(unsigned long),
6378 timer->function = function;
6380 init_timer_on_stack_key(timer, name, key);
6382 extern void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
6384 struct lock_class_key *key,
6385 void (*function)(unsigned long),
6386 unsigned long data);
/* A timer is pending iff it is linked into a base's list (next != NULL). */
6387 static inline __attribute__((always_inline)) int timer_pending(const struct timer_list * timer)
6389 return timer->entry.next != ((void *)0);
6391 extern void add_timer_on(struct timer_list *timer, int cpu);
6392 extern int del_timer(struct timer_list * timer);
6393 extern int mod_timer(struct timer_list *timer, unsigned long expires);
6394 extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
6395 extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
6396 extern void set_timer_slack(struct timer_list *time, int slack_hz);
6397 extern unsigned long get_next_timer_interrupt(unsigned long now);
6398 extern int timer_stats_active;
6399 extern void init_timer_stats(void);
6400 extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
6401 void *timerf, char *comm,
6402 unsigned int timer_flag);
6403 extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
6405 static inline __attribute__((always_inline)) void timer_stats_timer_set_start_info(struct timer_list *timer)
6407 if (__builtin_constant_p((((__builtin_constant_p(!timer_stats_active) ? !!(!timer_stats_active) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/timer.h", .line = 252, }; ______r = __builtin_expect(!!(!timer_stats_active), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(!timer_stats_active) ? !!(!timer_stats_active) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/timer.h", .line = 252, }; ______r = __builtin_expect(!!(!timer_stats_active), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/timer.h", .line = 252, }; ______r = !!(((__builtin_constant_p(!timer_stats_active) ? !!(!timer_stats_active) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/timer.h", .line = 252, }; ______r = __builtin_expect(!!(!timer_stats_active), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
6409 __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
6411 static inline __attribute__((always_inline)) void timer_stats_timer_clear_start_info(struct timer_list *timer)
6413 timer->start_site = ((void *)0);
6415 extern void add_timer(struct timer_list *timer);
6416 extern int try_to_del_timer_sync(struct timer_list *timer);
6417 extern int del_timer_sync(struct timer_list *timer);
6418 extern void init_timers(void);
6419 extern void run_local_timers(void);
6421 extern enum hrtimer_restart it_real_fn(struct hrtimer *);
6422 unsigned long __round_jiffies(unsigned long j, int cpu);
6423 unsigned long __round_jiffies_relative(unsigned long j, int cpu);
6424 unsigned long round_jiffies(unsigned long j);
6425 unsigned long round_jiffies_relative(unsigned long j);
6426 unsigned long __round_jiffies_up(unsigned long j, int cpu);
6427 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
6428 unsigned long round_jiffies_up(unsigned long j);
6429 unsigned long round_jiffies_up_relative(unsigned long j);
6430 struct workqueue_struct;
6432 typedef void (*work_func_t)(struct work_struct *work);
6434 WORK_STRUCT_PENDING_BIT = 0,
6435 WORK_STRUCT_DELAYED_BIT = 1,
6436 WORK_STRUCT_CWQ_BIT = 2,
6437 WORK_STRUCT_LINKED_BIT = 3,
6438 WORK_STRUCT_COLOR_SHIFT = 4,
6439 WORK_STRUCT_COLOR_BITS = 4,
6440 WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
6441 WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
6442 WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT,
6443 WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
6444 WORK_STRUCT_STATIC = 0,
6445 WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
6446 WORK_NO_COLOR = WORK_NR_COLORS,
6447 WORK_CPU_UNBOUND = 8,
6448 WORK_CPU_NONE = 8 + 1,
6449 WORK_CPU_LAST = WORK_CPU_NONE,
6450 WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
6451 WORK_STRUCT_COLOR_BITS,
6452 WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
6453 WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
6454 WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
6455 WORK_BUSY_PENDING = 1 << 0,
6456 WORK_BUSY_RUNNING = 1 << 1,
6458 struct work_struct {
6460 struct list_head entry;
6462 struct lockdep_map lockdep_map;
6464 struct delayed_work {
6465 struct work_struct work;
6466 struct timer_list timer;
6468 static inline __attribute__((always_inline)) struct delayed_work *to_delayed_work(struct work_struct *work)
6470 return ({ const typeof( ((struct delayed_work *)0)->work ) *__mptr = (work); (struct delayed_work *)( (char *)__mptr - __builtin_offsetof(struct delayed_work,work) );});
6472 struct execute_work {
6473 struct work_struct work;
6475 static inline __attribute__((always_inline)) void __init_work(struct work_struct *work, int onstack) { }
6476 static inline __attribute__((always_inline)) void destroy_work_on_stack(struct work_struct *work) { }
6477 static inline __attribute__((always_inline)) unsigned int work_static(struct work_struct *work) { return 0; }
6479 WQ_NON_REENTRANT = 1 << 0,
6480 WQ_UNBOUND = 1 << 1,
6481 WQ_FREEZABLE = 1 << 2,
6482 WQ_MEM_RECLAIM = 1 << 3,
6483 WQ_HIGHPRI = 1 << 4,
6484 WQ_CPU_INTENSIVE = 1 << 5,
6486 WQ_RESCUER = 1 << 7,
6487 WQ_MAX_ACTIVE = 512,
6488 WQ_MAX_UNBOUND_PER_CPU = 4,
6489 WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
6491 extern struct workqueue_struct *system_wq;
6492 extern struct workqueue_struct *system_long_wq;
6493 extern struct workqueue_struct *system_nrt_wq;
6494 extern struct workqueue_struct *system_unbound_wq;
6495 extern struct workqueue_struct *system_freezable_wq;
6496 extern struct workqueue_struct *
6497 __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
6498 struct lock_class_key *key, const char *lock_name);
6499 static inline __attribute__((always_inline)) struct workqueue_struct *
6500 alloc_ordered_workqueue(const char *name, unsigned int flags)
6502 return ({ static struct lock_class_key __key; const char *__lock_name; if (__builtin_constant_p(((__builtin_constant_p(name)))) ? !!((__builtin_constant_p(name))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/workqueue.h", .line = 337, }; ______r = !!((__builtin_constant_p(name))); ______f.miss_hit[______r]++; ______r; })) __lock_name = (name); else __lock_name = "name"; __alloc_workqueue_key((name), (WQ_UNBOUND | flags), (1), &__key, __lock_name); });
6504 extern void destroy_workqueue(struct workqueue_struct *wq);
6505 extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
6506 extern int queue_work_on(int cpu, struct workqueue_struct *wq,
6507 struct work_struct *work);
6508 extern int queue_delayed_work(struct workqueue_struct *wq,
6509 struct delayed_work *work, unsigned long delay);
6510 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
6511 struct delayed_work *work, unsigned long delay);
6512 extern void flush_workqueue(struct workqueue_struct *wq);
6513 extern void flush_scheduled_work(void);
6514 extern int schedule_work(struct work_struct *work);
6515 extern int schedule_work_on(int cpu, struct work_struct *work);
6516 extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
6517 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
6518 unsigned long delay);
6519 extern int schedule_on_each_cpu(work_func_t func);
6520 extern int keventd_up(void);
6521 int execute_in_process_context(work_func_t fn, struct execute_work *);
6522 extern bool flush_work(struct work_struct *work);
6523 extern bool flush_work_sync(struct work_struct *work);
6524 extern bool cancel_work_sync(struct work_struct *work);
6525 extern bool flush_delayed_work(struct delayed_work *dwork);
6526 extern bool flush_delayed_work_sync(struct delayed_work *work);
6527 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
6528 extern void workqueue_set_max_active(struct workqueue_struct *wq,
6530 extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
6531 extern unsigned int work_cpu(struct work_struct *work);
6532 extern unsigned int work_busy(struct work_struct *work);
6533 static inline __attribute__((always_inline)) bool cancel_delayed_work(struct delayed_work *work)
6536 ret = del_timer_sync(&work->timer);
6537 if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/workqueue.h", .line = 395, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; }))
6538 clear_bit(WORK_STRUCT_PENDING_BIT, ((unsigned long *)(&(&work->work)->data)));
6541 static inline __attribute__((always_inline)) bool __cancel_delayed_work(struct delayed_work *work)
6544 ret = del_timer(&work->timer);
6545 if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/workqueue.h", .line = 410, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; }))
6546 clear_bit(WORK_STRUCT_PENDING_BIT, ((unsigned long *)(&(&work->work)->data)));
6549 static inline __attribute__((always_inline)) __attribute__((deprecated))
6550 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
6551 struct delayed_work *work)
6553 cancel_delayed_work_sync(work);
6555 static inline __attribute__((always_inline)) __attribute__((deprecated))
6556 void cancel_rearming_delayed_work(struct delayed_work *work)
6558 cancel_delayed_work_sync(work);
6560 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
6561 extern void freeze_workqueues_begin(void);
6562 extern bool freeze_workqueues_busy(void);
6563 extern void thaw_workqueues(void);
6564 typedef struct __wait_queue wait_queue_t;
6565 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
6566 int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
6567 struct __wait_queue {
6570 wait_queue_func_t func;
6571 struct list_head task_list;
6573 struct wait_bit_key {
6577 struct wait_bit_queue {
6578 struct wait_bit_key key;
6581 struct __wait_queue_head {
6583 struct list_head task_list;
6585 typedef struct __wait_queue_head wait_queue_head_t;
6587 extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
6588 static inline __attribute__((always_inline)) void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
6592 q->func = default_wake_function;
6594 static inline __attribute__((always_inline)) void init_waitqueue_func_entry(wait_queue_t *q,
6595 wait_queue_func_t func)
6598 q->private = ((void *)0);
6601 static inline __attribute__((always_inline)) int waitqueue_active(wait_queue_head_t *q)
6603 return !list_empty(&q->task_list);
6605 extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
6606 extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
6607 extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
6608 static inline __attribute__((always_inline)) void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
6610 list_add(&new->task_list, &head->task_list);
6612 static inline __attribute__((always_inline)) void __add_wait_queue_exclusive(wait_queue_head_t *q,
6615 wait->flags |= 0x01;
6616 __add_wait_queue(q, wait);
6618 static inline __attribute__((always_inline)) void __add_wait_queue_tail(wait_queue_head_t *head,
6621 list_add_tail(&new->task_list, &head->task_list);
6623 static inline __attribute__((always_inline)) void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
6626 wait->flags |= 0x01;
6627 __add_wait_queue_tail(q, wait);
6629 static inline __attribute__((always_inline)) void __remove_wait_queue(wait_queue_head_t *head,
6632 list_del(&old->task_list);
6634 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
6635 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
6636 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
6638 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
6639 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
6640 void __wake_up_bit(wait_queue_head_t *, void *, int);
6641 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
6642 int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
6643 void wake_up_bit(void *, int);
6644 int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
6645 int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
6646 wait_queue_head_t *bit_waitqueue(void *, int);
6647 extern void sleep_on(wait_queue_head_t *q);
6648 extern long sleep_on_timeout(wait_queue_head_t *q,
6649 signed long timeout);
6650 extern void interruptible_sleep_on(wait_queue_head_t *q);
6651 extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
6652 signed long timeout);
6653 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
6654 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
6655 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
6656 void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
6657 unsigned int mode, void *key);
6658 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
6659 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
6660 static inline __attribute__((always_inline)) int wait_on_bit(void *word, int bit,
6661 int (*action)(void *), unsigned mode)
6663 if (__builtin_constant_p(((!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word)))))) ? !!((!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/wait.h", .line = 637, }; ______r = !!((!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word))))); ______f.miss_hit[______r]++; ______r; }))
6665 return out_of_line_wait_on_bit(word, bit, action, mode);
6667 static inline __attribute__((always_inline)) int wait_on_bit_lock(void *word, int bit,
6668 int (*action)(void *), unsigned mode)
6670 if (__builtin_constant_p(((!test_and_set_bit(bit, word)))) ? !!((!test_and_set_bit(bit, word))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/wait.h", .line = 661, }; ______r = !!((!test_and_set_bit(bit, word))); ______f.miss_hit[______r]++; ______r; }))
6672 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
6676 wait_queue_head_t wait;
6678 static inline __attribute__((always_inline)) void init_completion(struct completion *x)
6681 do { static struct lock_class_key __key; __init_waitqueue_head((&x->wait), &__key); } while (0);
6683 extern void wait_for_completion(struct completion *);
6684 extern int wait_for_completion_interruptible(struct completion *x);
6685 extern int wait_for_completion_killable(struct completion *x);
6686 extern unsigned long wait_for_completion_timeout(struct completion *x,
6687 unsigned long timeout);
6688 extern long wait_for_completion_interruptible_timeout(
6689 struct completion *x, unsigned long timeout);
6690 extern long wait_for_completion_killable_timeout(
6691 struct completion *x, unsigned long timeout);
6692 extern bool try_wait_for_completion(struct completion *x);
6693 extern bool completion_done(struct completion *x);
6694 extern void complete(struct completion *);
6695 extern void complete_all(struct completion *);
6696 extern void (*pm_idle)(void);
6697 extern void (*pm_power_off)(void);
6698 extern void (*pm_power_off_prepare)(void);
6700 extern const char power_group_name[];
6701 typedef struct pm_message {
6705 int (*prepare)(struct device *dev);
6706 void (*complete)(struct device *dev);
6707 int (*suspend)(struct device *dev);
6708 int (*resume)(struct device *dev);
6709 int (*freeze)(struct device *dev);
6710 int (*thaw)(struct device *dev);
6711 int (*poweroff)(struct device *dev);
6712 int (*restore)(struct device *dev);
6713 int (*suspend_noirq)(struct device *dev);
6714 int (*resume_noirq)(struct device *dev);
6715 int (*freeze_noirq)(struct device *dev);
6716 int (*thaw_noirq)(struct device *dev);
6717 int (*poweroff_noirq)(struct device *dev);
6718 int (*restore_noirq)(struct device *dev);
6719 int (*runtime_suspend)(struct device *dev);
6720 int (*runtime_resume)(struct device *dev);
6721 int (*runtime_idle)(struct device *dev);
6723 extern struct dev_pm_ops generic_subsys_pm_ops;
6734 RPM_REQ_AUTOSUSPEND,
6737 struct wakeup_source;
6738 struct dev_pm_info {
6739 pm_message_t power_state;
6740 unsigned int can_wakeup:1;
6741 unsigned int async_suspend:1;
6743 bool is_suspended:1;
6745 struct list_head entry;
6746 struct completion completion;
6747 struct wakeup_source *wakeup;
6748 struct timer_list suspend_timer;
6749 unsigned long timer_expires;
6750 struct work_struct work;
6751 wait_queue_head_t wait_queue;
6752 atomic_t usage_count;
6753 atomic_t child_count;
6754 unsigned int disable_depth:3;
6755 unsigned int ignore_children:1;
6756 unsigned int idle_notification:1;
6757 unsigned int request_pending:1;
6758 unsigned int deferred_resume:1;
6759 unsigned int run_wake:1;
6760 unsigned int runtime_auto:1;
6761 unsigned int no_callbacks:1;
6762 unsigned int irq_safe:1;
6763 unsigned int use_autosuspend:1;
6764 unsigned int timer_autosuspends:1;
6765 enum rpm_request request;
6766 enum rpm_status runtime_status;
6768 int autosuspend_delay;
6769 unsigned long last_busy;
6770 unsigned long active_jiffies;
6771 unsigned long suspended_jiffies;
6772 unsigned long accounting_timestamp;
6775 extern void update_pm_runtime_accounting(struct device *dev);
6776 struct dev_power_domain {
6777 struct dev_pm_ops ops;
6779 extern void device_pm_lock(void);
6780 extern void dpm_resume_noirq(pm_message_t state);
6781 extern void dpm_resume_end(pm_message_t state);
6782 extern void dpm_resume(pm_message_t state);
6783 extern void dpm_complete(pm_message_t state);
6784 extern void device_pm_unlock(void);
6785 extern int dpm_suspend_noirq(pm_message_t state);
6786 extern int dpm_suspend_start(pm_message_t state);
6787 extern int dpm_suspend(pm_message_t state);
6788 extern int dpm_prepare(pm_message_t state);
6789 extern void __suspend_report_result(const char *function, void *fn, int ret);
6790 extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
6791 extern int pm_generic_prepare(struct device *dev);
6792 extern int pm_generic_suspend(struct device *dev);
6793 extern int pm_generic_resume(struct device *dev);
6794 extern int pm_generic_freeze(struct device *dev);
6795 extern int pm_generic_thaw(struct device *dev);
6796 extern int pm_generic_restore(struct device *dev);
6797 extern int pm_generic_poweroff(struct device *dev);
6798 extern void pm_generic_complete(struct device *dev);
6801 DPM_ORDER_DEV_AFTER_PARENT,
6802 DPM_ORDER_PARENT_BEFORE_DEV,
6805 typedef struct { unsigned long bits[((((1 << 0)) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } nodemask_t;
6806 extern nodemask_t _unused_nodemask_arg_;
6807 static inline __attribute__((always_inline)) void __node_set(int node, volatile nodemask_t *dstp)
6809 set_bit(node, dstp->bits);
6811 static inline __attribute__((always_inline)) void __node_clear(int node, volatile nodemask_t *dstp)
6813 clear_bit(node, dstp->bits);
6815 static inline __attribute__((always_inline)) void __nodes_setall(nodemask_t *dstp, int nbits)
6817 bitmap_fill(dstp->bits, nbits);
6819 static inline __attribute__((always_inline)) void __nodes_clear(nodemask_t *dstp, int nbits)
6821 bitmap_zero(dstp->bits, nbits);
6823 static inline __attribute__((always_inline)) int __node_test_and_set(int node, nodemask_t *addr)
6825 return test_and_set_bit(node, addr->bits);
6827 static inline __attribute__((always_inline)) void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
6828 const nodemask_t *src2p, int nbits)
6830 bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
6832 static inline __attribute__((always_inline)) void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
6833 const nodemask_t *src2p, int nbits)
6835 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
6837 static inline __attribute__((always_inline)) void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
6838 const nodemask_t *src2p, int nbits)
6840 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
6842 static inline __attribute__((always_inline)) void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
6843 const nodemask_t *src2p, int nbits)
6845 bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
6847 static inline __attribute__((always_inline)) void __nodes_complement(nodemask_t *dstp,
6848 const nodemask_t *srcp, int nbits)
6850 bitmap_complement(dstp->bits, srcp->bits, nbits);
6852 static inline __attribute__((always_inline)) int __nodes_equal(const nodemask_t *src1p,
6853 const nodemask_t *src2p, int nbits)
6855 return bitmap_equal(src1p->bits, src2p->bits, nbits);
6857 static inline __attribute__((always_inline)) int __nodes_intersects(const nodemask_t *src1p,
6858 const nodemask_t *src2p, int nbits)
6860 return bitmap_intersects(src1p->bits, src2p->bits, nbits);
6862 static inline __attribute__((always_inline)) int __nodes_subset(const nodemask_t *src1p,
6863 const nodemask_t *src2p, int nbits)
6865 return bitmap_subset(src1p->bits, src2p->bits, nbits);
6867 static inline __attribute__((always_inline)) int __nodes_empty(const nodemask_t *srcp, int nbits)
6869 return bitmap_empty(srcp->bits, nbits);
6871 static inline __attribute__((always_inline)) int __nodes_full(const nodemask_t *srcp, int nbits)
6873 return bitmap_full(srcp->bits, nbits);
6875 static inline __attribute__((always_inline)) int __nodes_weight(const nodemask_t *srcp, int nbits)
6877 return bitmap_weight(srcp->bits, nbits);
6879 static inline __attribute__((always_inline)) void __nodes_shift_right(nodemask_t *dstp,
6880 const nodemask_t *srcp, int n, int nbits)
6882 bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
6884 static inline __attribute__((always_inline)) void __nodes_shift_left(nodemask_t *dstp,
6885 const nodemask_t *srcp, int n, int nbits)
6887 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
6889 static inline __attribute__((always_inline)) int __first_node(const nodemask_t *srcp)
6891 return ({ int __min1 = ((1 << 0)); int __min2 = (find_first_bit(srcp->bits, (1 << 0))); __min1 < __min2 ? __min1: __min2; });
6893 static inline __attribute__((always_inline)) int __next_node(int n, const nodemask_t *srcp)
6895 return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_bit(srcp->bits, (1 << 0), n+1)); __min1 < __min2 ? __min1: __min2; });
6897 static inline __attribute__((always_inline)) void init_nodemask_of_node(nodemask_t *mask, int node)
6899 __nodes_clear(&(*mask), (1 << 0));
6900 __node_set((node), &(*mask));
6902 static inline __attribute__((always_inline)) int __first_unset_node(const nodemask_t *maskp)
6904 return ({ int __min1 = ((1 << 0)); int __min2 = (find_first_zero_bit(maskp->bits, (1 << 0))); __min1 < __min2 ? __min1: __min2; })
6907 static inline __attribute__((always_inline)) int __nodemask_scnprintf(char *buf, int len,
6908 const nodemask_t *srcp, int nbits)
6910 return bitmap_scnprintf(buf, len, srcp->bits, nbits);
6912 static inline __attribute__((always_inline)) int __nodemask_parse_user(const char *buf, int len,
6913 nodemask_t *dstp, int nbits)
6915 return bitmap_parse_user(buf, len, dstp->bits, nbits);
6917 static inline __attribute__((always_inline)) int __nodelist_scnprintf(char *buf, int len,
6918 const nodemask_t *srcp, int nbits)
6920 return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
6922 static inline __attribute__((always_inline)) int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
6924 return bitmap_parselist(buf, dstp->bits, nbits);
6926 static inline __attribute__((always_inline)) int __node_remap(int oldbit,
6927 const nodemask_t *oldp, const nodemask_t *newp, int nbits)
6929 return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
6931 static inline __attribute__((always_inline)) void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
6932 const nodemask_t *oldp, const nodemask_t *newp, int nbits)
6934 bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
6936 static inline __attribute__((always_inline)) void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
6937 const nodemask_t *relmapp, int nbits)
6939 bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
6941 static inline __attribute__((always_inline)) void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
6944 bitmap_fold(dstp->bits, origp->bits, sz, nbits);
6954 extern nodemask_t node_states[NR_NODE_STATES];
6955 static inline __attribute__((always_inline)) int node_state(int node, enum node_states state)
6959 static inline __attribute__((always_inline)) void node_set_state(int node, enum node_states state)
6962 static inline __attribute__((always_inline)) void node_clear_state(int node, enum node_states state)
6965 static inline __attribute__((always_inline)) int num_node_state(enum node_states state)
6969 struct nodemask_scratch {
6973 static inline __attribute__((always_inline)) int numa_node_id(void)
6977 static inline __attribute__((always_inline)) int early_cpu_to_node(int cpu)
6981 static inline __attribute__((always_inline)) void setup_node_to_cpumask_map(void) { }
6982 extern const struct cpumask *cpu_coregroup_mask(int cpu);
6983 static inline __attribute__((always_inline)) void arch_fix_phys_package_id(int num, u32 slot)
6987 void x86_pci_root_bus_res_quirks(struct pci_bus *b);
6988 static inline __attribute__((always_inline)) int get_mp_bus_to_node(int busnum)
6992 static inline __attribute__((always_inline)) void set_mp_bus_to_node(int busnum, int node)
6995 static inline __attribute__((always_inline)) void set_apicid_to_node(int apicid, s16 node)
6998 static inline __attribute__((always_inline)) int numa_cpu_node(int cpu)
7002 extern void set_highmem_pages_init(void);
7003 static inline __attribute__((always_inline)) void numa_set_node(int cpu, int node) { }
7004 static inline __attribute__((always_inline)) void numa_clear_node(int cpu) { }
7005 static inline __attribute__((always_inline)) void init_cpu_to_node(void) { }
7006 static inline __attribute__((always_inline)) void numa_add_cpu(int cpu) { }
7007 static inline __attribute__((always_inline)) void numa_remove_cpu(int cpu) { }
7010 spinlock_t wait_lock;
7011 struct list_head wait_list;
7012 struct task_struct *owner;
7015 struct lockdep_map dep_map;
7017 struct mutex_waiter {
7018 struct list_head list;
7019 struct task_struct *task;
7022 extern void mutex_destroy(struct mutex *lock);
7023 extern void __mutex_init(struct mutex *lock, const char *name,
7024 struct lock_class_key *key);
7025 static inline __attribute__((always_inline)) int mutex_is_locked(struct mutex *lock)
7027 return atomic_read(&lock->count) != 1;
7029 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
7030 extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
7031 extern int __attribute__((warn_unused_result)) mutex_lock_interruptible_nested(struct mutex *lock,
7032 unsigned int subclass);
7033 extern int __attribute__((warn_unused_result)) mutex_lock_killable_nested(struct mutex *lock,
7034 unsigned int subclass);
7035 extern int mutex_trylock(struct mutex *lock);
7036 extern void mutex_unlock(struct mutex *lock);
7037 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
7044 void leave_mm(int cpu);
7045 enum xen_domain_type {
7050 static inline __attribute__((always_inline)) unsigned char readb(const volatile void *addr) { unsigned char ret; asm volatile("mov" "b" " %1,%0":"=q" (ret) :"m" (*(volatile unsigned char *)addr) :"memory"); return ret; }
7051 static inline __attribute__((always_inline)) unsigned short readw(const volatile void *addr) { unsigned short ret; asm volatile("mov" "w" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned short *)addr) :"memory"); return ret; }
7052 static inline __attribute__((always_inline)) unsigned int readl(const volatile void *addr) { unsigned int ret; asm volatile("mov" "l" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned int *)addr) :"memory"); return ret; }
7053 static inline __attribute__((always_inline)) unsigned char __readb(const volatile void *addr) { unsigned char ret; asm volatile("mov" "b" " %1,%0":"=q" (ret) :"m" (*(volatile unsigned char *)addr) ); return ret; }
7054 static inline __attribute__((always_inline)) unsigned short __readw(const volatile void *addr) { unsigned short ret; asm volatile("mov" "w" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned short *)addr) ); return ret; }
7055 static inline __attribute__((always_inline)) unsigned int __readl(const volatile void *addr) { unsigned int ret; asm volatile("mov" "l" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned int *)addr) ); return ret; }
7056 static inline __attribute__((always_inline)) void writeb(unsigned char val, volatile void *addr) { asm volatile("mov" "b" " %0,%1": :"q" (val), "m" (*(volatile unsigned char *)addr) :"memory"); }
7057 static inline __attribute__((always_inline)) void writew(unsigned short val, volatile void *addr) { asm volatile("mov" "w" " %0,%1": :"r" (val), "m" (*(volatile unsigned short *)addr) :"memory"); }
7058 static inline __attribute__((always_inline)) void writel(unsigned int val, volatile void *addr) { asm volatile("mov" "l" " %0,%1": :"r" (val), "m" (*(volatile unsigned int *)addr) :"memory"); }
7059 static inline __attribute__((always_inline)) void __writeb(unsigned char val, volatile void *addr) { asm volatile("mov" "b" " %0,%1": :"q" (val), "m" (*(volatile unsigned char *)addr) ); }
7060 static inline __attribute__((always_inline)) void __writew(unsigned short val, volatile void *addr) { asm volatile("mov" "w" " %0,%1": :"r" (val), "m" (*(volatile unsigned short *)addr) ); }
7061 static inline __attribute__((always_inline)) void __writel(unsigned int val, volatile void *addr) { asm volatile("mov" "l" " %0,%1": :"r" (val), "m" (*(volatile unsigned int *)addr) ); }
/*
 * Linear virtual<->physical address translation for the direct kernel
 * mapping.  0xC0000000UL is the 32-bit x86 PAGE_OFFSET, so these are
 * simple offset subtractions/additions; they are only valid for lowmem
 * addresses inside the direct map (not vmalloc/ioremap space).
 * NOTE(review): the brace lines of these functions were elided by the
 * filter that produced this dump; the code lines below are kept verbatim.
 */
7062 static inline __attribute__((always_inline)) phys_addr_t virt_to_phys(volatile void *address)
7064 return (((unsigned long)(address)) - ((unsigned long)(0xC0000000UL)));
7066 static inline __attribute__((always_inline)) void *phys_to_virt(phys_addr_t address)
7068 return ((void *)((unsigned long)(address)+((unsigned long)(0xC0000000UL))));
/* ISA bus addresses equal physical addresses on x86, truncated to 32 bits. */
7070 static inline __attribute__((always_inline)) unsigned int isa_virt_to_bus(volatile void *address)
7072 return (unsigned int)virt_to_phys(address);
/* MMIO remapping API: map a physical (bus) address range into kernel
 * virtual space with the requested caching attribute. */
7074 extern void *ioremap_nocache(resource_size_t offset, unsigned long size);
7075 extern void *ioremap_cache(resource_size_t offset, unsigned long size);
7076 extern void *ioremap_prot(resource_size_t offset, unsigned long size,
7077 unsigned long prot_val);
/* Plain ioremap() defaults to the uncached variant (safe for device MMIO). */
7078 static inline __attribute__((always_inline)) void *ioremap(resource_size_t offset, unsigned long size)
7080 return ioremap_nocache(offset, size);
7082 extern void iounmap(volatile void *addr);
7083 extern void set_iounmap_nonlazy(void);
/* Generic iomap accessors: work on cookies from ioport_map()/pci_iomap(),
 * dispatching to port I/O or MMIO as appropriate.  The "be" forms are
 * big-endian; the *_rep forms transfer `count` repeated items. */
7084 extern unsigned int ioread8(void *);
7085 extern unsigned int ioread16(void *);
7086 extern unsigned int ioread16be(void *);
7087 extern unsigned int ioread32(void *);
7088 extern unsigned int ioread32be(void *);
7089 extern void iowrite8(u8, void *);
7090 extern void iowrite16(u16, void *);
7091 extern void iowrite16be(u16, void *);
7092 extern void iowrite32(u32, void *);
7093 extern void iowrite32be(u32, void *);
7094 extern void ioread8_rep(void *port, void *buf, unsigned long count);
7095 extern void ioread16_rep(void *port, void *buf, unsigned long count);
7096 extern void ioread32_rep(void *port, void *buf, unsigned long count);
7097 extern void iowrite8_rep(void *port, const void *buf, unsigned long count);
7098 extern void iowrite16_rep(void *port, const void *buf, unsigned long count);
7099 extern void iowrite32_rep(void *port, const void *buf, unsigned long count);
7100 extern void *ioport_map(unsigned long port, unsigned int nr);
7101 extern void ioport_unmap(void *);
/* PCI BAR mapping helpers built on the iomap cookies above. */
7103 extern void *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
7104 extern void pci_iounmap(struct pci_dev *dev, void *);
7105 struct vm_area_struct;
/* Fields of struct vm_struct (vmalloc area descriptor); the struct header
 * and some members were elided from this dump.  `next` links the global
 * vmlist, `pages` is the backing page array of length `nr_pages`. */
7107 struct vm_struct *next;
7110 unsigned long flags;
7111 struct page **pages;
7112 unsigned int nr_pages;
7113 phys_addr_t phys_addr;
/* vm_map_ram/vm_unmap_ram: fast per-cpu page mapping interface. */
7116 extern void vm_unmap_ram(const void *mem, unsigned int count);
7117 extern void *vm_map_ram(struct page **pages, unsigned int count,
7118 int node, pgprot_t prot);
7119 extern void vm_unmap_aliases(void);
7120 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vmalloc_init(void);
/* vmalloc family: virtually-contiguous allocations.  vz* forms zero the
 * memory, *_node forms allocate on a given NUMA node, *_32 forms restrict
 * to 32-bit addressable memory, *_user/_exec set up user/executable
 * mappings. */
7121 extern void *vmalloc(unsigned long size);
7122 extern void *vzalloc(unsigned long size);
7123 extern void *vmalloc_user(unsigned long size);
7124 extern void *vmalloc_node(unsigned long size, int node);
7125 extern void *vzalloc_node(unsigned long size, int node);
7126 extern void *vmalloc_exec(unsigned long size);
7127 extern void *vmalloc_32(unsigned long size);
7128 extern void *vmalloc_32_user(unsigned long size);
7129 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
7130 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
7131 unsigned long start, unsigned long end, gfp_t gfp_mask,
7132 pgprot_t prot, int node, void *caller);
7133 extern void vfree(const void *addr);
7134 extern void *vmap(struct page **pages, unsigned int count,
7135 unsigned long flags, pgprot_t prot);
7136 extern void vunmap(const void *addr);
7137 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
7138 unsigned long pgoff);
7139 void vmalloc_sync_all(void);
/* Usable size of a vmalloc area: total size minus the one trailing guard
 * page ((1UL) << 12 == PAGE_SIZE). */
7140 static inline __attribute__((always_inline)) size_t get_vm_area_size(const struct vm_struct *area)
7142 return area->size - ((1UL) << 12);
/* Allocation/lookup of vm_struct address ranges in vmalloc space; the
 * *_caller variants record the allocation site for /proc/vmallocinfo. */
7144 extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
7145 extern struct vm_struct *get_vm_area_caller(unsigned long size,
7146 unsigned long flags, void *caller);
7147 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
7148 unsigned long start, unsigned long end);
7149 extern struct vm_struct *__get_vm_area_caller(unsigned long size,
7150 unsigned long flags,
7151 unsigned long start, unsigned long end,
7153 extern struct vm_struct *remove_vm_area(const void *addr);
/* Page-table population helpers for kernel address ranges; the _noflush
 * forms skip the TLB flush and require the caller to do it. */
7154 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
7155 struct page ***pages);
7156 extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
7157 pgprot_t prot, struct page **pages);
7158 extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
7159 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
7160 extern struct vm_struct *alloc_vm_area(size_t size);
7161 extern void free_vm_area(struct vm_struct *area);
/* vread/vwrite: safe copy in/out of vmalloc space (used by /dev/kmem). */
7162 extern long vread(char *buf, char *addr, unsigned long count);
7163 extern long vwrite(char *buf, char *addr, unsigned long count);
/* Global vmalloc area list and the rwlock protecting it. */
7164 extern rwlock_t vmlist_lock;
7165 extern struct vm_struct *vmlist;
7166 extern __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) void vm_area_register_early(struct vm_struct *vm, size_t align);
7167 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
7168 const size_t *sizes, int nr_vms,
7170 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
/*
 * Bulk MMIO helpers.  On x86 MMIO is ordinary memory-mapped access, so
 * these simply cast away volatile and defer to the compiler builtins;
 * the brace lines were elided by the dump filter.
 */
7171 static inline __attribute__((always_inline)) void
7172 memset_io(volatile void *addr, unsigned char val, size_t count)
7174 __builtin_memset((void *)addr, val, count);
7176 static inline __attribute__((always_inline)) void
7177 memcpy_fromio(void *dst, const volatile void *src, size_t count)
7179 __builtin_memcpy(dst, (const void *)src, count);
7181 static inline __attribute__((always_inline)) void
7182 memcpy_toio(volatile void *dst, const void *src, size_t count)
7184 __builtin_memcpy((void *)dst, src, count);
/* flush_write_buffers() is a no-op on this configuration (body elided /
 * empty).  The io_delay machinery implements the legacy port-0x80 style
 * delay used by the *_p port accessors below. */
7186 static inline __attribute__((always_inline)) void flush_write_buffers(void)
7189 extern void native_io_delay(void);
7190 extern int io_delay_type;
7191 extern void io_delay_init(void);
/*
 * Legacy x86 port I/O accessor families, one line per width (byte/word/
 * long), each defining out*/in* (plain), out*_p/in*_p (with slow_down_io()
 * pause after the access), and string forms outs*/ins* using rep outs/ins.
 * The port is passed in dx ("Nd" allows an 8-bit immediate), data in
 * al/ax/eax ("a").  slow_down_io() is defined elsewhere in this file.
 */
7192 static inline __attribute__((always_inline)) void outb(unsigned char value, int port) { asm volatile("out" "b" " %" "b" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((always_inline)) unsigned char inb(int port) { unsigned char value; asm volatile("in" "b" " %w1, %" "b" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((always_inline)) void outb_p(unsigned char value, int port) { outb(value, port); slow_down_io(); } static inline __attribute__((always_inline)) unsigned char inb_p(int port) { unsigned char value = inb(port); slow_down_io(); return value; } static inline __attribute__((always_inline)) void outsb(int port, const void *addr, unsigned long count) { asm volatile("rep; outs" "b" : "+S"(addr), "+c"(count) : "d"(port)); } static inline __attribute__((always_inline)) void insb(int port, void *addr, unsigned long count) { asm volatile("rep; ins" "b" : "+D"(addr), "+c"(count) : "d"(port)); }
7193 static inline __attribute__((always_inline)) void outw(unsigned short value, int port) { asm volatile("out" "w" " %" "w" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((always_inline)) unsigned short inw(int port) { unsigned short value; asm volatile("in" "w" " %w1, %" "w" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((always_inline)) void outw_p(unsigned short value, int port) { outw(value, port); slow_down_io(); } static inline __attribute__((always_inline)) unsigned short inw_p(int port) { unsigned short value = inw(port); slow_down_io(); return value; } static inline __attribute__((always_inline)) void outsw(int port, const void *addr, unsigned long count) { asm volatile("rep; outs" "w" : "+S"(addr), "+c"(count) : "d"(port)); } static inline __attribute__((always_inline)) void insw(int port, void *addr, unsigned long count) { asm volatile("rep; ins" "w" : "+D"(addr), "+c"(count) : "d"(port)); }
7194 static inline __attribute__((always_inline)) void outl(unsigned int value, int port) { asm volatile("out" "l" " %" "" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((always_inline)) unsigned int inl(int port) { unsigned int value; asm volatile("in" "l" " %w1, %" "" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((always_inline)) void outl_p(unsigned int value, int port) { outl(value, port); slow_down_io(); } static inline __attribute__((always_inline)) unsigned int inl_p(int port) { unsigned int value = inl(port); slow_down_io(); return value; } static inline __attribute__((always_inline)) void outsl(int port, const void *addr, unsigned long count) { asm volatile("rep; outs" "l" : "+S"(addr), "+c"(count) : "d"(port)); } static inline __attribute__((always_inline)) void insl(int port, void *addr, unsigned long count) { asm volatile("rep; ins" "l" : "+D"(addr), "+c"(count) : "d"(port)); }
/* /dev/mem access translation and attribute management for remapped I/O. */
7195 extern void *xlate_dev_mem_ptr(unsigned long phys);
7196 extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
7197 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
7198 unsigned long prot_val);
7199 extern void *ioremap_wc(resource_size_t offset, unsigned long size);
/* Early (pre-paging_init) fixmap-based ioremap used during boot. */
7200 extern void early_ioremap_init(void);
7201 extern void early_ioremap_reset(void);
7202 extern void *early_ioremap(resource_size_t phys_addr,
7203 unsigned long size);
7204 extern void *early_memremap(resource_size_t phys_addr,
7205 unsigned long size);
7206 extern void early_iounmap(void *addr, unsigned long size);
7207 extern void fixup_early_ioremap(void);
7208 extern bool is_early_ioremap_ptep(pte_t *ptep);
/* SMP/realmode trampoline image boundaries and its relocated base. */
7209 extern const unsigned char x86_trampoline_start [];
7210 extern const unsigned char x86_trampoline_end [];
7211 extern unsigned char *x86_trampoline_base;
/* Entry state consumed by secondary-CPU startup code. */
7212 extern unsigned long init_rsp;
7213 extern unsigned long initial_code;
7214 extern unsigned long initial_gs;
7215 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_trampolines(void);
7216 extern const unsigned char trampoline_data[];
7217 extern const unsigned char trampoline_status[];
/* Physical address of trampoline_data after relocation: base plus the
 * symbol's offset within the original trampoline image. */
7218 static inline __attribute__((always_inline)) unsigned long trampoline_address(void)
7220 return virt_to_phys(((void *)(x86_trampoline_base + ((const unsigned char *)(trampoline_data) - x86_trampoline_start))));
/* ACPI global-lock primitives; return value indicates acquisition status. */
7222 int __acpi_acquire_global_lock(unsigned int *lock);
7223 int __acpi_release_global_lock(unsigned int *lock);
/* ACPI boot-time configuration flags (set from command line / platform
 * quirks) and interrupt-routing hooks. */
7224 extern int acpi_lapic;
7225 extern int acpi_ioapic;
7226 extern int acpi_noirq;
7227 extern int acpi_strict;
7228 extern int acpi_disabled;
7229 extern int acpi_pci_disabled;
7230 extern int acpi_skip_timer_override;
7231 extern int acpi_use_timer_override;
7232 extern int acpi_fix_pin2_polarity;
7233 extern u8 acpi_sci_flags;
7234 extern int acpi_sci_override_gsi;
7235 void acpi_pic_sci_set_trigger(unsigned int, u16);
7236 extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
7237 int trigger, int polarity);
/* disable_acpi(): part of its body was elided from this dump; the visible
 * statement also disables ACPI-based PCI enumeration. */
7238 static inline __attribute__((always_inline)) void disable_acpi(void)
7241 acpi_pci_disabled = 1;
7244 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
7245 static inline __attribute__((always_inline)) void acpi_noirq_set(void) { acpi_noirq = 1; }
7246 static inline __attribute__((always_inline)) void acpi_disable_pci(void)
7248 acpi_pci_disabled = 1;
/* Suspend-to-RAM low-level entry and wakeup-code reservation. */
7251 extern int acpi_suspend_lowlevel(void);
7252 extern const unsigned char acpi_wakeup_code[];
7253 extern void acpi_reserve_wakeup_memory(void);
7254 static inline __attribute__((always_inline)) unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
7256 if (__builtin_constant_p(((boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == 2 && boot_cpu_data.x86_model <= 0x05 && boot_cpu_data.x86_mask < 0x0A))) ? !!((boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == 2 && boot_cpu_data.x86_model <= 0x05 && boot_cpu_data.x86_mask < 0x0A)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
7257 "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h"
7260 , }; ______r = !!((boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == 2 && boot_cpu_data.x86_model <= 0x05 && boot_cpu_data.x86_mask < 0x0A)); ______f.miss_hit[______r]++; ______r; }))
7262 else if (__builtin_constant_p(((amd_e400_c1e_detected))) ? !!((amd_e400_c1e_detected)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h", .line = 142, }; ______r = !!((amd_e400_c1e_detected)); ______f.miss_hit[______r]++; ______r; }))
7267 static inline __attribute__((always_inline)) bool arch_has_acpi_pdc(void)
7269 struct cpuinfo_x86 *c = &(*({ do { const void *__vpp_verify = (typeof((&(cpu_info))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_info))) *)(&(cpu_info)))); (typeof((typeof(*(&(cpu_info))) *)(&(cpu_info)))) (__ptr + (((__per_cpu_offset[0])))); }); }));
7270 return (c->x86_vendor == 0 ||
7271 c->x86_vendor == 5);
7273 static inline __attribute__((always_inline)) void arch_acpi_set_pdc_bits(u32 *buf)
7275 struct cpuinfo_x86 *c = &(*({ do { const void *__vpp_verify = (typeof((&(cpu_info))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_info))) *)(&(cpu_info)))); (typeof((typeof(*(&(cpu_info))) *)(&(cpu_info)))) (__ptr + (((__per_cpu_offset[0])))); }); }));
7276 buf[2] |= ((0x0010) | (0x0008) | (0x0002) | (0x0100) | (0x0200));
7277 if (__builtin_constant_p((((__builtin_constant_p((4*32+ 7)) && ( ((((4*32+ 7))>>5)==0 && (1UL<<(((4*32+ 7))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 7))>>5)==1 && (1UL<<(((4*32+ 7))&31) & (0|0))) || ((((4*32+ 7))>>5)==2 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==3 && (1UL<<(((4*32+ 7))&31) & (0))) || ((((4*32+ 7))>>5)==4 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==5 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==6 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==7 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==8 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==9 && (1UL<<(((4*32+ 7))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 7))) ? constant_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability)))))))) ? !!(((__builtin_constant_p((4*32+ 7)) && ( ((((4*32+ 7))>>5)==0 && (1UL<<(((4*32+ 7))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 7))>>5)==1 && (1UL<<(((4*32+ 7))&31) & (0|0))) || ((((4*32+ 7))>>5)==2 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==3 && (1UL<<(((4*32+ 7))&31) & (0))) || ((((4*32+ 7))>>5)==4 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==5 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==6 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==7 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==8 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==9 && (1UL<<(((4*32+ 7))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 7))) ? 
constant_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h", .line = 161, }; ______r = !!(((__builtin_constant_p((4*32+ 7)) && ( ((((4*32+ 7))>>5)==0 && (1UL<<(((4*32+ 7))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 7))>>5)==1 && (1UL<<(((4*32+ 7))&31) & (0|0))) || ((((4*32+ 7))>>5)==2 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==3 && (1UL<<(((4*32+ 7))&31) & (0))) || ((((4*32+ 7))>>5)==4 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==5 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==6 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==7 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==8 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==9 && (1UL<<(((4*32+ 7))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 7))) ? constant_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; }))
7278 buf[2] |= ((0x0008) | (0x0002) | (0x0020) | (0x0800) | (0x0001));
7279 if (__builtin_constant_p((((__builtin_constant_p((0*32+22)) && ( ((((0*32+22))>>5)==0 && (1UL<<(((0*32+22))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+22))>>5)==1 && (1UL<<(((0*32+22))&31) & (0|0))) || ((((0*32+22))>>5)==2 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==3 && (1UL<<(((0*32+22))&31) & (0))) || ((((0*32+22))>>5)==4 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==5 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==6 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==7 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==8 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==9 && (1UL<<(((0*32+22))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+22))) ? constant_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+22)) && ( ((((0*32+22))>>5)==0 && (1UL<<(((0*32+22))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+22))>>5)==1 && (1UL<<(((0*32+22))&31) & (0|0))) || ((((0*32+22))>>5)==2 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==3 && (1UL<<(((0*32+22))&31) & (0))) || ((((0*32+22))>>5)==4 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==5 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==6 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==7 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==8 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==9 && (1UL<<(((0*32+22))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+22))) ? 
constant_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h", .line = 164, }; ______r = !!(((__builtin_constant_p((0*32+22)) && ( ((((0*32+22))>>5)==0 && (1UL<<(((0*32+22))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+22))>>5)==1 && (1UL<<(((0*32+22))&31) & (0|0))) || ((((0*32+22))>>5)==2 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==3 && (1UL<<(((0*32+22))&31) & (0))) || ((((0*32+22))>>5)==4 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==5 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==6 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==7 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==8 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==9 && (1UL<<(((0*32+22))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+22))) ? constant_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; }))
7281 if (__builtin_constant_p(((!(__builtin_constant_p((4*32+ 3)) && ( ((((4*32+ 3))>>5)==0 && (1UL<<(((4*32+ 3))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 3))>>5)==1 && (1UL<<(((4*32+ 3))&31) & (0|0))) || ((((4*32+ 3))>>5)==2 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==3 && (1UL<<(((4*32+ 3))&31) & (0))) || ((((4*32+ 3))>>5)==4 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==5 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==6 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==7 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==8 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==9 && (1UL<<(((4*32+ 3))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 3))) ? constant_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability)))))))) ? !!((!(__builtin_constant_p((4*32+ 3)) && ( ((((4*32+ 3))>>5)==0 && (1UL<<(((4*32+ 3))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 3))>>5)==1 && (1UL<<(((4*32+ 3))&31) & (0|0))) || ((((4*32+ 3))>>5)==2 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==3 && (1UL<<(((4*32+ 3))&31) & (0))) || ((((4*32+ 3))>>5)==4 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==5 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==6 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==7 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==8 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==9 && (1UL<<(((4*32+ 3))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 3))) ? 
constant_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h", .line = 170, }; ______r = !!((!(__builtin_constant_p((4*32+ 3)) && ( ((((4*32+ 3))>>5)==0 && (1UL<<(((4*32+ 3))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 3))>>5)==1 && (1UL<<(((4*32+ 3))&31) & (0|0))) || ((((4*32+ 3))>>5)==2 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==3 && (1UL<<(((4*32+ 3))&31) & (0))) || ((((4*32+ 3))>>5)==4 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==5 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==6 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==7 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==8 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==9 && (1UL<<(((4*32+ 3))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 3))) ? constant_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; }))
7282 buf[2] &= ~((0x0200));
/* Compile-time fixmap: fixed virtual slots just below __FIXADDR_TOP.
 * Several enumerators were elided from this dump; FIX_BTMAP_END is
 * computed so the (64 * 4) boot-time mapping slots do not straddle an
 * alignment boundary. */
7284 extern unsigned long __FIXADDR_TOP;
7285 enum fixed_addresses {
7289 FIX_EARLYCON_MEM_BASE,
7292 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + 64 - 1,
7294 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*8)-1,
7296 FIX_PARAVIRT_BOOTMAP,
7299 __end_of_permanent_fixed_addresses,
7301 (__end_of_permanent_fixed_addresses ^
7302 (__end_of_permanent_fixed_addresses + (64 * 4) - 1)) &
7304 ? __end_of_permanent_fixed_addresses + (64 * 4) -
7305 (__end_of_permanent_fixed_addresses & ((64 * 4) - 1))
7306 : __end_of_permanent_fixed_addresses,
7307 FIX_BTMAP_BEGIN = FIX_BTMAP_END + (64 * 4) - 1,
7309 __end_of_fixed_addresses
/* Fixmap management: reserve address space at the top, install ptes. */
7311 extern void reserve_top_address(unsigned long reserve);
7312 extern int fixmaps_set;
7313 extern pte_t *kmap_pte;
7314 extern pgprot_t kmap_prot;
7315 extern pte_t *pkmap_page_table;
7316 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
7317 void native_set_fixmap(enum fixed_addresses idx,
7318 phys_addr_t phys, pgprot_t flags);
7319 extern void __this_fixmap_does_not_exist(void);
/*
 * fix_to_virt(): virtual address of fixmap slot `idx`, i.e. __FIXADDR_TOP
 * minus idx pages ((idx) << 12).  The expanded conditional below is the
 * ftrace branch-profiling instrumentation of a build-time bounds check:
 * for an out-of-range constant idx the call to the undefined function
 * __this_fixmap_does_not_exist() fails the link.
 */
7320 static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long fix_to_virt(const unsigned int idx)
7322 if (__builtin_constant_p(((idx >= __end_of_fixed_addresses))) ? !!((idx >= __end_of_fixed_addresses)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 210, }; ______r = !!((idx >= __end_of_fixed_addresses)); ______f.miss_hit[______r]++; ______r; }))
7323 __this_fixmap_does_not_exist();
7324 return (((unsigned long)__FIXADDR_TOP) - ((idx) << 12));
7326 static inline __attribute__((always_inline)) unsigned long virt_to_fix(const unsigned long vaddr)
7328 do { if (__builtin_constant_p((((__builtin_constant_p(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) ? !!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 218, }; ______r = __builtin_expect(!!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) ? !!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 218, }; ______r = __builtin_expect(!!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 218, }; ______r = !!(((__builtin_constant_p(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) ? 
!!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 218, }; ______r = __builtin_expect(!!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h"), "i" (218), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
7329 return ((((unsigned long)__FIXADDR_TOP) - ((vaddr)&(~(((1UL) << 12)-1)))) >> 12);
/*
 * Install `phys` into fixmap slot `idx` and return the virtual address of
 * that exact physical byte: slot base plus the sub-page offset
 * (phys & (PAGE_SIZE - 1)).  Brace lines elided by the dump filter.
 */
7331 static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long
7332 __set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
7334 __set_fixmap(idx, phys, flags);
7335 return fix_to_virt(idx) + (phys & (((1UL) << 12) - 1));
/* Local APIC probing/config state. */
7337 extern void generic_apic_probe(void);
7338 extern unsigned int apic_verbosity;
7339 extern int local_apic_timer_c2_ok;
7340 extern int disable_apic;
7341 extern void __inquire_remote_apic(int apicid);
/* Debug-dump a remote APIC only at verbosity >= 2 (APIC_DEBUG); the
 * expanded conditional is ftrace branch-profiling instrumentation. */
7342 static inline __attribute__((always_inline)) void default_inquire_remote_apic(int apicid)
7344 if (__builtin_constant_p(((apic_verbosity >= 2))) ? !!((apic_verbosity >= 2)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/apic.h", .line = 63, }; ______r = !!((apic_verbosity >= 2)); ______f.miss_hit[______r]++; ______r; }))
7345 __inquire_remote_apic(apicid);
/* True when MP-table/ACPI found an SMP configuration and the APIC was not
 * disabled on the command line. */
7347 static inline __attribute__((always_inline)) bool apic_from_smp_config(void)
7349 return smp_found_config && !disable_apic;
/* is_vsmp_box(): body elided from this dump (stub on this config). */
7351 static inline __attribute__((always_inline)) int is_vsmp_box(void)
7355 extern void xapic_wait_icr_idle(void);
7356 extern u32 safe_xapic_wait_icr_idle(void);
7357 extern void xapic_icr_write(u32, u32);
7358 extern int setup_profiling_timer(unsigned int);
7359 static inline __attribute__((always_inline)) void native_apic_mem_write(u32 reg, u32 v)
7361 volatile u32 *addr = (volatile u32 *)((fix_to_virt(FIX_APIC_BASE)) + reg);
7362 asm volatile ("661:\n\t" "movl %0, %1" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(3*32+19)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "xchgl %0, %1" "\n664:\n" ".previous" : "=r" (v), "=m" (*addr) : "i" (0), "0" (v), "m" (*addr))
/* Read a local-APIC register: plain 32-bit load from the fixmapped APIC
 * MMIO base plus the register offset. */
7365 static inline __attribute__((always_inline)) u32 native_apic_mem_read(u32 reg)
7367 return *((volatile u32 *)((fix_to_virt(FIX_APIC_BASE)) + reg));
7369 extern void native_apic_wait_icr_idle(void);
7370 extern u32 native_safe_apic_wait_icr_idle(void);
7371 extern void native_apic_icr_write(u32 low, u32 id);
7372 extern u64 native_apic_icr_read(void);
7373 extern int x2apic_mode;
/* x2APIC hooks: bodies elided/empty in this configuration (x2APIC not
 * compiled in), so these compile to no-ops / constant results. */
7374 static inline __attribute__((always_inline)) void check_x2apic(void)
7377 static inline __attribute__((always_inline)) void enable_x2apic(void)
7380 static inline __attribute__((always_inline)) int x2apic_enabled(void)
7384 static inline __attribute__((always_inline)) void x2apic_force_phys(void)
/* Local-APIC bring-up, shutdown and timer setup entry points. */
7387 extern void enable_IR_x2apic(void);
7388 extern int get_physical_broadcast(void);
7389 extern int lapic_get_maxlvt(void);
7390 extern void clear_local_APIC(void);
7391 extern void connect_bsp_APIC(void);
7392 extern void disconnect_bsp_APIC(int virt_wire_setup);
7393 extern void disable_local_APIC(void);
7394 extern void lapic_shutdown(void);
7395 extern int verify_local_APIC(void);
7396 extern void sync_Arb_IDs(void);
7397 extern void init_bsp_APIC(void);
7398 extern void setup_local_APIC(void);
7399 extern void end_local_APIC_setup(void);
7400 extern void bsp_end_local_APIC_setup(void);
7401 extern void init_apic_mappings(void);
7402 void register_lapic_address(unsigned long address);
7403 extern void setup_boot_APIC_clock(void);
7404 extern void setup_secondary_APIC_clock(void);
7405 extern int APIC_init_uniprocessor(void);
7406 extern int apic_force_enable(unsigned long addr);
/* apic_is_clustered_box(): body elided from this dump. */
7407 static inline __attribute__((always_inline)) int apic_is_clustered_box(void)
7411 extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
/* Members of struct apic — the per-driver APIC operations table (the
 * struct header and a few members were elided from this dump).  Each APIC
 * driver fills in probing, IPI delivery, register access and SMP-boot
 * callbacks. */
7415 int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
7416 int (*apic_id_registered)(void);
7417 u32 irq_delivery_mode;
7419 const struct cpumask *(*target_cpus)(void);
7422 unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
7423 unsigned long (*check_apicid_present)(int apicid);
7424 void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
7425 void (*init_apic_ldr)(void);
7426 void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
7427 void (*setup_apic_routing)(void);
7428 int (*multi_timer_check)(int apic, int irq);
7429 int (*cpu_present_to_apicid)(int mps_cpu);
7430 void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
7431 void (*setup_portio_remap)(void);
7432 int (*check_phys_apicid_present)(int phys_apicid);
7433 void (*enable_apic_mode)(void);
7434 int (*phys_pkg_id)(int cpuid_apic, int index_msb);
7435 int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
/* APIC-ID packing/unpacking for this driver's ID layout. */
7436 unsigned int (*get_apic_id)(unsigned long x);
7437 unsigned long (*set_apic_id)(unsigned int id);
7438 unsigned long apic_id_mask;
7439 unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
7440 unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
7441 const struct cpumask *andmask);
/* Inter-processor interrupt delivery. */
7442 void (*send_IPI_mask)(const struct cpumask *mask, int vector);
7443 void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
7445 void (*send_IPI_allbutself)(int vector);
7446 void (*send_IPI_all)(int vector);
7447 void (*send_IPI_self)(int vector);
/* Secondary-CPU wakeup protocol. */
7448 int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
7449 int trampoline_phys_low;
7450 int trampoline_phys_high;
7451 void (*wait_for_init_deassert)(atomic_t *deassert);
7452 void (*smp_callin_clear_local_apic)(void);
7453 void (*inquire_remote_apic)(int apicid);
/* Raw register access ops (mem-mapped xAPIC or MSR-based x2APIC). */
7454 u32 (*read)(u32 reg);
7455 void (*write)(u32 reg, u32 v);
7456 u64 (*icr_read)(void);
7457 void (*icr_write)(u32 low, u32 high);
7458 void (*wait_icr_idle)(void);
7459 u32 (*safe_wait_icr_idle)(void);
7460 int (*x86_32_early_logical_apicid)(int cpu);
7461 int (*x86_32_numa_cpu_node)(int cpu);
7463 extern struct apic *apic;
7464 extern struct apic *__apicdrivers[], *__apicdrivers_end[];
7465 extern atomic_t init_deasserted;
7466 extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
/* Thin accessor wrappers: all local-APIC register traffic is dispatched
 * through the method table of the currently installed 'apic' driver,
 * so the same call sites work for mmio-, x2apic- or noop-style drivers. */
7467 static inline __attribute__((always_inline)) u32 apic_read(u32 reg)
7469 return apic->read(reg);
/* Write 'val' to APIC register 'reg' via the active driver. */
7471 static inline __attribute__((always_inline)) void apic_write(u32 reg, u32 val)
7473 apic->write(reg, val);
/* Read the (64-bit) Interrupt Command Register. */
7475 static inline __attribute__((always_inline)) u64 apic_icr_read(void)
7477 return apic->icr_read();
/* Write the ICR as two 32-bit halves (low word triggers the IPI). */
7479 static inline __attribute__((always_inline)) void apic_icr_write(u32 low, u32 high)
7481 apic->icr_write(low, high);
/* Spin until the ICR delivery-status bit clears (unbounded wait). */
7483 static inline __attribute__((always_inline)) void apic_wait_icr_idle(void)
7485 apic->wait_icr_idle();
/* Bounded variant of the above; returns driver-defined status. */
7487 static inline __attribute__((always_inline)) u32 safe_apic_wait_icr_idle(void)
7489 return apic->safe_wait_icr_idle();
/* Acknowledge the current interrupt by writing 0 to APIC register
 * offset 0xB0 — the local APIC EOI register per the Intel SDM. */
7491 static inline __attribute__((always_inline)) void ack_APIC_irq(void)
7493 apic_write(0xB0, 0);
7495 static inline __attribute__((always_inline)) unsigned default_get_apic_id(unsigned long x)
7497 unsigned int ver = ((apic_read(0x30)) & 0xFFu);
7498 if (__builtin_constant_p(((((ver) >= 0x14) || (__builtin_constant_p((3*32+26)) && ( ((((3*32+26))>>5)==0 && (1UL<<(((3*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((3*32+26))>>5)==1 && (1UL<<(((3*32+26))&31) & (0|0))) || ((((3*32+26))>>5)==2 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==3 && (1UL<<(((3*32+26))&31) & (0))) || ((((3*32+26))>>5)==4 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==5 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==6 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==7 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==8 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==9 && (1UL<<(((3*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((3*32+26))) ? constant_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!((((ver) >= 0x14) || (__builtin_constant_p((3*32+26)) && ( ((((3*32+26))>>5)==0 && (1UL<<(((3*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((3*32+26))>>5)==1 && (1UL<<(((3*32+26))&31) & (0|0))) || ((((3*32+26))>>5)==2 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==3 && (1UL<<(((3*32+26))&31) & (0))) || ((((3*32+26))>>5)==4 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==5 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==6 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==7 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==8 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==9 && (1UL<<(((3*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((3*32+26))) ? 
constant_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/apic.h", .line = 468, }; ______r = !!((((ver) >= 0x14) || (__builtin_constant_p((3*32+26)) && ( ((((3*32+26))>>5)==0 && (1UL<<(((3*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((3*32+26))>>5)==1 && (1UL<<(((3*32+26))&31) & (0|0))) || ((((3*32+26))>>5)==2 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==3 && (1UL<<(((3*32+26))&31) & (0))) || ((((3*32+26))>>5)==4 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==5 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==6 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==7 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==8 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==9 && (1UL<<(((3*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((3*32+26))) ? constant_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; }))
7499 return (x >> 24) & 0xFF;
7501 return (x >> 24) & 0x0F;
7503 static inline __attribute__((always_inline)) void default_wait_for_init_deassert(atomic_t *deassert)
7505 while (!atomic_read(deassert))
7509 extern struct apic *generic_bigsmp_probe(void);
7510 static inline __attribute__((always_inline)) const struct cpumask *default_target_cpus(void)
7512 return cpu_online_mask;
7514 extern __attribute__((section(".data..percpu" ""))) __typeof__(u16) x86_bios_cpu_apicid; extern __typeof__(u16) *x86_bios_cpu_apicid_early_ptr; extern __typeof__(u16) x86_bios_cpu_apicid_early_map[];
7515 static inline __attribute__((always_inline)) unsigned int read_apic_id(void)
7518 reg = apic_read(0x20);
7519 return apic->get_apic_id(reg);
7521 extern void default_setup_apic_routing(void);
7522 extern struct apic apic_noop;
7523 static inline __attribute__((always_inline)) int noop_x86_32_early_logical_apicid(int cpu)
7527 extern void default_init_apic_ldr(void);
7528 static inline __attribute__((always_inline)) int default_apic_id_registered(void)
7530 return (__builtin_constant_p((read_apic_id())) ? constant_test_bit((read_apic_id()), ((phys_cpu_present_map).mask)) : variable_test_bit((read_apic_id()), ((phys_cpu_present_map).mask)));
/* Derive the physical package id by discarding the low 'index_msb'
 * bits (the core/thread portion) of the CPU's APIC id. */
7532 static inline __attribute__((always_inline)) int default_phys_pkg_id(int cpuid_apic, int index_msb)
7534 return cpuid_apic >> index_msb;
/* Convert a cpumask to an APIC destination id for the default (flat)
 * driver: the low 8 bits of the first mask word are used directly as
 * the logical destination bitmap — presumably flat logical mode, where
 * each of up to 8 CPUs owns one bit. TODO(review): confirm 8-CPU limit. */
7536 static inline __attribute__((always_inline)) unsigned int
7537 default_cpu_mask_to_apicid(const struct cpumask *cpumask)
7539 return ((cpumask)->bits)[0] & 0xFFu;
/* Same conversion, but restricted to CPUs that appear in BOTH masks
 * and are currently online (three-way AND of the first mask words). */
7541 static inline __attribute__((always_inline)) unsigned int
7542 default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
7543 const struct cpumask *andmask)
7545 unsigned long mask1 = ((cpumask)->bits)[0];
7546 unsigned long mask2 = ((andmask)->bits)[0];
7547 unsigned long mask3 = ((cpu_online_mask)->bits)[0];
7548 return (unsigned int)(mask1 & mask2 & mask3);
/* Nonzero iff 'apicid' is set in the given physical-id map (expanded
 * test_bit(): constant ids take the compile-time path). */
7550 static inline __attribute__((always_inline)) unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
7552 return (__builtin_constant_p((apicid)) ? constant_test_bit((apicid), ((*map).mask)) : variable_test_bit((apicid), ((*map).mask)));
/* Nonzero iff 'bit' is set in the global phys_cpu_present_map. */
7554 static inline __attribute__((always_inline)) unsigned long default_check_apicid_present(int bit)
7556 return (__builtin_constant_p((bit)) ? constant_test_bit((bit), ((phys_cpu_present_map).mask)) : variable_test_bit((bit), ((phys_cpu_present_map).mask)));
/* Identity mapping: IO-APIC physical ids equal CPU physical ids. */
7558 static inline __attribute__((always_inline)) void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
7560 *retmap = *phys_map;
7562 static inline __attribute__((always_inline)) int __default_cpu_present_to_apicid(int mps_cpu)
7564 if (__builtin_constant_p(((mps_cpu < nr_cpu_ids && (__builtin_constant_p((cpumask_check((mps_cpu)))) ? constant_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits))) : variable_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits))))))) ? !!((mps_cpu < nr_cpu_ids && (__builtin_constant_p((cpumask_check((mps_cpu)))) ? constant_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits))) : variable_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits)))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/apic.h", .line = 594, }; ______r = !!((mps_cpu < nr_cpu_ids && (__builtin_constant_p((cpumask_check((mps_cpu)))) ? constant_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits))) : variable_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits)))))); ______f.miss_hit[______r]++; ______r; }))
7565 return (int)(*({ do { const void *__vpp_verify = (typeof((&(x86_bios_cpu_apicid))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(x86_bios_cpu_apicid))) *)(&(x86_bios_cpu_apicid)))); (typeof((typeof(*(&(x86_bios_cpu_apicid))) *)(&(x86_bios_cpu_apicid)))) (__ptr + (((__per_cpu_offset[mps_cpu])))); }); }));
7569 static inline __attribute__((always_inline)) int
7570 __default_check_phys_apicid_present(int phys_apicid)
7572 return (__builtin_constant_p((phys_apicid)) ? constant_test_bit((phys_apicid), ((phys_cpu_present_map).mask)) : variable_test_bit((phys_apicid), ((phys_cpu_present_map).mask)));
7574 static inline __attribute__((always_inline)) int default_cpu_present_to_apicid(int mps_cpu)
7576 return __default_cpu_present_to_apicid(mps_cpu);
7578 static inline __attribute__((always_inline)) int
7579 default_check_phys_apicid_present(int phys_apicid)
7581 return __default_check_phys_apicid_present(phys_apicid);
/* vm86 interrupt redirection only supports ISA IRQs 3..15; anything
 * outside that range is rejected as invalid. */
7583 static inline __attribute__((always_inline)) int invalid_vm86_irq(int irq)
7585 return irq < 3 || irq > 15;
7587 union IO_APIC_reg_00 {
7590 u32 __reserved_2 : 14,
7595 } __attribute__ ((packed)) bits;
7597 union IO_APIC_reg_01 {
7605 } __attribute__ ((packed)) bits;
7607 union IO_APIC_reg_02 {
7610 u32 __reserved_2 : 24,
7613 } __attribute__ ((packed)) bits;
7615 union IO_APIC_reg_03 {
7620 } __attribute__ ((packed)) bits;
7622 struct IO_APIC_route_entry {
7626 delivery_status : 1,
7632 __u32 __reserved_3 : 24,
7634 } __attribute__ ((packed));
7635 struct IR_IO_APIC_route_entry {
7639 delivery_status : 1,
7647 } __attribute__ ((packed));
7648 extern int nr_ioapics;
7649 extern int mpc_ioapic_id(int ioapic);
7650 extern unsigned int mpc_ioapic_addr(int ioapic);
7651 extern struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic);
7652 extern int mp_irq_entries;
7653 extern struct mpc_intsrc mp_irqs[256];
7654 extern int mpc_default_type;
7655 extern int sis_apic_bug;
7656 extern int skip_ioapic_setup;
7657 extern int noioapicquirk;
7658 extern int noioapicreroute;
7659 extern int timer_through_8259;
7660 struct io_apic_irq_attr;
7661 extern int io_apic_set_pci_routing(struct device *dev, int irq,
7662 struct io_apic_irq_attr *irq_attr);
7663 void setup_IO_APIC_irq_extra(u32 gsi);
7664 extern void ioapic_and_gsi_init(void);
7665 extern void ioapic_insert_resources(void);
7666 int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr);
7667 extern int save_ioapic_entries(void);
7668 extern void mask_ioapic_entries(void);
7669 extern int restore_ioapic_entries(void);
7670 extern int get_nr_irqs_gsi(void);
7671 extern void setup_ioapic_ids_from_mpc(void);
7672 extern void setup_ioapic_ids_from_mpc_nocheck(void);
7673 struct mp_ioapic_gsi{
7677 extern struct mp_ioapic_gsi mp_gsi_routing[];
7679 int mp_find_ioapic(u32 gsi);
7680 int mp_find_ioapic_pin(int ioapic, u32 gsi);
7681 void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) mp_register_ioapic(int id, u32 address, u32 gsi_base);
7682 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pre_init_apic_IRQ0(void);
7683 extern void mp_save_irq(struct mpc_intsrc *m);
7684 extern void disable_ioapic_support(void);
7685 extern int smp_num_siblings;
7686 extern unsigned int num_processors;
7687 static inline __attribute__((always_inline)) bool cpu_has_ht_siblings(void)
7689 bool has_siblings = false;
7690 has_siblings = (__builtin_constant_p((0*32+28)) && ( ((((0*32+28))>>5)==0 && (1UL<<(((0*32+28))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+28))>>5)==1 && (1UL<<(((0*32+28))&31) & (0|0))) || ((((0*32+28))>>5)==2 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==3 && (1UL<<(((0*32+28))&31) & (0))) || ((((0*32+28))>>5)==4 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==5 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==6 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==7 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==8 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==9 && (1UL<<(((0*32+28))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+28))) ? constant_test_bit(((0*32+28)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+28)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) && smp_num_siblings > 1;
7691 return has_siblings;
7693 extern __attribute__((section(".data..percpu" ""))) __typeof__(cpumask_var_t) cpu_sibling_map;
7694 extern __attribute__((section(".data..percpu" ""))) __typeof__(cpumask_var_t) cpu_core_map;
7695 extern __attribute__((section(".data..percpu" ""))) __typeof__(cpumask_var_t) cpu_llc_shared_map;
7696 extern __attribute__((section(".data..percpu" ""))) __typeof__(u16) cpu_llc_id;
7697 extern __attribute__((section(".data..percpu" ""))) __typeof__(int) cpu_number;
7698 static inline __attribute__((always_inline)) struct cpumask *cpu_sibling_mask(int cpu)
7700 return (*({ do { const void *__vpp_verify = (typeof((&(cpu_sibling_map))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_sibling_map))) *)(&(cpu_sibling_map)))); (typeof((typeof(*(&(cpu_sibling_map))) *)(&(cpu_sibling_map)))) (__ptr + (((__per_cpu_offset[cpu])))); }); }));
7702 static inline __attribute__((always_inline)) struct cpumask *cpu_core_mask(int cpu)
7704 return (*({ do { const void *__vpp_verify = (typeof((&(cpu_core_map))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_core_map))) *)(&(cpu_core_map)))); (typeof((typeof(*(&(cpu_core_map))) *)(&(cpu_core_map)))) (__ptr + (((__per_cpu_offset[cpu])))); }); }));
7706 static inline __attribute__((always_inline)) struct cpumask *cpu_llc_shared_mask(int cpu)
7708 return (*({ do { const void *__vpp_verify = (typeof((&(cpu_llc_shared_map))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_llc_shared_map))) *)(&(cpu_llc_shared_map)))); (typeof((typeof(*(&(cpu_llc_shared_map))) *)(&(cpu_llc_shared_map)))) (__ptr + (((__per_cpu_offset[cpu])))); }); }));
7710 extern __attribute__((section(".data..percpu" ""))) __typeof__(u16) x86_cpu_to_apicid; extern __typeof__(u16) *x86_cpu_to_apicid_early_ptr; extern __typeof__(u16) x86_cpu_to_apicid_early_map[];
7711 extern __attribute__((section(".data..percpu" ""))) __typeof__(u16) x86_bios_cpu_apicid; extern __typeof__(u16) *x86_bios_cpu_apicid_early_ptr; extern __typeof__(u16) x86_bios_cpu_apicid_early_map[];
7712 extern __attribute__((section(".data..percpu" ""))) __typeof__(int) x86_cpu_to_logical_apicid; extern __typeof__(int) *x86_cpu_to_logical_apicid_early_ptr; extern __typeof__(int) x86_cpu_to_logical_apicid_early_map[];
7713 extern unsigned long stack_start;
7715 void (*smp_prepare_boot_cpu)(void);
7716 void (*smp_prepare_cpus)(unsigned max_cpus);
7717 void (*smp_cpus_done)(unsigned max_cpus);
7718 void (*stop_other_cpus)(int wait);
7719 void (*smp_send_reschedule)(int cpu);
7720 int (*cpu_up)(unsigned cpu);
7721 int (*cpu_disable)(void);
7722 void (*cpu_die)(unsigned int cpu);
7723 void (*play_dead)(void);
7724 void (*send_call_func_ipi)(const struct cpumask *mask);
7725 void (*send_call_func_single_ipi)(int cpu);
7727 extern void set_cpu_sibling_map(int cpu);
7728 extern struct smp_ops smp_ops;
/* SMP primitive wrappers: each simply forwards to the platform-selected
 * smp_ops method table, letting native and paravirtualized kernels
 * share these call sites. */
7729 static inline __attribute__((always_inline)) void smp_send_stop(void)
7731 smp_ops.stop_other_cpus(0);
/* Argument 1 = wait for the other CPUs to actually stop (vs. 0 above). */
7733 static inline __attribute__((always_inline)) void stop_other_cpus(void)
7735 smp_ops.stop_other_cpus(1);
7737 static inline __attribute__((always_inline)) void smp_prepare_boot_cpu(void)
7739 smp_ops.smp_prepare_boot_cpu();
7741 static inline __attribute__((always_inline)) void smp_prepare_cpus(unsigned int max_cpus)
7743 smp_ops.smp_prepare_cpus(max_cpus);
7745 static inline __attribute__((always_inline)) void smp_cpus_done(unsigned int max_cpus)
7747 smp_ops.smp_cpus_done(max_cpus);
/* CPU hotplug: bring-up, disable, and teardown hooks. */
7749 static inline __attribute__((always_inline)) int __cpu_up(unsigned int cpu)
7751 return smp_ops.cpu_up(cpu);
7753 static inline __attribute__((always_inline)) int __cpu_disable(void)
7755 return smp_ops.cpu_disable();
7757 static inline __attribute__((always_inline)) void __cpu_die(unsigned int cpu)
7759 smp_ops.cpu_die(cpu);
7761 static inline __attribute__((always_inline)) void play_dead(void)
7763 smp_ops.play_dead();
/* Cross-CPU IPIs: reschedule and smp_call_function delivery. */
7765 static inline __attribute__((always_inline)) void smp_send_reschedule(int cpu)
7767 smp_ops.smp_send_reschedule(cpu);
7769 static inline __attribute__((always_inline)) void arch_send_call_function_single_ipi(int cpu)
7771 smp_ops.send_call_func_single_ipi(cpu);
7773 static inline __attribute__((always_inline)) void arch_send_call_function_ipi_mask(const struct cpumask *mask)
7775 smp_ops.send_call_func_ipi(mask);
7777 void cpu_disable_common(void);
7778 void native_smp_prepare_boot_cpu(void);
7779 void native_smp_prepare_cpus(unsigned int max_cpus);
7780 void native_smp_cpus_done(unsigned int max_cpus);
7781 int native_cpu_up(unsigned int cpunum);
7782 int native_cpu_disable(void);
7783 void native_cpu_die(unsigned int cpu);
7784 void native_play_dead(void);
7785 void play_dead_common(void);
7786 void wbinvd_on_cpu(int cpu);
7787 int wbinvd_on_all_cpus(void);
7788 void native_send_call_func_ipi(const struct cpumask *mask);
7789 void native_send_call_func_single_ipi(int cpu);
7790 void smp_store_cpu_info(int id);
7791 static inline __attribute__((always_inline)) int num_booting_cpus(void)
7793 return cpumask_weight(cpu_callout_mask);
7795 extern unsigned disabled_cpus __attribute__ ((__section__(".cpuinit.data")));
7796 extern int safe_smp_processor_id(void);
7797 static inline __attribute__((always_inline)) int logical_smp_processor_id(void)
7799 return (((apic_read(0xD0)) >> 24) & 0xFFu);
7801 extern int hard_smp_processor_id(void);
7802 extern void smp_send_stop(void);
7803 extern void smp_send_reschedule(int cpu);
7804 extern void smp_prepare_cpus(unsigned int max_cpus);
7805 extern int __cpu_up(unsigned int cpunum);
7806 extern void smp_cpus_done(unsigned int max_cpus);
7807 int smp_call_function(smp_call_func_t func, void *info, int wait);
7808 void smp_call_function_many(const struct cpumask *mask,
7809 smp_call_func_t func, void *info, bool wait);
7810 void __smp_call_function_single(int cpuid, struct call_single_data *data,
7812 int smp_call_function_any(const struct cpumask *mask,
7813 smp_call_func_t func, void *info, int wait);
7814 void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) call_function_init(void);
7815 void generic_smp_call_function_single_interrupt(void);
7816 void generic_smp_call_function_interrupt(void);
7817 void ipi_call_lock(void);
7818 void ipi_call_unlock(void);
7819 void ipi_call_lock_irq(void);
7820 void ipi_call_unlock_irq(void);
7821 int on_each_cpu(smp_call_func_t func, void *info, int wait);
7822 void smp_prepare_boot_cpu(void);
7823 extern unsigned int setup_max_cpus;
7824 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_nr_cpu_ids(void);
7825 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) smp_init(void);
7826 extern unsigned int debug_smp_processor_id(void);
7827 extern void arch_disable_smp_support(void);
7828 void smp_setup_processor_id(void);
7829 enum pageblock_bits {
7831 PB_migrate_end = PB_migrate + 3 - 1,
7835 unsigned long get_pageblock_flags_group(struct page *page,
7836 int start_bitidx, int end_bitidx);
7837 void set_pageblock_flags_group(struct page *page, unsigned long flags,
7838 int start_bitidx, int end_bitidx);
7839 extern int page_group_by_mobility_disabled;
7840 static inline __attribute__((always_inline)) int get_pageblock_migratetype(struct page *page)
7842 return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
7845 struct list_head free_list[5];
7846 unsigned long nr_free;
7849 struct zone_padding {
7851 } __attribute__((__aligned__(1 << (6))));
7852 enum zone_stat_item {
7855 NR_INACTIVE_ANON = NR_LRU_BASE,
7866 NR_SLAB_RECLAIMABLE,
7867 NR_SLAB_UNRECLAIMABLE,
7879 NR_ANON_TRANSPARENT_HUGEPAGES,
7880 NR_VM_ZONE_STAT_ITEMS };
7882 LRU_INACTIVE_ANON = 0,
7883 LRU_ACTIVE_ANON = 0 + 1,
7884 LRU_INACTIVE_FILE = 0 + 2,
7885 LRU_ACTIVE_FILE = 0 + 2 + 1,
/* LRU list classification helpers: file-backed vs. anonymous, active
 * vs. inactive, and the unevictable list. */
7889 static inline __attribute__((always_inline)) int is_file_lru(enum lru_list l)
7891 return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
7893 static inline __attribute__((always_inline)) int is_active_lru(enum lru_list l)
7895 return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
7897 static inline __attribute__((always_inline)) int is_unevictable_lru(enum lru_list l)
7899 return (l == LRU_UNEVICTABLE);
7901 enum zone_watermarks {
7907 struct per_cpu_pages {
7911 struct list_head lists[3];
7913 struct per_cpu_pageset {
7914 struct per_cpu_pages pcp;
7916 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
7925 struct zone_reclaim_stat {
7926 unsigned long recent_rotated[2];
7927 unsigned long recent_scanned[2];
7930 unsigned long watermark[NR_WMARK];
7931 unsigned long percpu_drift_mark;
7932 unsigned long lowmem_reserve[4];
7933 struct per_cpu_pageset *pageset;
7935 int all_unreclaimable;
7936 struct free_area free_area[11];
7937 unsigned long *pageblock_flags;
7938 unsigned int compact_considered;
7939 unsigned int compact_defer_shift;
7940 struct zone_padding _pad1_;
7941 spinlock_t lru_lock;
7943 struct list_head list;
7944 } lru[NR_LRU_LISTS];
7945 struct zone_reclaim_stat reclaim_stat;
7946 unsigned long pages_scanned;
7947 unsigned long flags;
7948 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
7949 unsigned int inactive_ratio;
7950 struct zone_padding _pad2_;
7951 wait_queue_head_t * wait_table;
7952 unsigned long wait_table_hash_nr_entries;
7953 unsigned long wait_table_bits;
7954 struct pglist_data *zone_pgdat;
7955 unsigned long zone_start_pfn;
7956 unsigned long spanned_pages;
7957 unsigned long present_pages;
7959 } __attribute__((__aligned__(1 << (6))));
7961 ZONE_RECLAIM_LOCKED,
/* Atomic bit operations on zone->flags (set / test-and-set / clear);
 * used for the ZONE_* reclaim and OOM state bits. */
7965 static inline __attribute__((always_inline)) void zone_set_flag(struct zone *zone, zone_flags_t flag)
7967 set_bit(flag, &zone->flags);
7969 static inline __attribute__((always_inline)) int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
7971 return test_and_set_bit(flag, &zone->flags);
7973 static inline __attribute__((always_inline)) void zone_clear_flag(struct zone *zone, zone_flags_t flag)
7975 clear_bit(flag, &zone->flags);
7977 static inline __attribute__((always_inline)) int zone_is_reclaim_congested(const struct zone *zone)
7979 return (__builtin_constant_p((ZONE_CONGESTED)) ? constant_test_bit((ZONE_CONGESTED), (&zone->flags)) : variable_test_bit((ZONE_CONGESTED), (&zone->flags)));
7981 static inline __attribute__((always_inline)) int zone_is_reclaim_locked(const struct zone *zone)
7983 return (__builtin_constant_p((ZONE_RECLAIM_LOCKED)) ? constant_test_bit((ZONE_RECLAIM_LOCKED), (&zone->flags)) : variable_test_bit((ZONE_RECLAIM_LOCKED), (&zone->flags)));
7985 static inline __attribute__((always_inline)) int zone_is_oom_locked(const struct zone *zone)
7987 return (__builtin_constant_p((ZONE_OOM_LOCKED)) ? constant_test_bit((ZONE_OOM_LOCKED), (&zone->flags)) : variable_test_bit((ZONE_OOM_LOCKED), (&zone->flags)));
7989 struct zonelist_cache;
7995 struct zonelist_cache *zlcache_ptr;
7996 struct zoneref _zonerefs[((1 << 0) * 4) + 1];
7998 struct node_active_region {
7999 unsigned long start_pfn;
8000 unsigned long end_pfn;
8003 extern struct page *mem_map;
8004 struct bootmem_data;
8005 typedef struct pglist_data {
8006 struct zone node_zones[4];
8007 struct zonelist node_zonelists[1];
8009 struct page *node_mem_map;
8010 unsigned long node_start_pfn;
8011 unsigned long node_present_pages;
8012 unsigned long node_spanned_pages;
8014 wait_queue_head_t kswapd_wait;
8015 struct task_struct *kswapd;
8016 int kswapd_max_order;
8017 enum zone_type classzone_idx;
8019 struct rw_semaphore;
8020 struct rw_semaphore {
8022 spinlock_t wait_lock;
8023 struct list_head wait_list;
8024 struct lockdep_map dep_map;
8026 extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
8027 extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
8028 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
8029 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
8030 static inline __attribute__((always_inline)) void __down_read(struct rw_semaphore *sem)
8032 asm volatile("# beginning down_read\n\t"
8033 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " " "incl" " " "(%1)\n\t"
8035 " call call_rwsem_down_read_failed\n"
8037 "# ending down_read\n\t"
8042 static inline __attribute__((always_inline)) int __down_read_trylock(struct rw_semaphore *sem)
8045 asm volatile("# beginning __down_read_trylock\n\t"
8051 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " cmpxchg %2,%0\n\t"
8054 "# ending __down_read_trylock\n\t"
8055 : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
8058 return result >= 0 ? 1 : 0;
8060 static inline __attribute__((always_inline)) void __down_write_nested(struct rw_semaphore *sem, int subclass)
8063 asm volatile("# beginning down_write\n\t"
8064 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " xadd %1,(%2)\n\t"
8067 " call call_rwsem_down_write_failed\n"
8069 "# ending down_write"
8070 : "+m" (sem->count), "=d" (tmp)
8071 : "a" (sem), "1" (((-0x0000ffffL -1) + 0x00000001L))
8074 static inline __attribute__((always_inline)) void __down_write(struct rw_semaphore *sem)
8076 __down_write_nested(sem, 0);
8078 static inline __attribute__((always_inline)) int __down_write_trylock(struct rw_semaphore *sem)
8080 long ret = ({ __typeof__(*(((&sem->count)))) __ret; __typeof__(*(((&sem->count)))) __old = (((0x00000000L))); __typeof__(*(((&sem->count)))) __new = (((((-0x0000ffffL -1) + 0x00000001L)))); switch ((sizeof(*&sem->count))) { case 1: { volatile u8 *__ptr = (volatile u8 *)(((&sem->count))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)(((&sem->count))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)(((&sem->count))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; })
8082 if (__builtin_constant_p(((ret == 0x00000000L))) ? !!((ret == 0x00000000L)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/rwsem.h", .line = 131, }; ______r = !!((ret == 0x00000000L)); ______f.miss_hit[______r]++; ______r; }))
8086 static inline __attribute__((always_inline)) void __up_read(struct rw_semaphore *sem)
8089 asm volatile("# beginning __up_read\n\t"
8090 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " xadd %1,(%2)\n\t"
8092 " call call_rwsem_wake\n"
8094 "# ending __up_read\n"
8095 : "+m" (sem->count), "=d" (tmp)
8096 : "a" (sem), "1" (-0x00000001L)
8099 static inline __attribute__((always_inline)) void __up_write(struct rw_semaphore *sem)
8102 asm volatile("# beginning __up_write\n\t"
8103 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " xadd %1,(%2)\n\t"
8105 " call call_rwsem_wake\n"
8107 "# ending __up_write\n"
8108 : "+m" (sem->count), "=d" (tmp)
8109 : "a" (sem), "1" (-((-0x0000ffffL -1) + 0x00000001L))
8112 static inline __attribute__((always_inline)) void __downgrade_write(struct rw_semaphore *sem)
8114 asm volatile("# beginning __downgrade_write\n\t"
8115 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " " "addl" " " "%2,(%1)\n\t"
8117 " call call_rwsem_downgrade_wake\n"
8119 "# ending __downgrade_write\n"
8121 : "a" (sem), "er" (-(-0x0000ffffL -1))
8124 static inline __attribute__((always_inline)) void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8126 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " " "addl" " " "%1,%0"
8130 static inline __attribute__((always_inline)) long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
8133 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "xadd %0,%1"
8134 : "+r" (tmp), "+m" (sem->count)
/* A zero count encodes "no readers and no writer"; any nonzero value
 * means the semaphore is held (or has waiters biasing the count). */
8138 static inline __attribute__((always_inline)) int rwsem_is_locked(struct rw_semaphore *sem)
8140 return sem->count != 0;
8142 extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
8143 struct lock_class_key *key);
8144 extern void down_read(struct rw_semaphore *sem);
8145 extern int down_read_trylock(struct rw_semaphore *sem);
8146 extern void down_write(struct rw_semaphore *sem);
8147 extern int down_write_trylock(struct rw_semaphore *sem);
8148 extern void up_read(struct rw_semaphore *sem);
8149 extern void up_write(struct rw_semaphore *sem);
8150 extern void downgrade_write(struct rw_semaphore *sem);
8151 extern void down_read_nested(struct rw_semaphore *sem, int subclass);
8152 extern void down_write_nested(struct rw_semaphore *sem, int subclass);
8153 extern void down_read_non_owner(struct rw_semaphore *sem);
8154 extern void up_read_non_owner(struct rw_semaphore *sem);
/* SRCU (sleepable RCU) state.  Struct members are partially elided in this
 * dump; dep_map is the lockdep hook used by srcu_read_lock()/unlock(). */
8155 struct srcu_struct_array {
8158 struct srcu_struct {
8160 struct srcu_struct_array *per_cpu_ref;
8162 struct lockdep_map dep_map;
/* SRCU API: init/cleanup, the low-level read-side primitives (wrapped by
 * the srcu_read_lock()/srcu_read_unlock() inlines further down), and the
 * grace-period waiters.  __srcu_read_lock() returns an index token that
 * must be handed back to __srcu_read_unlock(). */
8164 int __init_srcu_struct(struct srcu_struct *sp, const char *name,
8165 struct lock_class_key *key);
8166 void cleanup_srcu_struct(struct srcu_struct *sp);
8167 int __srcu_read_lock(struct srcu_struct *sp) ;
8168 void __srcu_read_unlock(struct srcu_struct *sp, int idx) ;
8169 void synchronize_srcu(struct srcu_struct *sp);
8170 void synchronize_srcu_expedited(struct srcu_struct *sp);
8171 long srcu_batches_completed(struct srcu_struct *sp);
8172 static inline __attribute__((always_inline)) int srcu_read_lock_held(struct srcu_struct *sp)
8174 if (__builtin_constant_p(((debug_locks))) ? !!((debug_locks)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/srcu.h", .line = 96, }; ______r = !!((debug_locks)); ______f.miss_hit[______r]++; ______r; }))
8175 return lock_is_held(&sp->dep_map);
/* srcu_read_lock - enter an SRCU read-side critical section; returns the
 * index from __srcu_read_lock() ('return retval;' was elided from this
 * dump).  lock_acquire() records the acquisition with lockdep; the
 * statement-expression computes the caller IP via a local label. */
8178 static inline __attribute__((always_inline)) int srcu_read_lock(struct srcu_struct *sp)
8180 int retval = __srcu_read_lock(sp);
8181 lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
/* srcu_read_unlock - leave the critical section opened by srcu_read_lock();
 * 'idx' must be the value that srcu_read_lock() returned. */
8184 static inline __attribute__((always_inline)) void srcu_read_unlock(struct srcu_struct *sp, int idx)
8186 lock_release(&(sp)->dep_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
8187 __srcu_read_unlock(sp, idx);
/* Notifier chains: singly linked lists of callbacks, each invoked as
 * notifier_call(block, event-value, data).  The four head flavors differ
 * in the synchronization they embed: atomic, blocking (rw_semaphore),
 * raw (caller-synchronized), and SRCU-protected. */
8189 struct notifier_block {
8190 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
8191 struct notifier_block *next;
8194 struct atomic_notifier_head {
8196 struct notifier_block *head;
8198 struct blocking_notifier_head {
8199 struct rw_semaphore rwsem;
8200 struct notifier_block *head;
8202 struct raw_notifier_head {
8203 struct notifier_block *head;
8205 struct srcu_notifier_head {
8207 struct srcu_struct srcu;
8208 struct notifier_block *head;
/* Registration/unregistration for each notifier-head flavor. */
8210 extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
8211 extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
8212 struct notifier_block *nb);
8213 extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
8214 struct notifier_block *nb);
8215 extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
8216 struct notifier_block *nb);
8217 extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
8218 struct notifier_block *nb);
8219 extern int blocking_notifier_chain_cond_register(
8220 struct blocking_notifier_head *nh,
8221 struct notifier_block *nb);
8222 extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
8223 struct notifier_block *nb);
8224 extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
8225 struct notifier_block *nb);
8226 extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
8227 struct notifier_block *nb);
8228 extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
8229 struct notifier_block *nb);
/* Chain invocation: the double-underscore variants additionally take
 * nr_to_call (limit on callbacks invoked) and nr_calls (out-param count). */
8230 extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
8231 unsigned long val, void *v);
8232 extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
8233 unsigned long val, void *v, int nr_to_call, int *nr_calls);
8234 extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
8235 unsigned long val, void *v);
8236 extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
8237 unsigned long val, void *v, int nr_to_call, int *nr_calls);
8238 extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
8239 unsigned long val, void *v);
8240 extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
8241 unsigned long val, void *v, int nr_to_call, int *nr_calls);
8242 extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
8243 unsigned long val, void *v);
8244 extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
8245 unsigned long val, void *v, int nr_to_call, int *nr_calls);
8246 static inline __attribute__((always_inline)) int notifier_from_errno(int err)
8248 if (__builtin_constant_p(((err))) ? !!((err)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/notifier.h", .line = 167, }; ______r = !!((err)); ______f.miss_hit[______r]++; ______r; }))
8249 return 0x8000 | (0x0001 - err);
/* notifier_to_errno - decode a notifier return value: anything above
 * 0x0001 yields the (negative) value 0x0001 - ret, i.e. it inverts the
 * 0x0001 - err encoding done by notifier_from_errno(); otherwise 0. */
8252 static inline __attribute__((always_inline)) int notifier_to_errno(int ret)
8255 return ret > 0x0001 ? 0x0001 - ret : 0;
/* Notifier chain run at reboot/shutdown time. */
8257 extern struct blocking_notifier_head reboot_notifier_list;
/* Memory-hotplug support is compiled out in this configuration: the
 * pgdat-resize and zone-span-seqlock hooks are empty stubs, and
 * mhp_notimplemented() logs a KERN_WARNING ("<4>") naming the caller. */
8262 static inline __attribute__((always_inline)) void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
8263 static inline __attribute__((always_inline)) void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
8264 static inline __attribute__((always_inline)) void pgdat_resize_init(struct pglist_data *pgdat) {}
8265 static inline __attribute__((always_inline)) unsigned zone_span_seqbegin(struct zone *zone)
8269 static inline __attribute__((always_inline)) int zone_span_seqretry(struct zone *zone, unsigned iv)
8273 static inline __attribute__((always_inline)) void zone_span_writelock(struct zone *zone) {}
8274 static inline __attribute__((always_inline)) void zone_span_writeunlock(struct zone *zone) {}
8275 static inline __attribute__((always_inline)) void zone_seqlock_init(struct zone *zone) {}
8276 static inline __attribute__((always_inline)) int mhp_notimplemented(const char *func)
8278 printk("<4>" "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
8282 static inline __attribute__((always_inline)) void register_page_bootmem_info_node(struct pglist_data *pgdat)
8285 static inline __attribute__((always_inline)) void lock_memory_hotplug(void) {}
8286 static inline __attribute__((always_inline)) void unlock_memory_hotplug(void) {}
8287 static inline __attribute__((always_inline)) int is_mem_section_removable(unsigned long pfn,
8288 unsigned long nr_pages)
/* Memory add/remove and sparse-memmap entry points (implemented elsewhere). */
8292 extern int mem_online_node(int nid);
8293 extern int add_memory(int nid, u64 start, u64 size);
8294 extern int arch_add_memory(int nid, u64 start, u64 size);
8295 extern int remove_memory(u64 start, u64 size);
8296 extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
8298 extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
8299 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
8300 unsigned long pnum);
/* Zonelist construction / reclaim wakeup / watermark checks. */
8301 extern struct mutex zonelists_mutex;
8302 void build_all_zonelists(void *data);
8303 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
8304 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
8305 int classzone_idx, int alloc_flags);
8306 bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
8307 int classzone_idx, int alloc_flags);
8308 enum memmap_context {
8312 extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
8314 enum memmap_context context);
/* !SPARSEMEM stub; NUMA-disabled identity mapping for local_memory_node(). */
8315 static inline __attribute__((always_inline)) void memory_present(int nid, unsigned long start, unsigned long end) {}
8316 static inline __attribute__((always_inline)) int local_memory_node(int node_id) { return node_id; };
/* populated_zone - non-zero iff the zone has any present pages. */
8317 static inline __attribute__((always_inline)) int populated_zone(struct zone *zone)
8319 return (!!zone->present_pages);
/* Zone-classification helpers.  'movable_zone' records which zone class
 * ZONE_MOVABLE pages were carved from; it is ZONE_HIGHMEM exactly when
 * movable memory counts as highmem. */
8321 extern int movable_zone;
8322 static inline __attribute__((always_inline)) int zone_movable_is_highmem(void)
8324 return movable_zone == ZONE_HIGHMEM;
8326 static inline __attribute__((always_inline)) int is_highmem_idx(enum zone_type idx)
8328 return (idx == ZONE_HIGHMEM ||
8329 (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
8331 static inline __attribute__((always_inline)) int is_normal_idx(enum zone_type idx)
8333 return (idx == ZONE_NORMAL);
/* is_highmem - identifies the zone by its byte offset inside the node's
 * node_zones[] array (offset == index * sizeof(struct zone)), avoiding a
 * division. */
8335 static inline __attribute__((always_inline)) int is_highmem(struct zone *zone)
8337 int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
8338 return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
8339 (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
8340 zone_movable_is_highmem());
/* Pointer-identity checks against the node's zone array. */
8342 static inline __attribute__((always_inline)) int is_normal(struct zone *zone)
8344 return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
8346 static inline __attribute__((always_inline)) int is_dma32(struct zone *zone)
8350 static inline __attribute__((always_inline)) int is_dma(struct zone *zone)
8352 return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
/* /proc/sys/vm sysctl handlers (ctl_table, write-flag, user buffer,
 * length in/out, file position).  sysctl_lowmem_reserve_ratio has
 * MAX_NR_ZONES-1 == 4-1 entries in this configuration. */
8355 int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
8356 void *, size_t *, loff_t *);
8357 extern int sysctl_lowmem_reserve_ratio[4 -1];
8358 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
8359 void *, size_t *, loff_t *);
8360 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
8361 void *, size_t *, loff_t *);
8362 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
8363 void *, size_t *, loff_t *);
8364 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
8365 void *, size_t *, loff_t *);
8366 extern int numa_zonelist_order_handler(struct ctl_table *, int,
8367 void *, size_t *, loff_t *);
8368 extern char numa_zonelist_order[];
/* Flat (non-NUMA) build: all memory hangs off the single contig_page_data.
 * first/next_online_pgdat and next_zone drive for_each_zone-style walks. */
8369 extern struct pglist_data contig_page_data;
8370 extern struct pglist_data *first_online_pgdat(void);
8371 extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
8372 extern struct zone *next_zone(struct zone *zone);
/* zoneref accessors: a zoneref caches both the zone pointer and its index. */
8373 static inline __attribute__((always_inline)) struct zone *zonelist_zone(struct zoneref *zoneref)
8375 return zoneref->zone;
8377 static inline __attribute__((always_inline)) int zonelist_zone_idx(struct zoneref *zoneref)
8379 return zoneref->zone_idx;
8381 static inline __attribute__((always_inline)) int zonelist_node_idx(struct zoneref *zoneref)
/* next_zones_zonelist / first_zones_zonelist - iterate a zonelist,
 * skipping zones above highest_zoneidx (nodemask parameter elided from
 * this dump); *zone receives the current zone. */
8385 struct zoneref *next_zones_zonelist(struct zoneref *z,
8386 enum zone_type highest_zoneidx,
8388 struct zone **zone);
8389 static inline __attribute__((always_inline)) struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
8390 enum zone_type highest_zoneidx,
8394 return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
8397 void memory_present(int nid, unsigned long start, unsigned long end);
/* __init-section, cold, non-instrumented boot-time helpers follow; the
 * repeated attribute triple is the expansion of the kernel's __init and
 * related markers. */
8398 unsigned long __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) node_memmap_size_bytes(int, unsigned long, unsigned long);
8399 static inline __attribute__((always_inline)) int memmap_valid_within(unsigned long pfn,
8400 struct page *page, struct zone *zone)
/* Per-cpu allocator state and first-chunk bootstrap descriptors (several
 * struct members elided from this dump; groups[] is a C99 flexible array). */
8404 extern void *pcpu_base_addr;
8405 extern const unsigned long *pcpu_unit_offsets;
8406 struct pcpu_group_info {
8408 unsigned long base_offset;
8409 unsigned int *cpu_map;
8411 struct pcpu_alloc_info {
8413 size_t reserved_size;
8420 struct pcpu_group_info groups[];
8428 extern const char *pcpu_fc_names[PCPU_FC_NR];
8429 extern enum pcpu_fc pcpu_chosen_fc;
/* Callback types used by the first-chunk setup paths below. */
8430 typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
8432 typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
8433 typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
8434 typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
8435 extern struct pcpu_alloc_info * __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_alloc_alloc_info(int nr_groups,
8437 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
8438 extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
8440 extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
8442 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
8443 pcpu_fc_alloc_fn_t alloc_fn,
8444 pcpu_fc_free_fn_t free_fn);
8445 extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_page_first_chunk(size_t reserved_size,
8446 pcpu_fc_alloc_fn_t alloc_fn,
8447 pcpu_fc_free_fn_t free_fn,
8448 pcpu_fc_populate_pte_fn_t populate_pte_fn);
/* Runtime per-cpu allocation API. */
8449 extern void *__alloc_reserved_percpu(size_t size, size_t align);
8450 extern bool is_kernel_percpu_address(unsigned long addr);
8451 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) percpu_init_late(void);
8452 extern void *__alloc_percpu(size_t size, size_t align);
8453 extern void free_percpu(void *__pdata);
8454 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
8455 extern void __bad_size_call_parameter(void);
8456 int arch_update_cpu_topology(void);
/* numa_mem_id - with NUMA memoryless-node support off, just the node id. */
8457 static inline __attribute__((always_inline)) int numa_mem_id(void)
8459 return numa_node_id();
8461 struct vm_area_struct;
8462 static inline __attribute__((always_inline)) int allocflags_to_migratetype(gfp_t gfp_flags)
8464 ({ int __ret_warn_on = !!((gfp_flags & ((( gfp_t)0x80000u)|(( gfp_t)0x08u))) == ((( gfp_t)0x80000u)|(( gfp_t)0x08u))); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("include/linux/gfp.h", 152); (__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); });
8465 if (__builtin_constant_p((((__builtin_constant_p(page_group_by_mobility_disabled) ? !!(page_group_by_mobility_disabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 154, }; ______r = __builtin_expect(!!(page_group_by_mobility_disabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(page_group_by_mobility_disabled) ? !!(page_group_by_mobility_disabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 154, }; ______r = __builtin_expect(!!(page_group_by_mobility_disabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 154, }; ______r = !!(((__builtin_constant_p(page_group_by_mobility_disabled) ? !!(page_group_by_mobility_disabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 154, }; ______r = __builtin_expect(!!(page_group_by_mobility_disabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
8467 return (((gfp_flags & (( gfp_t)0x08u)) != 0) << 1) |
8468 ((gfp_flags & (( gfp_t)0x80000u)) != 0);
/* gfp_zone - map the four zone-selector GFP bits (DMA=0x01, HIGHMEM=0x02,
 * DMA32=0x04, MOVABLE=0x08) to a zone_type via a packed 2-bits-per-entry
 * lookup table built as a shifted constant; the trailing do{}while(0)
 * is the expansion of a build-time check that 'bit' is not one of the
 * disallowed selector combinations. */
8470 static inline __attribute__((always_inline)) enum zone_type gfp_zone(gfp_t flags)
8473 int bit = ( int) (flags & ((( gfp_t)0x01u)|(( gfp_t)0x02u)|(( gfp_t)0x04u)|(( gfp_t)0x08u)));
8474 z = (( (ZONE_NORMAL << 0 * 2) | (ZONE_DMA << 0x01u * 2) | (ZONE_HIGHMEM << 0x02u * 2) | (ZONE_NORMAL << 0x04u * 2) | (ZONE_NORMAL << 0x08u * 2) | (ZONE_DMA << (0x08u | 0x01u) * 2) | (ZONE_MOVABLE << (0x08u | 0x02u) * 2) | (ZONE_NORMAL << (0x08u | 0x04u) * 2) ) >> (bit * 2)) &
8476 do { (void)((( 1 << (0x01u | 0x02u) | 1 << (0x01u | 0x04u) | 1 << (0x04u | 0x02u) | 1 << (0x01u | 0x04u | 0x02u) | 1 << (0x08u | 0x02u | 0x01u) | 1 << (0x08u | 0x04u | 0x01u) | 1 << (0x08u | 0x04u | 0x02u) | 1 << (0x08u | 0x04u | 0x01u | 0x02u) ) >> bit) & 1); } while (0);
8479 static inline __attribute__((always_inline)) int gfp_zonelist(gfp_t flags)
8481 if (__builtin_constant_p(((0 && (__builtin_constant_p(flags & (( gfp_t)0x40000u)) ? !!(flags & (( gfp_t)0x40000u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 265, }; ______r = __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!((0 && (__builtin_constant_p(flags & (( gfp_t)0x40000u)) ? !!(flags & (( gfp_t)0x40000u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 265, }; ______r = __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 265, }; ______r = !!((0 && (__builtin_constant_p(flags & (( gfp_t)0x40000u)) ? !!(flags & (( gfp_t)0x40000u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 265, }; ______r = __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
/* node_zonelist - flat-memory build: ignores 'nid' and indexes
 * contig_page_data's zonelists by gfp_zonelist(flags). */
8485 static inline __attribute__((always_inline)) struct zonelist *node_zonelist(int nid, gfp_t flags)
8487 return (&contig_page_data)->node_zonelists + gfp_zonelist(flags);
/* No arch-specific page alloc/free hooks on this configuration. */
8489 static inline __attribute__((always_inline)) void arch_free_page(struct page *page, int order) { }
8490 static inline __attribute__((always_inline)) void arch_alloc_page(struct page *page, int order) { }
/* Core allocator entry point (return type on an elided line) and its
 * no-nodemask convenience wrapper. */
8492 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
8493 struct zonelist *zonelist, nodemask_t *nodemask);
8494 static inline __attribute__((always_inline)) struct page *
8495 __alloc_pages(gfp_t gfp_mask, unsigned int order,
8496 struct zonelist *zonelist)
8498 return __alloc_pages_nodemask(gfp_mask, order, zonelist, ((void *)0));
8500 static inline __attribute__((always_inline)) struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
8503 if (__builtin_constant_p(((nid < 0))) ? !!((nid < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 307, }; ______r = !!((nid < 0)); ______f.miss_hit[______r]++; ______r; }))
8504 nid = numa_node_id();
8505 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
/* alloc_pages_exact_node - like alloc_pages_node but requires a valid nid;
 * the do{}while(0) is the compiled-out expansion of a nid range check
 * (nodes-shift is 0 in this configuration, so only nid 0 is valid). */
8507 static inline __attribute__((always_inline)) struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
8510 do { (void)(nid < 0 || nid >= (1 << 0)); } while (0);
8511 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
/* Page allocator convenience API: page-returning vs. address-returning
 * (free_pages takes the virtual address from __get_free_pages). */
8513 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
8514 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
8515 void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
8516 void free_pages_exact(void *virt, size_t size);
8517 void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
8518 extern void __free_pages(struct page *page, unsigned int order);
8519 extern void free_pages(unsigned long addr, unsigned int order);
8520 extern void free_hot_cold_page(struct page *page, int cold);
8521 void page_alloc_init(void);
/* Per-cpu pagelist draining and the PM-time allocation mask switch. */
8522 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
8523 void drain_all_pages(void);
8524 void drain_local_pages(void *dummy);
8525 extern gfp_t gfp_allowed_mask;
8526 extern void pm_restrict_gfp_mask(void);
8527 extern void pm_restore_gfp_mask(void);
/* Interrupt-handler return values (the 'enum irqreturn {' opener was
 * elided from this dump): NONE = not ours, HANDLED = serviced,
 * WAKE_THREAD = defer to the threaded handler. */
8529 IRQ_NONE = (0 << 0),
8530 IRQ_HANDLED = (1 << 0),
8531 IRQ_WAKE_THREAD = (1 << 1),
8533 typedef enum irqreturn irqreturn_t;
8535 extern struct irq_desc *irq_to_desc(unsigned int irq);
8536 unsigned int irq_get_next_irq(unsigned int offset);
/* irq_canonicalize - legacy PC quirk: IRQ 2 (cascade) is reported as 9. */
8537 static inline __attribute__((always_inline)) int irq_canonicalize(int irq)
8539 return ((irq == 2) ? 9 : irq);
/* x86 architecture IRQ entry points. */
8541 extern void irq_ctx_init(int cpu);
8542 extern void fixup_irqs(void);
8543 extern void irq_force_complete_move(int);
8544 extern void (*x86_platform_ipi_callback)(void);
8545 extern void native_init_IRQ(void);
8546 extern bool handle_irq(unsigned irq, struct pt_regs *regs);
8547 extern unsigned int do_IRQ(struct pt_regs *regs);
/* Bitmap sized for the 256 x86 interrupt vectors. */
8548 extern unsigned long used_vectors[(((256) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
8549 extern int vector_used_by_percpu_irq(unsigned int vector);
8550 extern void init_ISA_irqs(void);
8551 extern __attribute__((section(".data..percpu" ""))) __typeof__(struct pt_regs *) irq_regs;
8552 static inline __attribute__((always_inline)) struct pt_regs *get_irq_regs(void)
8554 return ({ typeof(irq_regs) pfo_ret__; switch (sizeof(irq_regs)) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m" (irq_regs)); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_regs)); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_regs)); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_regs)); break; default: __bad_percpu_size(); } pfo_ret__; });
8556 static inline __attribute__((always_inline)) struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
8558 struct pt_regs *old_regs;
8559 old_regs = get_irq_regs();
8560 do { typedef typeof(irq_regs) pto_T__; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/irq_regs.h", .line = 26, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pto_T__ pto_tmp__; pto_tmp__ = (new_regs); (void)pto_tmp__; } switch (sizeof(irq_regs)) { case 1: asm("mov" "b %1,""%%""fs"":" "%P" "0" : "+m" (irq_regs) : "qi" ((pto_T__)(new_regs))); break; case 2: asm("mov" "w %1,""%%""fs"":" "%P" "0" : "+m" (irq_regs) : "ri" ((pto_T__)(new_regs))); break; case 4: asm("mov" "l %1,""%%""fs"":" "%P" "0" : "+m" (irq_regs) : "ri" ((pto_T__)(new_regs))); break; case 8: asm("mov" "q %1,""%%""fs"":" "%P" "0" : "+m" (irq_regs) : "re" ((pto_T__)(new_regs))); break; default: __bad_percpu_size(); } } while (0);
/* Flow-handler callback types for the generic IRQ layer. */
8566 typedef void (*irq_flow_handler_t)(unsigned int irq,
8567 struct irq_desc *desc);
8568 typedef void (*irq_preflow_handler_t)(struct irq_data *data);
/* IRQ trigger types (low nibble) -- the enum opener was elided here. */
8570 IRQ_TYPE_NONE = 0x00000000,
8571 IRQ_TYPE_EDGE_RISING = 0x00000001,
8572 IRQ_TYPE_EDGE_FALLING = 0x00000002,
8573 IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
8574 IRQ_TYPE_LEVEL_HIGH = 0x00000004,
8575 IRQ_TYPE_LEVEL_LOW = 0x00000008,
8576 IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
8577 IRQ_TYPE_SENSE_MASK = 0x0000000f,
8578 IRQ_TYPE_PROBE = 0x00000010,
/* Per-IRQ line status/policy flags (bits 8..16). */
8579 IRQ_LEVEL = (1 << 8),
8580 IRQ_PER_CPU = (1 << 9),
8581 IRQ_NOPROBE = (1 << 10),
8582 IRQ_NOREQUEST = (1 << 11),
8583 IRQ_NOAUTOEN = (1 << 12),
8584 IRQ_NO_BALANCING = (1 << 13),
8585 IRQ_MOVE_PCNTXT = (1 << 14),
8586 IRQ_NESTED_THREAD = (1 << 15),
8587 IRQ_NOTHREAD = (1 << 16),
/* Deprecated compatibility wrapper for testing the IRQ_PER_CPU bit. */
8589 static inline __attribute__((always_inline)) __attribute__((deprecated)) bool CHECK_IRQ_PER_CPU(unsigned int status)
8591 return status & IRQ_PER_CPU;
/* Return codes for irq_chip->irq_set_affinity (enum opener elided). */
8594 IRQ_SET_MASK_OK = 0,
8595 IRQ_SET_MASK_OK_NOCOPY,
/* struct irq_data members (the struct opener and several fields were
 * elided here); state_use_accessors holds the IRQD_* bits and should only
 * be read/written through the irqd_* accessors below. */
8601 unsigned int state_use_accessors;
8602 struct irq_chip *chip;
8605 struct msi_desc *msi_desc;
8606 cpumask_var_t affinity;
/* IRQD_* state bits stored in state_use_accessors: low nibble caches the
 * trigger type, bits 8..18 carry per-IRQ runtime state. */
8609 IRQD_TRIGGER_MASK = 0xf,
8610 IRQD_SETAFFINITY_PENDING = (1 << 8),
8611 IRQD_NO_BALANCING = (1 << 10),
8612 IRQD_PER_CPU = (1 << 11),
8613 IRQD_AFFINITY_SET = (1 << 12),
8614 IRQD_LEVEL = (1 << 13),
8615 IRQD_WAKEUP_STATE = (1 << 14),
8616 IRQD_MOVE_PCNTXT = (1 << 15),
8617 IRQD_IRQ_DISABLED = (1 << 16),
8618 IRQD_IRQ_MASKED = (1 << 17),
8619 IRQD_IRQ_INPROGRESS = (1 << 18),
/* Accessors over irq_data->state_use_accessors; each tests, sets or clears
 * one of the IRQD_* bits defined above. */
8621 static inline __attribute__((always_inline)) bool irqd_is_setaffinity_pending(struct irq_data *d)
8623 return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
8625 static inline __attribute__((always_inline)) bool irqd_is_per_cpu(struct irq_data *d)
8627 return d->state_use_accessors & IRQD_PER_CPU;
/* Balancing is allowed only when neither PER_CPU nor NO_BALANCING is set. */
8629 static inline __attribute__((always_inline)) bool irqd_can_balance(struct irq_data *d)
8631 return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
8633 static inline __attribute__((always_inline)) bool irqd_affinity_was_set(struct irq_data *d)
8635 return d->state_use_accessors & IRQD_AFFINITY_SET;
8637 static inline __attribute__((always_inline)) void irqd_mark_affinity_was_set(struct irq_data *d)
8639 d->state_use_accessors |= IRQD_AFFINITY_SET;
/* Trigger type lives in the low IRQD_TRIGGER_MASK nibble. */
8641 static inline __attribute__((always_inline)) u32 irqd_get_trigger_type(struct irq_data *d)
8643 return d->state_use_accessors & IRQD_TRIGGER_MASK;
8645 static inline __attribute__((always_inline)) void irqd_set_trigger_type(struct irq_data *d, u32 type)
8647 d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
8648 d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
8650 static inline __attribute__((always_inline)) bool irqd_is_level_type(struct irq_data *d)
8652 return d->state_use_accessors & IRQD_LEVEL;
8654 static inline __attribute__((always_inline)) bool irqd_is_wakeup_set(struct irq_data *d)
8656 return d->state_use_accessors & IRQD_WAKEUP_STATE;
8658 static inline __attribute__((always_inline)) bool irqd_can_move_in_process_context(struct irq_data *d)
8660 return d->state_use_accessors & IRQD_MOVE_PCNTXT;
8662 static inline __attribute__((always_inline)) bool irqd_irq_disabled(struct irq_data *d)
8664 return d->state_use_accessors & IRQD_IRQ_DISABLED;
8666 static inline __attribute__((always_inline)) bool irqd_irq_masked(struct irq_data *d)
8668 return d->state_use_accessors & IRQD_IRQ_MASKED;
8670 static inline __attribute__((always_inline)) bool irqd_irq_inprogress(struct irq_data *d)
8672 return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
/* Set/clear INPROGRESS, used around chained-handler invocation. */
8674 static inline __attribute__((always_inline)) void irqd_set_chained_irq_inprogress(struct irq_data *d)
8676 d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
8678 static inline __attribute__((always_inline)) void irqd_clr_chained_irq_inprogress(struct irq_data *d)
8680 d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
/* struct irq_chip callback table (the struct opener was elided from this
 * dump): low-level controller operations -- startup/shutdown,
 * enable/disable, ack/mask/unmask/eoi, affinity, retrigger, trigger-type
 * and wakeup configuration, slow-bus locking, CPU hotplug and PM hooks. */
8684 unsigned int (*irq_startup)(struct irq_data *data);
8685 void (*irq_shutdown)(struct irq_data *data);
8686 void (*irq_enable)(struct irq_data *data);
8687 void (*irq_disable)(struct irq_data *data);
8688 void (*irq_ack)(struct irq_data *data);
8689 void (*irq_mask)(struct irq_data *data);
8690 void (*irq_mask_ack)(struct irq_data *data);
8691 void (*irq_unmask)(struct irq_data *data);
8692 void (*irq_eoi)(struct irq_data *data);
8693 int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
8694 int (*irq_retrigger)(struct irq_data *data);
8695 int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
8696 int (*irq_set_wake)(struct irq_data *data, unsigned int on);
8697 void (*irq_bus_lock)(struct irq_data *data);
8698 void (*irq_bus_sync_unlock)(struct irq_data *data);
8699 void (*irq_cpu_online)(struct irq_data *data);
8700 void (*irq_cpu_offline)(struct irq_data *data);
8701 void (*irq_suspend)(struct irq_data *data);
8702 void (*irq_resume)(struct irq_data *data);
8703 void (*irq_pm_shutdown)(struct irq_data *data);
8704 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
8705 unsigned long flags;
/* IRQCHIP_* values for the 'flags' member above (enum opener elided). */
8708 IRQCHIP_SET_TYPE_MASKED = (1 << 0),
8709 IRQCHIP_EOI_IF_HANDLED = (1 << 1),
8710 IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
8711 IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
/* Forward declarations used by struct irq_desc. */
8713 struct irq_affinity_notify;
8714 struct proc_dir_entry;
8715 struct timer_rand_state;
/* struct irq_desc members (struct opener elided): per-IRQ descriptor --
 * embedded irq_data, handler/action chain, spurious-IRQ accounting
 * (irq_count/last_unhandled/irqs_unhandled), affinity bookkeeping,
 * threaded-handler state, and the /proc/irq directory entry.  The whole
 * struct is cacheline-aligned (1 << 6 == 64 bytes). */
8717 struct irq_data irq_data;
8718 struct timer_rand_state *timer_rand_state;
8719 unsigned int *kstat_irqs;
8720 irq_flow_handler_t handle_irq;
8721 struct irqaction *action;
8722 unsigned int status_use_accessors;
8723 unsigned int core_internal_state__do_not_mess_with_it;
8725 unsigned int wake_depth;
8726 unsigned int irq_count;
8727 unsigned long last_unhandled;
8728 unsigned int irqs_unhandled;
8729 raw_spinlock_t lock;
8730 const struct cpumask *affinity_hint;
8731 struct irq_affinity_notify *affinity_notify;
8732 cpumask_var_t pending_mask;
8733 unsigned long threads_oneshot;
8734 atomic_t threads_active;
8735 wait_queue_head_t wait_for_threads;
8736 struct proc_dir_entry *dir;
8738 } __attribute__((__aligned__(1 << (6))));
/* Static descriptor table; the NR_IRQS expression picks the smaller of the
 * two vector-count formulas. */
8739 extern struct irq_desc irq_desc[((32 * 8) < ( 32 * 64 ) ? (256 + (32 * 8)) : (256 + ( 32 * 64 )))];
/* Convenience accessors into an irq_desc's embedded irq_data. */
8740 static inline __attribute__((always_inline)) struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
8742 return &desc->irq_data;
8744 static inline __attribute__((always_inline)) struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
8746 return desc->irq_data.chip;
8748 static inline __attribute__((always_inline)) void *irq_desc_get_chip_data(struct irq_desc *desc)
8750 return desc->irq_data.chip_data;
8752 static inline __attribute__((always_inline)) void *irq_desc_get_handler_data(struct irq_desc *desc)
8754 return desc->irq_data.handler_data;
8756 static inline __attribute__((always_inline)) struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
8758 return desc->irq_data.msi_desc;
/* Invoke the installed flow handler for this descriptor. */
8760 static inline __attribute__((always_inline)) void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
8762 desc->handle_irq(irq, desc);
8764 int generic_handle_irq(unsigned int irq);
/* Non-NULL action chain means at least one handler is requested.
 * NOTE(review): desc from irq_to_desc() is dereferenced without a NULL
 * check here -- callers presumably pass valid irq numbers; confirm. */
8765 static inline __attribute__((always_inline)) int irq_has_action(unsigned int irq)
8767 struct irq_desc *desc = irq_to_desc(irq);
8768 return desc->action != ((void *)0);
/* Install a flow handler; "_locked" -- caller is expected to hold the
 * descriptor lock (naming convention; locking not visible here). */
8770 static inline __attribute__((always_inline)) void __irq_set_handler_locked(unsigned int irq,
8771 irq_flow_handler_t handler)
8773 struct irq_desc *desc;
8774 desc = irq_to_desc(irq);
8775 desc->handle_irq = handler;
/* Install chip and flow handler together (the name parameter's use was
 * elided from this dump). */
8777 static inline __attribute__((always_inline)) void
8778 __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
8779 irq_flow_handler_t handler, const char *name)
8781 struct irq_desc *desc;
8782 desc = irq_to_desc(irq);
8783 irq_desc_get_irq_data(desc)->chip = chip;
8784 desc->handle_irq = handler;
/* Balancing is disabled when the line is per-CPU or explicitly pinned. */
8787 static inline __attribute__((always_inline)) int irq_balancing_disabled(unsigned int irq)
8789 struct irq_desc *desc;
8790 desc = irq_to_desc(irq);
8791 return desc->status_use_accessors & (IRQ_PER_CPU | IRQ_NO_BALANCING);
/* Assign a lockdep class to the descriptor spinlock of @irq.  The large
 * conditional is the expansion of the ftrace branch profiler's if()
 * instrumentation; semantically it is just "if (desc)". */
8793 static inline __attribute__((always_inline)) void
8794 irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
8796 struct irq_desc *desc = irq_to_desc(irq);
8797 if (__builtin_constant_p(((desc))) ? !!((desc)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/irqdesc.h", .line = 159, }; ______r = !!((desc)); ______f.miss_hit[______r]++; ______r; }))
8798 lockdep_init_map(&(&desc->lock)->dep_map, "class", class, 0);
/* --- Kernel profiling interface (include/linux/profile.h) --- */
8800 struct proc_dir_entry;
8802 struct notifier_block;
8803 void create_prof_cpu_mask(struct proc_dir_entry *de);
8804 int create_proc_profile(void);
/* prof_on: nonzero profiling type selector, placed in .data..read_mostly. */
8809 extern int prof_on __attribute__((__section__(".data..read_mostly")));
8810 int profile_init(void);
8811 int profile_setup(char *str);
8812 void profile_tick(int type);
8813 void profile_hits(int type, void *ip, unsigned int nr_hits);
/* Record one profiling hit at instruction pointer @ip when profiling of
 * this @type is active.  The giant conditional is the expansion of
 * "if (likely(prof_on == type))" under the ftrace branch profiler. */
8814 static inline __attribute__((always_inline)) void profile_hit(int type, void *ip)
8816 if (__builtin_constant_p((((__builtin_constant_p(prof_on == type) ? !!(prof_on == type) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/profile.h", .line = 61, }; ______r = __builtin_expect(!!(prof_on == type), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(prof_on == type) ? !!(prof_on == type) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/profile.h", .line = 61, }; ______r = __builtin_expect(!!(prof_on == type), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/profile.h", .line = 61, }; ______r = !!(((__builtin_constant_p(prof_on == type) ? !!(prof_on == type) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/profile.h", .line = 61, }; ______r = __builtin_expect(!!(prof_on == type), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
8817 profile_hits(type, ip, 1);
/* Task-lifecycle profiling hooks and notifier (un)registration. */
8821 void profile_task_exit(struct task_struct * task);
8822 int profile_handoff_task(struct task_struct * task);
8823 void profile_munmap(unsigned long addr);
8824 int task_handoff_register(struct notifier_block * n);
8825 int task_handoff_unregister(struct notifier_block * n);
8826 int profile_event_register(enum profile_type, struct notifier_block * n);
8827 int profile_event_unregister(enum profile_type, struct notifier_block * n);
8828 int register_timer_hook(int (*hook)(struct pt_regs *));
8829 void unregister_timer_hook(int (*hook)(struct pt_regs *));
/* Linker-defined section boundary symbols (see vmlinux.lds):
 * text/data/bss/init/percpu/kprobes/rodata/ctors ranges. */
8831 extern char _text[], _stext[], _etext[];
8832 extern char _data[], _sdata[], _edata[];
8833 extern char __bss_start[], __bss_stop[];
8834 extern char __init_begin[], __init_end[];
8835 extern char _sinittext[], _einittext[];
8837 extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
8838 extern char __kprobes_text_start[], __kprobes_text_end[];
8839 extern char __entry_text_start[], __entry_text_end[];
8840 extern char __initdata_begin[], __initdata_end[];
8841 extern char __start_rodata[], __end_rodata[];
8842 extern char __ctors_start[], __ctors_end[];
/* Arch predicates for whether @addr lies in kernel text / data.
 * NOTE(review): the function bodies (original lines 8844-8846 and
 * 8848-8850) were dropped by the extraction; only the signatures remain.
 * Code left byte-identical. */
8843 static inline __attribute__((always_inline)) int arch_is_kernel_text(unsigned long addr)
8847 static inline __attribute__((always_inline)) int arch_is_kernel_data(unsigned long addr)
/* One entry of the kernel exception table: faulting instruction address
 * and the address of its fixup code. */
8851 struct exception_table_entry {
8852 unsigned long insn, fixup;
/* Search the exception table and patch regs->ip to the fixup; returns
 * nonzero when a fixup was found. */
8854 extern int fixup_exception(struct pt_regs *regs);
/* Assembly user-access helpers, selected by operand size (1/2/4/8 bytes).
 * The *_bad variants exist only to trigger a link error on unsupported
 * sizes. */
8855 extern int __get_user_1(void);
8856 extern int __get_user_2(void);
8857 extern int __get_user_4(void);
8858 extern int __get_user_8(void);
8859 extern int __get_user_bad(void);
8860 extern void __put_user_bad(void);
8861 extern void __put_user_1(void);
8862 extern void __put_user_2(void);
8863 extern void __put_user_4(void);
8864 extern void __put_user_8(void);
/* Dummy "large" type so inline-asm "m" constraints cover a wide range
 * around the user pointer instead of a single scalar. */
8865 struct __large_struct { unsigned long buf[100]; };
/* Cache-line aligned mask controlling movsl usage (interior member lines
 * were dropped by the extraction; declaration left byte-identical). */
8866 extern struct movsl_mask {
8868 } __attribute__((__aligned__((1 << (6))))) movsl_mask;
/* Low-level user-copy primitives; each returns the number of bytes that
 * could NOT be copied (0 on full success). */
8869 unsigned long __attribute__((warn_unused_result)) __copy_to_user_ll
8870 (void *to, const void *from, unsigned long n);
8871 unsigned long __attribute__((warn_unused_result)) __copy_from_user_ll
8872 (void *to, const void *from, unsigned long n);
8873 unsigned long __attribute__((warn_unused_result)) __copy_from_user_ll_nozero
8874 (void *to, const void *from, unsigned long n);
8875 unsigned long __attribute__((warn_unused_result)) __copy_from_user_ll_nocache
8876 (void *to, const void *from, unsigned long n);
8877 unsigned long __attribute__((warn_unused_result)) __copy_from_user_ll_nocache_nozero
8878 (void *to, const void *from, unsigned long n);
/* Copy @n bytes to user space without might_sleep checks (atomic context).
 * When @n is a compile-time constant of size 1/2/4, the copy is done with
 * a single inline-asm __put_user_size store (each "do {...} while (0)"
 * below is one expansion, with .fixup/__ex_table fault handling);
 * otherwise it falls through to __copy_to_user_ll.  Returns bytes NOT
 * copied.  NOTE(review): the switch/return glue lines between the asm
 * expansions were dropped by the extraction; code left byte-identical. */
8879 static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
8880 __copy_to_user_inatomic(void *to, const void *from, unsigned long n)
8882 if (__builtin_constant_p(((__builtin_constant_p(n)))) ? !!((__builtin_constant_p(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 46, }; ______r = !!((__builtin_constant_p(n))); ______f.miss_hit[______r]++; ______r; })) {
8886 do { ret = 0; (void)0; switch (1) { case 1: asm volatile("1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "iq"(*(u8 *)from), "m" ((*(struct __large_struct *)((u8 *)to))), "i" (1), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u8 *)from), "m" ((*(struct __large_struct *)((u8 *)to))), "i" (1), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u8 *)from), "m" ((*(struct __large_struct *)((u8 *)to))), "i" (1), "0" (ret)); break; case 8: asm volatile("1: movl %%eax,0(%2)\n" "2: movl %%edx,4(%2)\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl %3,%0\n" " jmp 3b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "4b" "\n" " .previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "4b" "\n" " .previous\n" : "=r" (ret) : "A" ((__typeof__(*(u8 *)to))(*(u8 *)from)), "r" ((u8 *)to), "i" (1), "0" (ret)); break; default: __put_user_bad(); } } while (0)
8890 do { ret = 0; (void)0; switch (2) { case 1: asm volatile("1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "iq"(*(u16 *)from), "m" ((*(struct __large_struct *)((u16 *)to))), "i" (2), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u16 *)from), "m" ((*(struct __large_struct *)((u16 *)to))), "i" (2), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u16 *)from), "m" ((*(struct __large_struct *)((u16 *)to))), "i" (2), "0" (ret)); break; case 8: asm volatile("1: movl %%eax,0(%2)\n" "2: movl %%edx,4(%2)\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl %3,%0\n" " jmp 3b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "4b" "\n" " .previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "4b" "\n" " .previous\n" : "=r" (ret) : "A" ((__typeof__(*(u16 *)to))(*(u16 *)from)), "r" ((u16 *)to), "i" (2), "0" (ret)); break; default: __put_user_bad(); } } while (0)
8894 do { ret = 0; (void)0; switch (4) { case 1: asm volatile("1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "iq"(*(u32 *)from), "m" ((*(struct __large_struct *)((u32 *)to))), "i" (4), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u32 *)from), "m" ((*(struct __large_struct *)((u32 *)to))), "i" (4), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u32 *)from), "m" ((*(struct __large_struct *)((u32 *)to))), "i" (4), "0" (ret)); break; case 8: asm volatile("1: movl %%eax,0(%2)\n" "2: movl %%edx,4(%2)\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl %3,%0\n" " jmp 3b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "4b" "\n" " .previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "4b" "\n" " .previous\n" : "=r" (ret) : "A" ((__typeof__(*(u32 *)to))(*(u32 *)from)), "r" ((u32 *)to), "i" (4), "0" (ret)); break; default: __put_user_bad(); } } while (0)
8899 return __copy_to_user_ll(to, from, n);
/* __copy_to_user: no-access-check copy to user space; in this
 * configuration it simply forwards to the inatomic variant.
 * Returns the number of bytes that could not be copied. */
8901 static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
8902 __copy_to_user(void *to, const void *from, unsigned long n)
8905 return __copy_to_user_inatomic(to, from, n);
/* Copy @n bytes from user space in atomic context.  Constant sizes 1/2/4
 * use a single __get_user_size inline-asm load (fixup zeroes the dest on
 * fault); other sizes use __copy_from_user_ll_nozero, i.e. the tail is
 * NOT zeroed on fault.  Returns bytes not copied.  NOTE(review): glue
 * lines between the asm expansions were dropped by the extraction. */
8907 static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long
8908 __copy_from_user_inatomic(void *to, const void *from, unsigned long n)
8910 if (__builtin_constant_p(((__builtin_constant_p(n)))) ? !!((__builtin_constant_p(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 96, }; ______r = !!((__builtin_constant_p(n))); ______f.miss_hit[______r]++; ______r; })) {
8914 do { ret = 0; (void)0; switch (1) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 8: (*(u8 *)to) = __get_user_bad(); break; default: (*(u8 *)to) = __get_user_bad(); } } while (0);
8917 do { ret = 0; (void)0; switch (2) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 8: (*(u16 *)to) = __get_user_bad(); break; default: (*(u16 *)to) = __get_user_bad(); } } while (0);
8920 do { ret = 0; (void)0; switch (4) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 8: (*(u32 *)to) = __get_user_bad(); break; default: (*(u32 *)to) = __get_user_bad(); } } while (0);
8924 return __copy_from_user_ll_nozero(to, from, n);
/* Copy @n bytes from user space (no access_ok check).  Constant sizes
 * 1/2/4 use a single __get_user_size load; otherwise falls through to
 * __copy_from_user_ll, which (unlike the _nozero variant above) zeroes
 * the uncopied tail on fault.  Returns bytes not copied.  NOTE(review):
 * glue lines between the asm expansions were dropped by the extraction. */
8926 static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long
8927 __copy_from_user(void *to, const void *from, unsigned long n)
8930 if (__builtin_constant_p(((__builtin_constant_p(n)))) ? !!((__builtin_constant_p(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 140, }; ______r = !!((__builtin_constant_p(n))); ______f.miss_hit[______r]++; ______r; })) {
8934 do { ret = 0; (void)0; switch (1) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 8: (*(u8 *)to) = __get_user_bad(); break; default: (*(u8 *)to) = __get_user_bad(); } } while (0);
8937 do { ret = 0; (void)0; switch (2) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 8: (*(u16 *)to) = __get_user_bad(); break; default: (*(u16 *)to) = __get_user_bad(); } } while (0);
8940 do { ret = 0; (void)0; switch (4) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 8: (*(u32 *)to) = __get_user_bad(); break; default: (*(u32 *)to) = __get_user_bad(); } } while (0);
8944 return __copy_from_user_ll(to, from, n);
/* Cache-bypassing variant of __copy_from_user: constant sizes 1/2/4 use
 * a plain __get_user_size load (no nocache benefit at that size); larger
 * copies go through __copy_from_user_ll_nocache, which uses non-temporal
 * stores.  Returns bytes not copied.  NOTE(review): glue lines between
 * the asm expansions were dropped by the extraction. */
8946 static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long __copy_from_user_nocache(void *to,
8947 const void *from, unsigned long n)
8950 if (__builtin_constant_p(((__builtin_constant_p(n)))) ? !!((__builtin_constant_p(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 162, }; ______r = !!((__builtin_constant_p(n))); ______f.miss_hit[______r]++; ______r; })) {
8954 do { ret = 0; (void)0; switch (1) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 8: (*(u8 *)to) = __get_user_bad(); break; default: (*(u8 *)to) = __get_user_bad(); } } while (0);
8957 do { ret = 0; (void)0; switch (2) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 8: (*(u16 *)to) = __get_user_bad(); break; default: (*(u16 *)to) = __get_user_bad(); } } while (0);
8960 do { ret = 0; (void)0; switch (4) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 8: (*(u32 *)to) = __get_user_bad(); break; default: (*(u32 *)to) = __get_user_bad(); } } while (0);
8964 return __copy_from_user_ll_nocache(to, from, n);
/* Atomic-context, cache-bypassing copy from user space; does not zero
 * the destination tail on fault.  Returns bytes not copied.
 * NOTE(review): the parameter line carrying "unsigned long n" was
 * dropped by the extraction; code left byte-identical. */
8966 static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long
8967 __copy_from_user_inatomic_nocache(void *to, const void *from,
8970 return __copy_from_user_ll_nocache_nozero(to, from, n);
/* Checked user-copy entry points (defined out of line). */
8972 unsigned long __attribute__((warn_unused_result)) copy_to_user(void *to,
8973 const void *from, unsigned long n);
8974 unsigned long __attribute__((warn_unused_result)) _copy_from_user(void *to,
/* Link-time trap: calling this emits a compile warning when the compiler
 * cannot prove the copy_from_user destination buffer is large enough. */
8977 extern void copy_from_user_overflow(void)
8978 __attribute__((warning("copy_from_user() buffer size is not provably correct")))
/* Hardened copy_from_user: uses __builtin_object_size on @to and only
 * performs the copy when the destination provably fits (sz == -1 means
 * "unknown size"); otherwise calls copy_from_user_overflow() to raise a
 * compile-time warning.  The giant conditional is the ftrace-instrumented
 * expansion of "if (likely(sz == -1 || sz >= n))".  NOTE(review): the
 * else/return glue lines were dropped by the extraction. */
8980 static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result)) copy_from_user(void *to,
8984 int sz = __builtin_object_size(to, 0);
8985 if (__builtin_constant_p((((__builtin_constant_p(sz == -1 || sz >= n) ? !!(sz == -1 || sz >= n) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 208, }; ______r = __builtin_expect(!!(sz == -1 || sz >= n), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(sz == -1 || sz >= n) ? !!(sz == -1 || sz >= n) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 208, }; ______r = __builtin_expect(!!(sz == -1 || sz >= n), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 208, }; ______r = !!(((__builtin_constant_p(sz == -1 || sz >= n) ? !!(sz == -1 || sz >= n) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 208, }; ______r = __builtin_expect(!!(sz == -1 || sz >= n), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
8986 n = _copy_from_user(to, from, n);
8988 copy_from_user_overflow();
/* User-space string and memory-clearing helpers. */
8991 long __attribute__((warn_unused_result)) strncpy_from_user(char *dst, const char *src,
8993 long __attribute__((warn_unused_result)) __strncpy_from_user(char *dst,
8994 const char *src, long count);
8995 long strnlen_user(const char *str, long n);
8996 unsigned long __attribute__((warn_unused_result)) clear_user(void *mem, unsigned long len);
8997 unsigned long __attribute__((warn_unused_result)) __clear_user(void *mem, unsigned long len);
/* brk area bounds and end-of-exception-table linker symbols. */
8998 extern char __brk_base[], __brk_limit[];
8999 extern struct exception_table_entry __stop___ex_table[];
/* Assembly interrupt entry points (arch/x86 entry code); one
 * invalidate_interrupt per TLB-flush vector (0..31). */
9000 extern void apic_timer_interrupt(void);
9001 extern void x86_platform_ipi(void);
9002 extern void error_interrupt(void);
9003 extern void irq_work_interrupt(void);
9004 extern void spurious_interrupt(void);
9005 extern void thermal_interrupt(void);
9006 extern void reschedule_interrupt(void);
9007 extern void mce_self_interrupt(void);
9008 extern void invalidate_interrupt(void);
9009 extern void invalidate_interrupt0(void);
9010 extern void invalidate_interrupt1(void);
9011 extern void invalidate_interrupt2(void);
9012 extern void invalidate_interrupt3(void);
9013 extern void invalidate_interrupt4(void);
9014 extern void invalidate_interrupt5(void);
9015 extern void invalidate_interrupt6(void);
9016 extern void invalidate_interrupt7(void);
9017 extern void invalidate_interrupt8(void);
9018 extern void invalidate_interrupt9(void);
9019 extern void invalidate_interrupt10(void);
9020 extern void invalidate_interrupt11(void);
9021 extern void invalidate_interrupt12(void);
9022 extern void invalidate_interrupt13(void);
9023 extern void invalidate_interrupt14(void);
9024 extern void invalidate_interrupt15(void);
9025 extern void invalidate_interrupt16(void);
9026 extern void invalidate_interrupt17(void);
9027 extern void invalidate_interrupt18(void);
9028 extern void invalidate_interrupt19(void);
9029 extern void invalidate_interrupt20(void);
9030 extern void invalidate_interrupt21(void);
9031 extern void invalidate_interrupt22(void);
9032 extern void invalidate_interrupt23(void);
9033 extern void invalidate_interrupt24(void);
9034 extern void invalidate_interrupt25(void);
9035 extern void invalidate_interrupt26(void);
9036 extern void invalidate_interrupt27(void);
9037 extern void invalidate_interrupt28(void);
9038 extern void invalidate_interrupt29(void);
9039 extern void invalidate_interrupt30(void);
9040 extern void invalidate_interrupt31(void);
9041 extern void irq_move_cleanup_interrupt(void);
9042 extern void reboot_interrupt(void);
9043 extern void threshold_interrupt(void);
9044 extern void call_function_interrupt(void);
9045 extern void call_function_single_interrupt(void);
/* IO-APIC setup interface. */
9046 extern unsigned long io_apic_irqs;
9047 extern void init_VISWS_APIC_irqs(void);
9048 extern void setup_IO_APIC(void);
9049 extern void disable_IO_APIC(void);
/* NOTE(review): member lines of this struct (originals 9051-9055,
 * presumably ioapic/ioapic_pin/trigger/polarity per the setter below)
 * were dropped by the extraction; code left byte-identical. */
9050 struct io_apic_irq_attr {
/* Plain field-by-field initializer for a struct io_apic_irq_attr. */
9056 static inline __attribute__((always_inline)) void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
9057 int ioapic, int ioapic_pin,
9058 int trigger, int polarity)
9060 irq_attr->ioapic = ioapic;
9061 irq_attr->ioapic_pin = ioapic_pin;
9062 irq_attr->trigger = trigger;
9063 irq_attr->polarity = polarity;
/* Interrupt-remapping / vector-domain state.  NOTE(review): several
 * member lines of struct irq_2_iommu and of the enclosing irq_cfg were
 * dropped by the extraction; the fragments below are left byte-identical. */
9065 struct irq_2_iommu {
9066 struct intel_iommu *iommu;
9072 struct irq_pin_list *irq_2_pin;
9073 cpumask_var_t domain;
9074 cpumask_var_t old_domain;
9076 u8 move_in_progress : 1;
/* Vector allocation and IRQ-migration helpers. */
9078 extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
9079 extern void send_cleanup_vector(struct irq_cfg *);
9081 int __ioapic_set_affinity(struct irq_data *, const struct cpumask *,
9082 unsigned int *dest_id);
9083 extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
9084 extern void setup_ioapic_dest(void);
9085 extern void enable_IO_APIC(void);
/* Global counters for erroneous / misrouted interrupts. */
9086 extern atomic_t irq_err_count;
9087 extern atomic_t irq_mis_count;
/* C-level smp_* interrupt handlers (called from the asm stubs above)
 * and the generic-IRQ (kernel/irq) public interface. */
9088 extern void eisa_set_level_irq(unsigned int irq);
9089 extern void smp_apic_timer_interrupt(struct pt_regs *);
9090 extern void smp_spurious_interrupt(struct pt_regs *);
9091 extern void smp_x86_platform_ipi(struct pt_regs *);
9092 extern void smp_error_interrupt(struct pt_regs *);
9093 extern __attribute__((regparm(0))) void smp_irq_move_cleanup_interrupt(void);
9094 extern void smp_reschedule_interrupt(struct pt_regs *);
9095 extern void smp_call_function_interrupt(struct pt_regs *);
9096 extern void smp_call_function_single_interrupt(struct pt_regs *);
9097 extern void smp_invalidate_interrupt(struct pt_regs *);
/* Table of 256-0x20 interrupt gate stubs, kept in .init.rodata. */
9098 extern void (*__attribute__ ((__section__(".init.rodata"))) interrupt[256 -0x20])(void);
/* Per-CPU vector -> irq number translation table. */
9099 typedef int vector_irq_t[256];
9100 extern __attribute__((section(".data..percpu" ""))) __typeof__(vector_irq_t) vector_irq;
9101 extern void setup_vector_irq(int cpu);
9102 extern void lock_vector_lock(void);
9103 extern void unlock_vector_lock(void);
9104 extern void __setup_vector_irq(int cpu);
9106 extern int setup_irq(unsigned int irq, struct irqaction *new);
9107 extern void remove_irq(unsigned int irq, struct irqaction *act);
9108 extern void irq_cpu_online(void);
9109 extern void irq_cpu_offline(void);
9110 extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
9111 void irq_move_irq(struct irq_data *data);
9112 void irq_move_masked_irq(struct irq_data *data);
9113 extern int no_irq_affinity;
/* Standard flow handlers for the various interrupt types. */
9114 extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
9115 extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
9116 extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
9117 extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
9118 extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
9119 extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
9120 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
9121 extern void handle_nested_irq(unsigned int irq);
9122 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
9123 irqreturn_t action_ret);
9124 extern int noirqdebug_setup(char *str);
9125 extern int can_request_irq(unsigned int irq, unsigned long irqflags);
9126 extern struct irq_chip no_irq_chip;
9127 extern struct irq_chip dummy_irq_chip;
/* NOTE(review): the return-type line of this prototype (original 9128)
 * was dropped by the extraction; code left byte-identical. */
9129 irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
9130 irq_flow_handler_t handle, const char *name);
9131 static inline __attribute__((always_inline)) void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
9132 irq_flow_handler_t handle)
9134 irq_set_chip_and_handler_name(irq, chip, handle, ((void *)0));
9137 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
9139 static inline __attribute__((always_inline)) void
9140 irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
9142 __irq_set_handler(irq, handle, 0, ((void *)0));
9144 static inline __attribute__((always_inline)) void
9145 irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
9147 __irq_set_handler(irq, handle, 1, ((void *)0));
9149 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
9150 static inline __attribute__((always_inline)) void irq_set_status_flags(unsigned int irq, unsigned long set)
9152 irq_modify_status(irq, 0, set);
9154 static inline __attribute__((always_inline)) void irq_clear_status_flags(unsigned int irq, unsigned long clr)
9156 irq_modify_status(irq, clr, 0);
9158 static inline __attribute__((always_inline)) void irq_set_noprobe(unsigned int irq)
9160 irq_modify_status(irq, 0, IRQ_NOPROBE);
9162 static inline __attribute__((always_inline)) void irq_set_probe(unsigned int irq)
9164 irq_modify_status(irq, IRQ_NOPROBE, 0);
9166 static inline __attribute__((always_inline)) void irq_set_nothread(unsigned int irq)
9168 irq_modify_status(irq, 0, IRQ_NOTHREAD);
9170 static inline __attribute__((always_inline)) void irq_set_thread(unsigned int irq)
9172 irq_modify_status(irq, IRQ_NOTHREAD, 0);
9174 static inline __attribute__((always_inline)) void irq_set_nested_thread(unsigned int irq, bool nest)
9176 if (__builtin_constant_p(((nest))) ? !!((nest)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/irq.h", .line = 476, }; ______r = !!((nest)); ______f.miss_hit[______r]++; ______r; }))
9177 irq_set_status_flags(irq, IRQ_NESTED_THREAD);
9179 irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
/* IRQ descriptor lifetime management and accessor helpers. */
9181 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
9182 extern int create_irq(void);
9183 extern void destroy_irq(unsigned int irq);
9184 extern void dynamic_irq_cleanup(unsigned int irq);
/* dynamic_irq_init is just a cleanup-to-defaults of the descriptor. */
9185 static inline __attribute__((always_inline)) void dynamic_irq_init(unsigned int irq)
9187 dynamic_irq_cleanup(irq);
9189 extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
9190 extern int irq_set_handler_data(unsigned int irq, void *data);
9191 extern int irq_set_chip_data(unsigned int irq, void *data);
9192 extern int irq_set_irq_type(unsigned int irq, unsigned int type);
9193 extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
9194 extern struct irq_data *irq_get_irq_data(unsigned int irq);
/* irq_get_* helpers look up irq_data by irq number and return the
 * requested field, or NULL when no irq_data exists for that irq.
 * The irq_data_get_* variants take an irq_data pointer directly. */
9195 static inline __attribute__((always_inline)) struct irq_chip *irq_get_chip(unsigned int irq)
9197 struct irq_data *d = irq_get_irq_data(irq);
9198 return d ? d->chip : ((void *)0);
9200 static inline __attribute__((always_inline)) struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
9204 static inline __attribute__((always_inline)) void *irq_get_chip_data(unsigned int irq)
9206 struct irq_data *d = irq_get_irq_data(irq);
9207 return d ? d->chip_data : ((void *)0);
9209 static inline __attribute__((always_inline)) void *irq_data_get_irq_chip_data(struct irq_data *d)
9211 return d->chip_data;
9213 static inline __attribute__((always_inline)) void *irq_get_handler_data(unsigned int irq)
9215 struct irq_data *d = irq_get_irq_data(irq);
9216 return d ? d->handler_data : ((void *)0);
9218 static inline __attribute__((always_inline)) void *irq_data_get_irq_handler_data(struct irq_data *d)
9220 return d->handler_data;
9222 static inline __attribute__((always_inline)) struct msi_desc *irq_get_msi_desc(unsigned int irq)
9224 struct irq_data *d = irq_get_irq_data(irq);
9225 return d ? d->msi_desc : ((void *)0);
9227 static inline __attribute__((always_inline)) struct msi_desc *irq_data_get_msi(struct irq_data *d)
/* Bulk descriptor allocation: irq < 0 means "pick a number >= from";
 * the single-descriptor helpers below all forward to irq_alloc_descs. */
9231 int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
9232 void irq_free_descs(unsigned int irq, unsigned int cnt);
9233 int irq_reserve_irqs(unsigned int from, unsigned int cnt);
9234 static inline __attribute__((always_inline)) int irq_alloc_desc(int node)
9236 return irq_alloc_descs(-1, 0, 1, node);
9238 static inline __attribute__((always_inline)) int irq_alloc_desc_at(unsigned int at, int node)
9240 return irq_alloc_descs(at, at, 1, node);
9242 static inline __attribute__((always_inline)) int irq_alloc_desc_from(unsigned int from, int node)
9244 return irq_alloc_descs(-1, from, 1, node);
9246 static inline __attribute__((always_inline)) void irq_free_desc(unsigned int irq)
9248 irq_free_descs(irq, 1);
9250 static inline __attribute__((always_inline)) int irq_reserve_irq(unsigned int irq)
9252 return irq_reserve_irqs(irq, 1);
/* Generic irq-chip framework: register layout, per-type state, and the
 * generic chip container.  NOTE(review): several member lines and
 * closing braces appear elided in this excerpt. */
9254 struct irq_chip_regs {
9255 unsigned long enable;
9256 unsigned long disable;
9261 unsigned long polarity;
/* One irq_chip + register set + flow handler per trigger type. */
9263 struct irq_chip_type {
9264 struct irq_chip chip;
9265 struct irq_chip_regs regs;
9266 irq_flow_handler_t handler;
/* Container for a bank of interrupts sharing one register block;
 * chip_types[0] is a C89-style flexible trailing array. */
9269 struct irq_chip_generic {
9270 raw_spinlock_t lock;
9272 unsigned int irq_base;
9273 unsigned int irq_cnt;
9279 unsigned int num_ct;
9281 struct list_head list;
9282 struct irq_chip_type chip_types[0];
/* Setup-time flags for irq_setup_generic_chip(). */
9285 IRQ_GC_INIT_MASK_CACHE = 1 << 0,
9286 IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
/* Stock callbacks usable as irq_chip methods by generic-chip users. */
9288 void irq_gc_noop(struct irq_data *d);
9289 void irq_gc_mask_disable_reg(struct irq_data *d);
9290 void irq_gc_mask_set_bit(struct irq_data *d);
9291 void irq_gc_mask_clr_bit(struct irq_data *d);
9292 void irq_gc_unmask_enable_reg(struct irq_data *d);
9293 void irq_gc_ack_set_bit(struct irq_data *d);
9294 void irq_gc_ack_clr_bit(struct irq_data *d);
9295 void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
9296 void irq_gc_eoi(struct irq_data *d);
9297 int irq_gc_set_wake(struct irq_data *d, unsigned int on);
9298 struct irq_chip_generic *
9299 irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
9300 void *reg_base, irq_flow_handler_t handler);
9301 void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
9302 enum irq_gc_flags flags, unsigned int clr,
9304 int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
9305 void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
9306 unsigned int clr, unsigned int set);
/* container_of(d->chip, struct irq_chip_type, chip): recover the
 * irq_chip_type from its embedded chip member. */
9307 static inline __attribute__((always_inline)) struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
9309 return ({ const typeof( ((struct irq_chip_type *)0)->chip ) *__mptr = (d->chip); (struct irq_chip_type *)( (char *)__mptr - __builtin_offsetof(struct irq_chip_type,chip) );});
/* Raw-spinlock guards around the generic chip's shared registers. */
9311 static inline __attribute__((always_inline)) void irq_gc_lock(struct irq_chip_generic *gc)
9313 _raw_spin_lock(&gc->lock);
9315 static inline __attribute__((always_inline)) void irq_gc_unlock(struct irq_chip_generic *gc)
9317 _raw_spin_unlock(&gc->lock);
/* Per-CPU interrupt statistics (tail of the irq_cpustat_t definition;
 * the opening line is elided in this excerpt).  Aligned to 1<<6 = 64
 * bytes, presumably one cache line — TODO confirm against full header. */
9320 unsigned int __softirq_pending;
9321 unsigned int __nmi_count;
9322 unsigned int irq0_irqs;
9323 unsigned int apic_timer_irqs;
9324 unsigned int irq_spurious_count;
9325 unsigned int x86_platform_ipis;
9326 unsigned int apic_perf_irqs;
9327 unsigned int apic_irq_work_irqs;
9328 unsigned int irq_resched_count;
9329 unsigned int irq_call_count;
9330 unsigned int irq_tlb_count;
9331 unsigned int irq_thermal_count;
9332 unsigned int irq_threshold_count;
9333 } __attribute__((__aligned__((1 << (6))))) irq_cpustat_t;
/* One irq_cpustat_t per CPU (".data..percpu" section). */
9334 extern __attribute__((section(".data..percpu" ""))) __typeof__(irq_cpustat_t) irq_stat __attribute__((__aligned__((1 << (6)))));
9335 extern void ack_bad_irq(unsigned int irq);
9336 extern u64 arch_irq_stat_cpu(unsigned int cpu);
9337 extern u64 arch_irq_stat(void);
9338 extern void synchronize_irq(unsigned int irq);
9340 extern void account_system_vtime(struct task_struct *tsk);
/* Hard-IRQ context entry/exit bookkeeping. */
9341 extern void irq_enter(void);
9342 extern void irq_exit(void);
9344 unsigned long st_dev;
9345 unsigned long st_ino;
9346 unsigned short st_mode;
9347 unsigned short st_nlink;
9348 unsigned short st_uid;
9349 unsigned short st_gid;
9350 unsigned long st_rdev;
9351 unsigned long st_size;
9352 unsigned long st_blksize;
9353 unsigned long st_blocks;
9354 unsigned long st_atime;
9355 unsigned long st_atime_nsec;
9356 unsigned long st_mtime;
9357 unsigned long st_mtime_nsec;
9358 unsigned long st_ctime;
9359 unsigned long st_ctime_nsec;
9360 unsigned long __unused4;
9361 unsigned long __unused5;
9364 unsigned long long st_dev;
9365 unsigned char __pad0[4];
9366 unsigned long __st_ino;
9367 unsigned int st_mode;
9368 unsigned int st_nlink;
9369 unsigned long st_uid;
9370 unsigned long st_gid;
9371 unsigned long long st_rdev;
9372 unsigned char __pad3[4];
9374 unsigned long st_blksize;
9375 unsigned long long st_blocks;
9376 unsigned long st_atime;
9377 unsigned long st_atime_nsec;
9378 unsigned long st_mtime;
9379 unsigned int st_mtime_nsec;
9380 unsigned long st_ctime;
9381 unsigned long st_ctime_nsec;
9382 unsigned long long st_ino;
9384 struct __old_kernel_stat {
9385 unsigned short st_dev;
9386 unsigned short st_ino;
9387 unsigned short st_mode;
9388 unsigned short st_nlink;
9389 unsigned short st_uid;
9390 unsigned short st_gid;
9391 unsigned short st_rdev;
9392 unsigned long st_size;
9393 unsigned long st_atime;
9394 unsigned long st_mtime;
9395 unsigned long st_ctime;
9406 struct timespec atime;
9407 struct timespec mtime;
9408 struct timespec ctime;
9409 unsigned long blksize;
9410 unsigned long long blocks;
9413 struct __sysctl_args {
9420 unsigned long __unused[4];
9446 INOTIFY_MAX_USER_INSTANCES=1,
9447 INOTIFY_MAX_USER_WATCHES=2,
9448 INOTIFY_MAX_QUEUED_EVENTS=3
9461 KERN_REALROOTDEV=16,
9462 KERN_SPARC_REBOOT=21,
9466 KERN_PPC_HTABRECLAIM=25,
9467 KERN_PPC_ZEROPAGED=26,
9468 KERN_PPC_POWERSAVE_NAP=27,
9470 KERN_SG_BIG_BUFF=29,
9480 KERN_MAX_THREADS=39,
9485 KERN_SPARC_STOP_A=44,
9487 KERN_OVERFLOWUID=46,
9488 KERN_OVERFLOWGID=47,
9491 KERN_IEEE_EMULATION_WARNINGS=50,
9492 KERN_S390_USER_DEBUG_LOGGING=51,
9493 KERN_CORE_USES_PID=52,
9497 KERN_CORE_PATTERN=56,
9498 KERN_PANIC_ON_OOPS=57,
9500 KERN_HPPA_UNALIGNED=59,
9501 KERN_PRINTK_RATELIMIT=60,
9502 KERN_PRINTK_RATELIMIT_BURST=61,
9504 KERN_NGROUPS_MAX=63,
9505 KERN_SPARC_SCONS_PWROFF=64,
9507 KERN_UNKNOWN_NMI_PANIC=66,
9508 KERN_BOOTLOADER_TYPE=67,
9510 KERN_SETUID_DUMPABLE=69,
9512 KERN_ACPI_VIDEO_FLAGS=71,
9513 KERN_IA64_UNALIGNED=72,
9515 KERN_MAX_LOCK_DEPTH=74,
9516 KERN_NMI_WATCHDOG=75,
9517 KERN_PANIC_ON_NMI=76,
9525 VM_OVERCOMMIT_MEMORY=5,
9531 VM_DIRTY_BACKGROUND=11,
9534 VM_DIRTY_EXPIRE_CS=14,
9535 VM_NR_PDFLUSH_THREADS=15,
9536 VM_OVERCOMMIT_RATIO=16,
9538 VM_HUGETLB_PAGES=18,
9540 VM_LOWMEM_RESERVE_RATIO=20,
9541 VM_MIN_FREE_KBYTES=21,
9542 VM_MAX_MAP_COUNT=22,
9545 VM_HUGETLB_GROUP=25,
9546 VM_VFS_CACHE_PRESSURE=26,
9547 VM_LEGACY_VA_LAYOUT=27,
9548 VM_SWAP_TOKEN_TIMEOUT=28,
9549 VM_DROP_PAGECACHE=29,
9550 VM_PERCPU_PAGELIST_FRACTION=30,
9551 VM_ZONE_RECLAIM_MODE=31,
9584 RANDOM_ENTROPY_COUNT=2,
9585 RANDOM_READ_THRESH=3,
9586 RANDOM_WRITE_THRESH=4,
9598 BUS_ISA_PORT_BASE=2,
9599 BUS_ISA_PORT_SHIFT=3
9603 NET_CORE_WMEM_MAX=1,
9604 NET_CORE_RMEM_MAX=2,
9605 NET_CORE_WMEM_DEFAULT=3,
9606 NET_CORE_RMEM_DEFAULT=4,
9607 NET_CORE_MAX_BACKLOG=6,
9608 NET_CORE_FASTROUTE=7,
9609 NET_CORE_MSG_COST=8,
9610 NET_CORE_MSG_BURST=9,
9611 NET_CORE_OPTMEM_MAX=10,
9612 NET_CORE_HOT_LIST_LENGTH=11,
9613 NET_CORE_DIVERT_VERSION=12,
9614 NET_CORE_NO_CONG_THRESH=13,
9615 NET_CORE_NO_CONG=14,
9616 NET_CORE_LO_CONG=15,
9617 NET_CORE_MOD_CONG=16,
9618 NET_CORE_DEV_WEIGHT=17,
9619 NET_CORE_SOMAXCONN=18,
9621 NET_CORE_AEVENT_ETIME=20,
9622 NET_CORE_AEVENT_RSEQTH=21,
9623 NET_CORE_WARNINGS=22,
9627 NET_UNIX_DESTROY_DELAY=1,
9628 NET_UNIX_DELETE_DELAY=2,
9629 NET_UNIX_MAX_DGRAM_QLEN=3,
9633 NET_NF_CONNTRACK_MAX=1,
9634 NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
9635 NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
9636 NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
9637 NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
9638 NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
9639 NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
9640 NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
9641 NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
9642 NET_NF_CONNTRACK_UDP_TIMEOUT=10,
9643 NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
9644 NET_NF_CONNTRACK_ICMP_TIMEOUT=12,
9645 NET_NF_CONNTRACK_GENERIC_TIMEOUT=13,
9646 NET_NF_CONNTRACK_BUCKETS=14,
9647 NET_NF_CONNTRACK_LOG_INVALID=15,
9648 NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
9649 NET_NF_CONNTRACK_TCP_LOOSE=17,
9650 NET_NF_CONNTRACK_TCP_BE_LIBERAL=18,
9651 NET_NF_CONNTRACK_TCP_MAX_RETRANS=19,
9652 NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
9653 NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
9654 NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
9655 NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
9656 NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
9657 NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
9658 NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
9659 NET_NF_CONNTRACK_COUNT=27,
9660 NET_NF_CONNTRACK_ICMPV6_TIMEOUT=28,
9661 NET_NF_CONNTRACK_FRAG6_TIMEOUT=29,
9662 NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30,
9663 NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31,
9664 NET_NF_CONNTRACK_CHECKSUM=32,
9673 NET_IPV4_FIB_HASH=19,
9674 NET_IPV4_NETFILTER=20,
9675 NET_IPV4_TCP_TIMESTAMPS=33,
9676 NET_IPV4_TCP_WINDOW_SCALING=34,
9677 NET_IPV4_TCP_SACK=35,
9678 NET_IPV4_TCP_RETRANS_COLLAPSE=36,
9679 NET_IPV4_DEFAULT_TTL=37,
9680 NET_IPV4_AUTOCONFIG=38,
9681 NET_IPV4_NO_PMTU_DISC=39,
9682 NET_IPV4_TCP_SYN_RETRIES=40,
9683 NET_IPV4_IPFRAG_HIGH_THRESH=41,
9684 NET_IPV4_IPFRAG_LOW_THRESH=42,
9685 NET_IPV4_IPFRAG_TIME=43,
9686 NET_IPV4_TCP_MAX_KA_PROBES=44,
9687 NET_IPV4_TCP_KEEPALIVE_TIME=45,
9688 NET_IPV4_TCP_KEEPALIVE_PROBES=46,
9689 NET_IPV4_TCP_RETRIES1=47,
9690 NET_IPV4_TCP_RETRIES2=48,
9691 NET_IPV4_TCP_FIN_TIMEOUT=49,
9692 NET_IPV4_IP_MASQ_DEBUG=50,
9693 NET_TCP_SYNCOOKIES=51,
9696 NET_TCP_SYN_TAILDROP=54,
9697 NET_TCP_MAX_SYN_BACKLOG=55,
9698 NET_IPV4_LOCAL_PORT_RANGE=56,
9699 NET_IPV4_ICMP_ECHO_IGNORE_ALL=57,
9700 NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS=58,
9701 NET_IPV4_ICMP_SOURCEQUENCH_RATE=59,
9702 NET_IPV4_ICMP_DESTUNREACH_RATE=60,
9703 NET_IPV4_ICMP_TIMEEXCEED_RATE=61,
9704 NET_IPV4_ICMP_PARAMPROB_RATE=62,
9705 NET_IPV4_ICMP_ECHOREPLY_RATE=63,
9706 NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES=64,
9707 NET_IPV4_IGMP_MAX_MEMBERSHIPS=65,
9708 NET_TCP_TW_RECYCLE=66,
9709 NET_IPV4_ALWAYS_DEFRAG=67,
9710 NET_IPV4_TCP_KEEPALIVE_INTVL=68,
9711 NET_IPV4_INET_PEER_THRESHOLD=69,
9712 NET_IPV4_INET_PEER_MINTTL=70,
9713 NET_IPV4_INET_PEER_MAXTTL=71,
9714 NET_IPV4_INET_PEER_GC_MINTIME=72,
9715 NET_IPV4_INET_PEER_GC_MAXTIME=73,
9716 NET_TCP_ORPHAN_RETRIES=74,
9717 NET_TCP_ABORT_ON_OVERFLOW=75,
9718 NET_TCP_SYNACK_RETRIES=76,
9719 NET_TCP_MAX_ORPHANS=77,
9720 NET_TCP_MAX_TW_BUCKETS=78,
9722 NET_TCP_REORDERING=80,
9729 NET_TCP_ADV_WIN_SCALE=87,
9730 NET_IPV4_NONLOCAL_BIND=88,
9731 NET_IPV4_ICMP_RATELIMIT=89,
9732 NET_IPV4_ICMP_RATEMASK=90,
9733 NET_TCP_TW_REUSE=91,
9735 NET_TCP_LOW_LATENCY=93,
9736 NET_IPV4_IPFRAG_SECRET_INTERVAL=94,
9737 NET_IPV4_IGMP_MAX_MSF=96,
9738 NET_TCP_NO_METRICS_SAVE=97,
9739 NET_TCP_DEFAULT_WIN_SCALE=105,
9740 NET_TCP_MODERATE_RCVBUF=106,
9741 NET_TCP_TSO_WIN_DIVISOR=107,
9742 NET_TCP_BIC_BETA=108,
9743 NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109,
9744 NET_TCP_CONG_CONTROL=110,
9746 NET_IPV4_IPFRAG_MAX_DIST=112,
9747 NET_TCP_MTU_PROBING=113,
9748 NET_TCP_BASE_MSS=114,
9749 NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115,
9750 NET_TCP_DMA_COPYBREAK=116,
9751 NET_TCP_SLOW_START_AFTER_IDLE=117,
9752 NET_CIPSOV4_CACHE_ENABLE=118,
9753 NET_CIPSOV4_CACHE_BUCKET_SIZE=119,
9754 NET_CIPSOV4_RBM_OPTFMT=120,
9755 NET_CIPSOV4_RBM_STRICTVALID=121,
9756 NET_TCP_AVAIL_CONG_CONTROL=122,
9757 NET_TCP_ALLOWED_CONG_CONTROL=123,
9758 NET_TCP_MAX_SSTHRESH=124,
9759 NET_TCP_FRTO_RESPONSE=125,
9762 NET_IPV4_ROUTE_FLUSH=1,
9763 NET_IPV4_ROUTE_MIN_DELAY=2,
9764 NET_IPV4_ROUTE_MAX_DELAY=3,
9765 NET_IPV4_ROUTE_GC_THRESH=4,
9766 NET_IPV4_ROUTE_MAX_SIZE=5,
9767 NET_IPV4_ROUTE_GC_MIN_INTERVAL=6,
9768 NET_IPV4_ROUTE_GC_TIMEOUT=7,
9769 NET_IPV4_ROUTE_GC_INTERVAL=8,
9770 NET_IPV4_ROUTE_REDIRECT_LOAD=9,
9771 NET_IPV4_ROUTE_REDIRECT_NUMBER=10,
9772 NET_IPV4_ROUTE_REDIRECT_SILENCE=11,
9773 NET_IPV4_ROUTE_ERROR_COST=12,
9774 NET_IPV4_ROUTE_ERROR_BURST=13,
9775 NET_IPV4_ROUTE_GC_ELASTICITY=14,
9776 NET_IPV4_ROUTE_MTU_EXPIRES=15,
9777 NET_IPV4_ROUTE_MIN_PMTU=16,
9778 NET_IPV4_ROUTE_MIN_ADVMSS=17,
9779 NET_IPV4_ROUTE_SECRET_INTERVAL=18,
9780 NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS=19,
9784 NET_PROTO_CONF_ALL=-2,
9785 NET_PROTO_CONF_DEFAULT=-3
9789 NET_IPV4_CONF_FORWARDING=1,
9790 NET_IPV4_CONF_MC_FORWARDING=2,
9791 NET_IPV4_CONF_PROXY_ARP=3,
9792 NET_IPV4_CONF_ACCEPT_REDIRECTS=4,
9793 NET_IPV4_CONF_SECURE_REDIRECTS=5,
9794 NET_IPV4_CONF_SEND_REDIRECTS=6,
9795 NET_IPV4_CONF_SHARED_MEDIA=7,
9796 NET_IPV4_CONF_RP_FILTER=8,
9797 NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE=9,
9798 NET_IPV4_CONF_BOOTP_RELAY=10,
9799 NET_IPV4_CONF_LOG_MARTIANS=11,
9800 NET_IPV4_CONF_TAG=12,
9801 NET_IPV4_CONF_ARPFILTER=13,
9802 NET_IPV4_CONF_MEDIUM_ID=14,
9803 NET_IPV4_CONF_NOXFRM=15,
9804 NET_IPV4_CONF_NOPOLICY=16,
9805 NET_IPV4_CONF_FORCE_IGMP_VERSION=17,
9806 NET_IPV4_CONF_ARP_ANNOUNCE=18,
9807 NET_IPV4_CONF_ARP_IGNORE=19,
9808 NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
9809 NET_IPV4_CONF_ARP_ACCEPT=21,
9810 NET_IPV4_CONF_ARP_NOTIFY=22,
9814 NET_IPV4_NF_CONNTRACK_MAX=1,
9815 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
9816 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
9817 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
9818 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
9819 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
9820 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
9821 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
9822 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
9823 NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT=10,
9824 NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
9825 NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT=12,
9826 NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT=13,
9827 NET_IPV4_NF_CONNTRACK_BUCKETS=14,
9828 NET_IPV4_NF_CONNTRACK_LOG_INVALID=15,
9829 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
9830 NET_IPV4_NF_CONNTRACK_TCP_LOOSE=17,
9831 NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL=18,
9832 NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS=19,
9833 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
9834 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
9835 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
9836 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
9837 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
9838 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
9839 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
9840 NET_IPV4_NF_CONNTRACK_COUNT=27,
9841 NET_IPV4_NF_CONNTRACK_CHECKSUM=28,
9848 NET_IPV6_BINDV6ONLY=20,
9849 NET_IPV6_IP6FRAG_HIGH_THRESH=21,
9850 NET_IPV6_IP6FRAG_LOW_THRESH=22,
9851 NET_IPV6_IP6FRAG_TIME=23,
9852 NET_IPV6_IP6FRAG_SECRET_INTERVAL=24,
9853 NET_IPV6_MLD_MAX_MSF=25,
9856 NET_IPV6_ROUTE_FLUSH=1,
9857 NET_IPV6_ROUTE_GC_THRESH=2,
9858 NET_IPV6_ROUTE_MAX_SIZE=3,
9859 NET_IPV6_ROUTE_GC_MIN_INTERVAL=4,
9860 NET_IPV6_ROUTE_GC_TIMEOUT=5,
9861 NET_IPV6_ROUTE_GC_INTERVAL=6,
9862 NET_IPV6_ROUTE_GC_ELASTICITY=7,
9863 NET_IPV6_ROUTE_MTU_EXPIRES=8,
9864 NET_IPV6_ROUTE_MIN_ADVMSS=9,
9865 NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS=10
9868 NET_IPV6_FORWARDING=1,
9869 NET_IPV6_HOP_LIMIT=2,
9871 NET_IPV6_ACCEPT_RA=4,
9872 NET_IPV6_ACCEPT_REDIRECTS=5,
9873 NET_IPV6_AUTOCONF=6,
9874 NET_IPV6_DAD_TRANSMITS=7,
9875 NET_IPV6_RTR_SOLICITS=8,
9876 NET_IPV6_RTR_SOLICIT_INTERVAL=9,
9877 NET_IPV6_RTR_SOLICIT_DELAY=10,
9878 NET_IPV6_USE_TEMPADDR=11,
9879 NET_IPV6_TEMP_VALID_LFT=12,
9880 NET_IPV6_TEMP_PREFERED_LFT=13,
9881 NET_IPV6_REGEN_MAX_RETRY=14,
9882 NET_IPV6_MAX_DESYNC_FACTOR=15,
9883 NET_IPV6_MAX_ADDRESSES=16,
9884 NET_IPV6_FORCE_MLD_VERSION=17,
9885 NET_IPV6_ACCEPT_RA_DEFRTR=18,
9886 NET_IPV6_ACCEPT_RA_PINFO=19,
9887 NET_IPV6_ACCEPT_RA_RTR_PREF=20,
9888 NET_IPV6_RTR_PROBE_INTERVAL=21,
9889 NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
9890 NET_IPV6_PROXY_NDP=23,
9891 NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
9895 NET_IPV6_ICMP_RATELIMIT=1
9898 NET_NEIGH_MCAST_SOLICIT=1,
9899 NET_NEIGH_UCAST_SOLICIT=2,
9900 NET_NEIGH_APP_SOLICIT=3,
9901 NET_NEIGH_RETRANS_TIME=4,
9902 NET_NEIGH_REACHABLE_TIME=5,
9903 NET_NEIGH_DELAY_PROBE_TIME=6,
9904 NET_NEIGH_GC_STALE_TIME=7,
9905 NET_NEIGH_UNRES_QLEN=8,
9906 NET_NEIGH_PROXY_QLEN=9,
9907 NET_NEIGH_ANYCAST_DELAY=10,
9908 NET_NEIGH_PROXY_DELAY=11,
9909 NET_NEIGH_LOCKTIME=12,
9910 NET_NEIGH_GC_INTERVAL=13,
9911 NET_NEIGH_GC_THRESH1=14,
9912 NET_NEIGH_GC_THRESH2=15,
9913 NET_NEIGH_GC_THRESH3=16,
9914 NET_NEIGH_RETRANS_TIME_MS=17,
9915 NET_NEIGH_REACHABLE_TIME_MS=18,
9921 NET_IPX_PPROP_BROADCASTING=1,
9922 NET_IPX_FORWARDING=2
9932 NET_LLC_STATION_ACK_TIMEOUT=1,
9935 NET_LLC2_ACK_TIMEOUT=1,
9936 NET_LLC2_P_TIMEOUT=2,
9937 NET_LLC2_REJ_TIMEOUT=3,
9938 NET_LLC2_BUSY_TIMEOUT=4,
9941 NET_ATALK_AARP_EXPIRY_TIME=1,
9942 NET_ATALK_AARP_TICK_TIME=2,
9943 NET_ATALK_AARP_RETRANSMIT_LIMIT=3,
9944 NET_ATALK_AARP_RESOLVE_TIME=4
9947 NET_NETROM_DEFAULT_PATH_QUALITY=1,
9948 NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER=2,
9949 NET_NETROM_NETWORK_TTL_INITIALISER=3,
9950 NET_NETROM_TRANSPORT_TIMEOUT=4,
9951 NET_NETROM_TRANSPORT_MAXIMUM_TRIES=5,
9952 NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY=6,
9953 NET_NETROM_TRANSPORT_BUSY_DELAY=7,
9954 NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE=8,
9955 NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT=9,
9956 NET_NETROM_ROUTING_CONTROL=10,
9957 NET_NETROM_LINK_FAILS_COUNT=11,
9961 NET_AX25_IP_DEFAULT_MODE=1,
9962 NET_AX25_DEFAULT_MODE=2,
9963 NET_AX25_BACKOFF_TYPE=3,
9964 NET_AX25_CONNECT_MODE=4,
9965 NET_AX25_STANDARD_WINDOW=5,
9966 NET_AX25_EXTENDED_WINDOW=6,
9967 NET_AX25_T1_TIMEOUT=7,
9968 NET_AX25_T2_TIMEOUT=8,
9969 NET_AX25_T3_TIMEOUT=9,
9970 NET_AX25_IDLE_TIMEOUT=10,
9973 NET_AX25_PROTOCOL=13,
9974 NET_AX25_DAMA_SLAVE_TIMEOUT=14
9977 NET_ROSE_RESTART_REQUEST_TIMEOUT=1,
9978 NET_ROSE_CALL_REQUEST_TIMEOUT=2,
9979 NET_ROSE_RESET_REQUEST_TIMEOUT=3,
9980 NET_ROSE_CLEAR_REQUEST_TIMEOUT=4,
9981 NET_ROSE_ACK_HOLD_BACK_TIMEOUT=5,
9982 NET_ROSE_ROUTING_CONTROL=6,
9983 NET_ROSE_LINK_FAIL_TIMEOUT=7,
9985 NET_ROSE_WINDOW_SIZE=9,
9986 NET_ROSE_NO_ACTIVITY_TIMEOUT=10
9989 NET_X25_RESTART_REQUEST_TIMEOUT=1,
9990 NET_X25_CALL_REQUEST_TIMEOUT=2,
9991 NET_X25_RESET_REQUEST_TIMEOUT=3,
9992 NET_X25_CLEAR_REQUEST_TIMEOUT=4,
9993 NET_X25_ACK_HOLD_BACK_TIMEOUT=5,
9998 NET_TR_RIF_TIMEOUT=1
10001 NET_DECNET_NODE_TYPE = 1,
10002 NET_DECNET_NODE_ADDRESS = 2,
10003 NET_DECNET_NODE_NAME = 3,
10004 NET_DECNET_DEFAULT_DEVICE = 4,
10005 NET_DECNET_TIME_WAIT = 5,
10006 NET_DECNET_DN_COUNT = 6,
10007 NET_DECNET_DI_COUNT = 7,
10008 NET_DECNET_DR_COUNT = 8,
10009 NET_DECNET_DST_GC_INTERVAL = 9,
10010 NET_DECNET_CONF = 10,
10011 NET_DECNET_NO_FC_MAX_CWND = 11,
10012 NET_DECNET_MEM = 12,
10013 NET_DECNET_RMEM = 13,
10014 NET_DECNET_WMEM = 14,
10015 NET_DECNET_DEBUG_LEVEL = 255
10018 NET_DECNET_CONF_LOOPBACK = -2,
10019 NET_DECNET_CONF_DDCMP = -3,
10020 NET_DECNET_CONF_PPP = -4,
10021 NET_DECNET_CONF_X25 = -5,
10022 NET_DECNET_CONF_GRE = -6,
10023 NET_DECNET_CONF_ETHER = -7
10026 NET_DECNET_CONF_DEV_PRIORITY = 1,
10027 NET_DECNET_CONF_DEV_T1 = 2,
10028 NET_DECNET_CONF_DEV_T2 = 3,
10029 NET_DECNET_CONF_DEV_T3 = 4,
10030 NET_DECNET_CONF_DEV_FORWARDING = 5,
10031 NET_DECNET_CONF_DEV_BLKSIZE = 6,
10032 NET_DECNET_CONF_DEV_STATE = 7
10035 NET_SCTP_RTO_INITIAL = 1,
10036 NET_SCTP_RTO_MIN = 2,
10037 NET_SCTP_RTO_MAX = 3,
10038 NET_SCTP_RTO_ALPHA = 4,
10039 NET_SCTP_RTO_BETA = 5,
10040 NET_SCTP_VALID_COOKIE_LIFE = 6,
10041 NET_SCTP_ASSOCIATION_MAX_RETRANS = 7,
10042 NET_SCTP_PATH_MAX_RETRANS = 8,
10043 NET_SCTP_MAX_INIT_RETRANSMITS = 9,
10044 NET_SCTP_HB_INTERVAL = 10,
10045 NET_SCTP_PRESERVE_ENABLE = 11,
10046 NET_SCTP_MAX_BURST = 12,
10047 NET_SCTP_ADDIP_ENABLE = 13,
10048 NET_SCTP_PRSCTP_ENABLE = 14,
10049 NET_SCTP_SNDBUF_POLICY = 15,
10050 NET_SCTP_SACK_TIMEOUT = 16,
10051 NET_SCTP_RCVBUF_POLICY = 17,
10054 NET_BRIDGE_NF_CALL_ARPTABLES = 1,
10055 NET_BRIDGE_NF_CALL_IPTABLES = 2,
10056 NET_BRIDGE_NF_CALL_IP6TABLES = 3,
10057 NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
10058 NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
10061 NET_IRDA_DISCOVERY=1,
10062 NET_IRDA_DEVNAME=2,
10064 NET_IRDA_FAST_POLL=4,
10065 NET_IRDA_DISCOVERY_SLOTS=5,
10066 NET_IRDA_DISCOVERY_TIMEOUT=6,
10067 NET_IRDA_SLOT_TIMEOUT=7,
10068 NET_IRDA_MAX_BAUD_RATE=8,
10069 NET_IRDA_MIN_TX_TURN_TIME=9,
10070 NET_IRDA_MAX_TX_DATA_SIZE=10,
10071 NET_IRDA_MAX_TX_WINDOW=11,
10072 NET_IRDA_MAX_NOREPLY_TIME=12,
10073 NET_IRDA_WARN_NOREPLY_TIME=13,
10074 NET_IRDA_LAP_KEEPALIVE_TIME=14,
10105 FS_DQ_CACHE_HITS = 5,
10106 FS_DQ_ALLOCATED = 6,
10109 FS_DQ_WARNINGS = 9,
10122 DEV_CDROM_AUTOCLOSE=2,
10123 DEV_CDROM_AUTOEJECT=3,
10126 DEV_CDROM_CHECK_MEDIA=6
10129 DEV_PARPORT_DEFAULT=-3
10132 DEV_RAID_SPEED_LIMIT_MIN=1,
10133 DEV_RAID_SPEED_LIMIT_MAX=2
10136 DEV_PARPORT_DEFAULT_TIMESLICE=1,
10137 DEV_PARPORT_DEFAULT_SPINTIME=2
10140 DEV_PARPORT_SPINTIME=1,
10141 DEV_PARPORT_BASE_ADDR=2,
10144 DEV_PARPORT_MODES=5,
10145 DEV_PARPORT_DEVICES=6,
10146 DEV_PARPORT_AUTOPROBE=16
10149 DEV_PARPORT_DEVICES_ACTIVE=-3,
10152 DEV_PARPORT_DEVICE_TIMESLICE=1,
10155 DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES=1,
10156 DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES=2,
10157 DEV_MAC_HID_MOUSE_BUTTON_EMULATION=3,
10158 DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE=4,
10159 DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE=5,
10160 DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES=6
10163 DEV_SCSI_LOGGING_LEVEL=1,
10166 DEV_IPMI_POWEROFF_POWERCYCLE=1,
10170 ABI_DEFHANDLER_COFF=1,
10171 ABI_DEFHANDLER_ELF=2,
10172 ABI_DEFHANDLER_LCALL7=3,
10173 ABI_DEFHANDLER_LIBCSO=4,
10175 ABI_FAKE_UTSNAME=6,
/* RCU (read-copy-update) public API declarations.  NOTE(review): brace
 * lines of the inline stubs appear elided in this excerpt. */
10177 extern void rcutorture_record_test_transition(void);
10178 extern void rcutorture_record_progress(unsigned long vernum);
/* Members of struct rcu_head (opening line elided): intrusive list link
 * plus the callback invoked after a grace period. */
10180 struct rcu_head *next;
10181 void (*func)(struct rcu_head *head);
10183 extern void call_rcu_sched(struct rcu_head *head,
10184 void (*func)(struct rcu_head *rcu));
10185 extern void synchronize_sched(void);
10186 extern void rcu_barrier_bh(void);
10187 extern void rcu_barrier_sched(void);
/* rcu-bh read-side critical sections are delimited by disabling
 * bottom halves. */
10188 static inline __attribute__((always_inline)) void __rcu_read_lock_bh(void)
10190 local_bh_disable();
10192 static inline __attribute__((always_inline)) void __rcu_read_unlock_bh(void)
10196 extern void __rcu_read_lock(void);
10197 extern void __rcu_read_unlock(void);
10198 void synchronize_rcu(void);
/* Quiescent-state reporting and callback processing hooks. */
10199 extern void rcu_sched_qs(int cpu);
10200 extern void rcu_bh_qs(int cpu);
10201 extern void rcu_check_callbacks(int cpu, int user);
10202 struct notifier_block;
/* No-ops in this configuration (nohz not handled here). */
10203 static inline __attribute__((always_inline)) void rcu_enter_nohz(void)
10206 static inline __attribute__((always_inline)) void rcu_exit_nohz(void)
10209 extern void rcu_init(void);
10210 extern void rcu_note_context_switch(int cpu);
10211 extern int rcu_needs_cpu(int cpu);
10212 extern void rcu_cpu_stall_reset(void);
/* Guest context switches are reported like ordinary ones. */
10213 static inline __attribute__((always_inline)) void rcu_virt_note_context_switch(int cpu)
10215 rcu_note_context_switch(cpu);
10217 extern void exit_rcu(void);
10218 extern void synchronize_rcu_bh(void);
10219 extern void synchronize_sched_expedited(void);
10220 extern void synchronize_rcu_expedited(void);
/* Expedited bh-flavor grace period is implemented via the sched one. */
10221 static inline __attribute__((always_inline)) void synchronize_rcu_bh_expedited(void)
10223 synchronize_sched_expedited();
10225 extern void rcu_barrier(void);
10226 extern unsigned long rcutorture_testseq;
10227 extern unsigned long rcutorture_vernum;
10228 extern long rcu_batches_completed(void);
10229 extern long rcu_batches_completed_bh(void);
10230 extern long rcu_batches_completed_sched(void);
10231 extern void rcu_force_quiescent_state(void);
10232 extern void rcu_bh_force_quiescent_state(void);
10233 extern void rcu_sched_force_quiescent_state(void);
/* With a single online CPU, any blocking point is a grace period. */
10234 static inline __attribute__((always_inline)) int rcu_blocking_is_gp(void)
10236 return cpumask_weight(cpu_online_mask) == 1;
10238 extern void rcu_scheduler_starting(void);
10239 extern int rcu_scheduler_active __attribute__((__section__(".data..read_mostly")));
/* Stack-allocated rcu_head debug hooks (empty in this configuration). */
10240 static inline __attribute__((always_inline)) void init_rcu_head_on_stack(struct rcu_head *head)
10243 static inline __attribute__((always_inline)) void destroy_rcu_head_on_stack(struct rcu_head *head)
/* lockdep maps tracking the three RCU read-side lock classes. */
10246 extern struct lockdep_map rcu_lock_map;
10247 extern struct lockdep_map rcu_bh_lock_map;
10248 extern struct lockdep_map rcu_sched_lock_map;
10249 extern int debug_lockdep_rcu_enabled(void);
10250 static inline __attribute__((always_inline)) int rcu_read_lock_held(void)
10252 if (__builtin_constant_p(((!debug_lockdep_rcu_enabled()))) ? !!((!debug_lockdep_rcu_enabled())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 216, }; ______r = !!((!debug_lockdep_rcu_enabled())); ______f.miss_hit[______r]++; ______r; }))
10254 return lock_is_held(&rcu_lock_map);
10256 extern int rcu_read_lock_bh_held(void);
10257 static inline __attribute__((always_inline)) int rcu_read_lock_sched_held(void)
10259 int lockdep_opinion = 0;
10260 if (__builtin_constant_p(((!debug_lockdep_rcu_enabled()))) ? !!((!debug_lockdep_rcu_enabled())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 247, }; ______r = !!((!debug_lockdep_rcu_enabled())); ______f.miss_hit[______r]++; ______r; }))
10262 if (__builtin_constant_p(((debug_locks))) ? !!((debug_locks)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 249, }; ______r = !!((debug_locks)); ______f.miss_hit[______r]++; ______r; }))
10263 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
10264 return lockdep_opinion || (current_thread_info()->preempt_count) != 0 || ({ unsigned long _flags; do { ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _flags = arch_local_save_flags(); } while (0); ({ ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(_flags); }); });
10266 static inline __attribute__((always_inline)) void rcu_read_lock(void)
10270 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
10272 static inline __attribute__((always_inline)) void rcu_read_unlock(void)
10274 lock_release(&rcu_lock_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
10276 __rcu_read_unlock();
10278 static inline __attribute__((always_inline)) void rcu_read_lock_bh(void)
10280 __rcu_read_lock_bh();
10282 lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
10284 static inline __attribute__((always_inline)) void rcu_read_unlock_bh(void)
10286 lock_release(&rcu_bh_lock_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
10288 __rcu_read_unlock_bh();
10290 static inline __attribute__((always_inline)) void rcu_read_lock_sched(void)
10292 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
10294 lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
10296 static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void rcu_read_lock_sched_notrace(void)
10298 do { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } while (0);
10301 static inline __attribute__((always_inline)) void rcu_read_unlock_sched(void)
10303 lock_release(&rcu_sched_lock_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
10305 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 681, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 681, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 681, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 681, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
10307 static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void rcu_read_unlock_sched_notrace(void)
10310 do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 688, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 688, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 688, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 688, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
10312 struct rcu_synchronize {
10313 struct rcu_head head;
10314 struct completion completion;
10316 extern void wakeme_after_rcu(struct rcu_head *head);
10317 extern void call_rcu(struct rcu_head *head,
10318 void (*func)(struct rcu_head *head));
10319 extern void call_rcu_bh(struct rcu_head *head,
10320 void (*func)(struct rcu_head *head));
10321 static inline __attribute__((always_inline)) void debug_rcu_head_queue(struct rcu_head *head)
10324 static inline __attribute__((always_inline)) void debug_rcu_head_unqueue(struct rcu_head *head)
10327 static inline __attribute__((always_inline)) __attribute__((always_inline)) bool __is_kfree_rcu_offset(unsigned long offset)
10329 return offset < 4096;
10331 static inline __attribute__((always_inline)) __attribute__((always_inline))
10332 void __kfree_rcu(struct rcu_head *head, unsigned long offset)
10334 typedef void (*rcu_callback)(struct rcu_head *);
10335 do { ((void)sizeof(char[1 - 2*!!(!__builtin_constant_p(offset))])); if (__builtin_constant_p(((!__builtin_constant_p(offset)))) ? !!((!__builtin_constant_p(offset))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 822, }; ______r = !!((!__builtin_constant_p(offset))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0);
10336 do { ((void)sizeof(char[1 - 2*!!(!__is_kfree_rcu_offset(offset))])); if (__builtin_constant_p(((!__is_kfree_rcu_offset(offset)))) ? !!((!__is_kfree_rcu_offset(offset))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 825, }; ______r = !!((!__is_kfree_rcu_offset(offset))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0);
10337 call_rcu(head, (rcu_callback)offset);
10339 extern void kfree(const void *);
10340 static inline __attribute__((always_inline)) void __rcu_reclaim(struct rcu_head *head)
10342 unsigned long offset = (unsigned long)head->func;
10343 if (__builtin_constant_p(((__is_kfree_rcu_offset(offset)))) ? !!((__is_kfree_rcu_offset(offset))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 836, }; ______r = !!((__is_kfree_rcu_offset(offset))); ______f.miss_hit[______r]++; ______r; }))
10344 kfree((void *)head - offset);
10350 struct ctl_table_root;
10351 struct ctl_table_set {
10352 struct list_head list;
10353 struct ctl_table_set *parent;
10354 int (*is_seen)(struct ctl_table_set *);
10356 extern void setup_sysctl_set(struct ctl_table_set *p,
10357 struct ctl_table_set *parent,
10358 int (*is_seen)(struct ctl_table_set *));
10359 struct ctl_table_header;
10360 extern void sysctl_head_get(struct ctl_table_header *);
10361 extern void sysctl_head_put(struct ctl_table_header *);
10362 extern int sysctl_is_seen(struct ctl_table_header *);
10363 extern struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *);
10364 extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev);
10365 extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
10366 struct ctl_table_header *prev);
10367 extern void sysctl_head_finish(struct ctl_table_header *prev);
10368 extern int sysctl_perm(struct ctl_table_root *root,
10369 struct ctl_table *table, int op);
10370 typedef struct ctl_table ctl_table;
10371 typedef int proc_handler (struct ctl_table *ctl, int write,
10372 void *buffer, size_t *lenp, loff_t *ppos);
10373 extern int proc_dostring(struct ctl_table *, int,
10374 void *, size_t *, loff_t *);
10375 extern int proc_dointvec(struct ctl_table *, int,
10376 void *, size_t *, loff_t *);
10377 extern int proc_dointvec_minmax(struct ctl_table *, int,
10378 void *, size_t *, loff_t *);
10379 extern int proc_dointvec_jiffies(struct ctl_table *, int,
10380 void *, size_t *, loff_t *);
10381 extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
10382 void *, size_t *, loff_t *);
10383 extern int proc_dointvec_ms_jiffies(struct ctl_table *, int,
10384 void *, size_t *, loff_t *);
10385 extern int proc_doulongvec_minmax(struct ctl_table *, int,
10386 void *, size_t *, loff_t *);
10387 extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
10388 void *, size_t *, loff_t *);
10389 extern int proc_do_large_bitmap(struct ctl_table *, int,
10390 void *, size_t *, loff_t *);
10393 const char *procname;
10397 struct ctl_table *child;
10398 struct ctl_table *parent;
10399 proc_handler *proc_handler;
10403 struct ctl_table_root {
10404 struct list_head root_list;
10405 struct ctl_table_set default_set;
10406 struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
10407 struct nsproxy *namespaces);
10408 int (*permissions)(struct ctl_table_root *root,
10409 struct nsproxy *namespaces, struct ctl_table *table);
10411 struct ctl_table_header
10415 struct ctl_table *ctl_table;
10416 struct list_head ctl_entry;
10420 struct rcu_head rcu;
10422 struct completion *unregistering;
10423 struct ctl_table *ctl_table_arg;
10424 struct ctl_table_root *root;
10425 struct ctl_table_set *set;
10426 struct ctl_table *attached_by;
10427 struct ctl_table *attached_to;
10428 struct ctl_table_header *parent;
10431 const char *procname;
10433 void register_sysctl_root(struct ctl_table_root *root);
10434 struct ctl_table_header *__register_sysctl_paths(
10435 struct ctl_table_root *root, struct nsproxy *namespaces,
10436 const struct ctl_path *path, struct ctl_table *table);
10437 struct ctl_table_header *register_sysctl_table(struct ctl_table * table);
10438 struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
10439 struct ctl_table *table);
10440 void unregister_sysctl_table(struct ctl_table_header * table);
10441 int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table);
10442 extern char modprobe_path[];
10443 extern int __request_module(bool wait, const char *name, ...)
10444 __attribute__((format(printf, 2, 3)));
10452 struct subprocess_info {
10453 struct work_struct work;
10454 struct completion *complete;
10458 enum umh_wait wait;
10460 int (*init)(struct subprocess_info *info, struct cred *new);
10461 void (*cleanup)(struct subprocess_info *info);
10464 struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
10465 char **envp, gfp_t gfp_mask);
10466 void call_usermodehelper_setfns(struct subprocess_info *info,
10467 int (*init)(struct subprocess_info *info, struct cred *new),
10468 void (*cleanup)(struct subprocess_info *info),
10470 int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
10471 void call_usermodehelper_freeinfo(struct subprocess_info *info);
10472 static inline __attribute__((always_inline)) int
10473 call_usermodehelper_fns(char *path, char **argv, char **envp,
10474 enum umh_wait wait,
10475 int (*init)(struct subprocess_info *info, struct cred *new),
10476 void (*cleanup)(struct subprocess_info *), void *data)
10478 struct subprocess_info *info;
10479 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? ((( gfp_t)0x20u)) : ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u));
10480 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
10481 if (__builtin_constant_p(((info == ((void *)0)))) ? !!((info == ((void *)0))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kmod.h", .line = 98, }; ______r = !!((info == ((void *)0))); ______f.miss_hit[______r]++; ______r; }))
10483 call_usermodehelper_setfns(info, init, cleanup, data);
10484 return call_usermodehelper_exec(info, wait);
10486 static inline __attribute__((always_inline)) int
10487 call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
10489 return call_usermodehelper_fns(path, argv, envp, wait,
10490 ((void *)0), ((void *)0), ((void *)0));
10492 extern struct ctl_table usermodehelper_table[];
10493 extern void usermodehelper_init(void);
10494 extern int usermodehelper_disable(void);
10495 extern void usermodehelper_enable(void);
10496 extern bool usermodehelper_is_disabled(void);
10497 struct user_i387_struct {
10507 struct user_fxsr_struct {
10508 unsigned short cwd;
10509 unsigned short swd;
10510 unsigned short twd;
10511 unsigned short fop;
10519 long xmm_space[32];
10522 struct user_regs_struct {
10534 unsigned long orig_ax;
10537 unsigned long flags;
10542 struct user_regs_struct regs;
10544 struct user_i387_struct i387;
10545 unsigned long int u_tsize;
10546 unsigned long int u_dsize;
10547 unsigned long int u_ssize;
10548 unsigned long start_code;
10549 unsigned long start_stack;
10552 unsigned long u_ar0;
10553 struct user_i387_struct *u_fpstate;
10554 unsigned long magic;
10558 struct user_ymmh_regs {
10559 __u32 ymmh_space[64];
10561 struct user_xsave_hdr {
10563 __u64 reserved1[2];
10564 __u64 reserved2[5];
10566 struct user_xstateregs {
10568 __u64 fpx_space[58];
10569 __u64 xstate_fx_sw[6];
10571 struct user_xsave_hdr xsave_hdr;
10572 struct user_ymmh_regs ymmh;
10574 typedef unsigned long elf_greg_t;
10575 typedef elf_greg_t elf_gregset_t[(sizeof(struct user_regs_struct) / sizeof(elf_greg_t))];
10576 typedef struct user_i387_struct elf_fpregset_t;
10577 typedef struct user_fxsr_struct elf_fpxregset_t;
10578 extern const char VDSO32_PRELINK[];
10579 extern void __kernel_sigreturn;
10580 extern void __kernel_rt_sigreturn;
10581 extern const char vdso32_int80_start, vdso32_int80_end;
10582 extern const char vdso32_syscall_start, vdso32_syscall_end;
10583 extern const char vdso32_sysenter_start, vdso32_sysenter_end;
10584 extern unsigned int vdso_enabled;
10586 unsigned int entry_number;
10587 unsigned int base_addr;
10588 unsigned int limit;
10589 unsigned int seg_32bit:1;
10590 unsigned int contents:2;
10591 unsigned int read_exec_only:1;
10592 unsigned int limit_in_pages:1;
10593 unsigned int seg_not_present:1;
10594 unsigned int useable:1;
10596 static inline __attribute__((always_inline)) void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
10598 desc->limit0 = info->limit & 0x0ffff;
10599 desc->base0 = (info->base_addr & 0x0000ffff);
10600 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
10601 desc->type = (info->read_exec_only ^ 1) << 1;
10602 desc->type |= info->contents << 2;
10605 desc->p = info->seg_not_present ^ 1;
10606 desc->limit = (info->limit & 0xf0000) >> 16;
10607 desc->avl = info->useable;
10608 desc->d = info->seg_32bit;
10609 desc->g = info->limit_in_pages;
10610 desc->base2 = (info->base_addr & 0xff000000) >> 24;
10613 extern struct desc_ptr idt_descr;
10614 extern gate_desc idt_table[];
10616 struct desc_struct gdt[32];
10617 } __attribute__((aligned(((1UL) << 12))));
10618 extern __attribute__((section(".data..percpu" "..page_aligned"))) __typeof__(struct gdt_page) gdt_page __attribute__((aligned(((1UL) << 12))));
10619 static inline __attribute__((always_inline)) struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10621 return (*({ do { const void *__vpp_verify = (typeof((&(gdt_page))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(gdt_page))) *)(&(gdt_page)))); (typeof((typeof(*(&(gdt_page))) *)(&(gdt_page)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).gdt;
10623 static inline __attribute__((always_inline)) void pack_gate(gate_desc *gate, unsigned char type,
10624 unsigned long base, unsigned dpl, unsigned flags,
10625 unsigned short seg)
10627 gate->a = (seg << 16) | (base & 0xffff);
10628 gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10630 static inline __attribute__((always_inline)) int desc_empty(const void *ptr)
10632 const u32 *desc = ptr;
10633 return !(desc[0] | desc[1]);
10635 static inline __attribute__((always_inline)) void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10637 __builtin_memcpy(&idt[entry], gate, sizeof(*gate));
10639 static inline __attribute__((always_inline)) void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10641 __builtin_memcpy(&ldt[entry], desc, 8);
10643 static inline __attribute__((always_inline)) void
10644 native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
10648 case DESC_TSS: size = sizeof(tss_desc); break;
10649 case DESC_LDT: size = sizeof(ldt_desc); break;
10650 default: size = sizeof(*gdt); break;
10652 __builtin_memcpy(&gdt[entry], desc, size);
10654 static inline __attribute__((always_inline)) void pack_descriptor(struct desc_struct *desc, unsigned long base,
10655 unsigned long limit, unsigned char type,
10656 unsigned char flags)
10658 desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
10659 desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
10660 (limit & 0x000f0000) | ((type & 0xff) << 8) |
10661 ((flags & 0xf) << 20);
10664 static inline __attribute__((always_inline)) void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
10666 pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
10668 static inline __attribute__((always_inline)) void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
10670 struct desc_struct *d = get_cpu_gdt_table(cpu);
10672 set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
10673 __builtin_offsetof(struct tss_struct,io_bitmap) + (65536/8) +
10674 sizeof(unsigned long) - 1);
10675 write_gdt_entry(d, entry, &tss, DESC_TSS);
10677 static inline __attribute__((always_inline)) void native_set_ldt(const void *addr, unsigned int entries)
10679 if (__builtin_constant_p((((__builtin_constant_p(entries == 0) ? !!(entries == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 194, }; ______r = __builtin_expect(!!(entries == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(entries == 0) ? !!(entries == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 194, }; ______r = __builtin_expect(!!(entries == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 194, }; ______r = !!(((__builtin_constant_p(entries == 0) ? !!(entries == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 194, }; ______r = __builtin_expect(!!(entries == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
10680 asm volatile("lldt %w0"::"q" (0));
10682 unsigned cpu = debug_smp_processor_id();
10684 set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
10686 write_gdt_entry(get_cpu_gdt_table(cpu), ((12)+5),
10688 asm volatile("lldt %w0"::"q" (((12)+5)*8));
10691 static inline __attribute__((always_inline)) void native_load_tr_desc(void)
10693 asm volatile("ltr %w0"::"q" (((12)+4)*8));
10695 static inline __attribute__((always_inline)) void native_load_gdt(const struct desc_ptr *dtr)
10697 asm volatile("lgdt %0"::"m" (*dtr));
10699 static inline __attribute__((always_inline)) void native_load_idt(const struct desc_ptr *dtr)
10701 asm volatile("lidt %0"::"m" (*dtr));
10703 static inline __attribute__((always_inline)) void native_store_gdt(struct desc_ptr *dtr)
10705 asm volatile("sgdt %0":"=m" (*dtr));
10707 static inline __attribute__((always_inline)) void native_store_idt(struct desc_ptr *dtr)
10709 asm volatile("sidt %0":"=m" (*dtr));
10711 static inline __attribute__((always_inline)) unsigned long native_store_tr(void)
10714 asm volatile("str %0":"=r" (tr));
10717 static inline __attribute__((always_inline)) void native_load_tls(struct thread_struct *t, unsigned int cpu)
10719 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10721 for (i = 0; i < 3; i++)
10722 gdt[6 + i] = t->tls_array[i];
10724 static inline __attribute__((always_inline)) void clear_LDT(void)
10726 set_ldt(((void *)0), 0);
10728 static inline __attribute__((always_inline)) void load_LDT_nolock(mm_context_t *pc)
10730 set_ldt(pc->ldt, pc->size);
10732 static inline __attribute__((always_inline)) void load_LDT(mm_context_t *pc)
10734 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
10735 load_LDT_nolock(pc);
10736 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 284, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 284, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 284, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 284, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
10738 static inline __attribute__((always_inline)) unsigned long get_desc_base(const struct desc_struct *desc)
10740 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
10742 static inline __attribute__((always_inline)) void set_desc_base(struct desc_struct *desc, unsigned long base)
10744 desc->base0 = base & 0xffff;
10745 desc->base1 = (base >> 16) & 0xff;
10746 desc->base2 = (base >> 24) & 0xff;
10748 static inline __attribute__((always_inline)) unsigned long get_desc_limit(const struct desc_struct *desc)
10750 return desc->limit0 | (desc->limit << 16);
10752 static inline __attribute__((always_inline)) void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10754 desc->limit0 = limit & 0xffff;
10755 desc->limit = (limit >> 16) & 0xf;
10757 static inline __attribute__((always_inline)) void _set_gate(int gate, unsigned type, void *addr,
10758 unsigned dpl, unsigned ist, unsigned seg)
10761 pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
10762 write_idt_entry(idt_table, gate, &s);
10764 static inline __attribute__((always_inline)) void set_intr_gate(unsigned int n, void *addr)
10766 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 331, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 331, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 331, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 331, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (331), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
10767 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, (((12)+0)*8));
10769 extern int first_system_vector;
10770 extern unsigned long used_vectors[];
10771 static inline __attribute__((always_inline)) void alloc_system_vector(int vector)
10773 if (__builtin_constant_p(((!(__builtin_constant_p((vector)) ? constant_test_bit((vector), (used_vectors)) : variable_test_bit((vector), (used_vectors)))))) ? !!((!(__builtin_constant_p((vector)) ? constant_test_bit((vector), (used_vectors)) : variable_test_bit((vector), (used_vectors))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 341, }; ______r = !!((!(__builtin_constant_p((vector)) ? constant_test_bit((vector), (used_vectors)) : variable_test_bit((vector), (used_vectors))))); ______f.miss_hit[______r]++; ______r; })) {
10774 set_bit(vector, used_vectors);
10775 if (__builtin_constant_p(((first_system_vector > vector))) ? !!((first_system_vector > vector)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 343, }; ______r = !!((first_system_vector > vector)); ______f.miss_hit[______r]++; ______r; }))
10776 first_system_vector = vector;
10778 do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (346), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0);
10781 static inline __attribute__((always_inline)) void alloc_intr_gate(unsigned int n, void *addr)
10783 alloc_system_vector(n);
10784 set_intr_gate(n, addr);
10786 static inline __attribute__((always_inline)) void set_system_intr_gate(unsigned int n, void *addr)
10788 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 361, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 361, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 361, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 361, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (361), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
10789 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, (((12)+0)*8));
10791 static inline __attribute__((always_inline)) void set_system_trap_gate(unsigned int n, void *addr)
10793 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 367, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 367, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 367, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 367, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (367), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
10794 _set_gate(n, GATE_TRAP, addr, 0x3, 0, (((12)+0)*8));
10796 static inline __attribute__((always_inline)) void set_trap_gate(unsigned int n, void *addr)
10798 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 373, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 373, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 373, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 373, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (373), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
10799 _set_gate(n, GATE_TRAP, addr, 0, 0, (((12)+0)*8));
10801 static inline __attribute__((always_inline)) void set_task_gate(unsigned int n, unsigned int gdt_entry)
10803 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 379, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 379, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 379, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 379, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (379), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
10804 _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10806 static inline __attribute__((always_inline)) void set_intr_gate_ist(int n, void *addr, unsigned ist)
10808 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 385, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 385, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 385, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 385, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (385), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
10809 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, (((12)+0)*8));
10811 static inline __attribute__((always_inline)) void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10813 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 391, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 391, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 391, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 391, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (391), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
10814 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, (((12)+0)*8));
10816 struct task_struct;
10817 struct linux_binprm;
10818 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10820 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10821 extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10823 typedef __u32 Elf32_Addr;
10824 typedef __u16 Elf32_Half;
10825 typedef __u32 Elf32_Off;
10826 typedef __s32 Elf32_Sword;
10827 typedef __u32 Elf32_Word;
10828 typedef __u64 Elf64_Addr;
10829 typedef __u16 Elf64_Half;
10830 typedef __s16 Elf64_SHalf;
10831 typedef __u64 Elf64_Off;
10832 typedef __s32 Elf64_Sword;
10833 typedef __u32 Elf64_Word;
10834 typedef __u64 Elf64_Xword;
10835 typedef __s64 Elf64_Sxword;
10836 typedef struct dynamic{
10844 Elf64_Sxword d_tag;
10850 typedef struct elf32_rel {
10851 Elf32_Addr r_offset;
10854 typedef struct elf64_rel {
10855 Elf64_Addr r_offset;
10856 Elf64_Xword r_info;
10858 typedef struct elf32_rela{
10859 Elf32_Addr r_offset;
10861 Elf32_Sword r_addend;
10863 typedef struct elf64_rela {
10864 Elf64_Addr r_offset;
10865 Elf64_Xword r_info;
10866 Elf64_Sxword r_addend;
10868 typedef struct elf32_sym{
10869 Elf32_Word st_name;
10870 Elf32_Addr st_value;
10871 Elf32_Word st_size;
10872 unsigned char st_info;
10873 unsigned char st_other;
10874 Elf32_Half st_shndx;
10876 typedef struct elf64_sym {
10877 Elf64_Word st_name;
10878 unsigned char st_info;
10879 unsigned char st_other;
10880 Elf64_Half st_shndx;
10881 Elf64_Addr st_value;
10882 Elf64_Xword st_size;
10884 typedef struct elf32_hdr{
10885 unsigned char e_ident[16];
10887 Elf32_Half e_machine;
10888 Elf32_Word e_version;
10889 Elf32_Addr e_entry;
10892 Elf32_Word e_flags;
10893 Elf32_Half e_ehsize;
10894 Elf32_Half e_phentsize;
10895 Elf32_Half e_phnum;
10896 Elf32_Half e_shentsize;
10897 Elf32_Half e_shnum;
10898 Elf32_Half e_shstrndx;
10900 typedef struct elf64_hdr {
10901 unsigned char e_ident[16];
10903 Elf64_Half e_machine;
10904 Elf64_Word e_version;
10905 Elf64_Addr e_entry;
10908 Elf64_Word e_flags;
10909 Elf64_Half e_ehsize;
10910 Elf64_Half e_phentsize;
10911 Elf64_Half e_phnum;
10912 Elf64_Half e_shentsize;
10913 Elf64_Half e_shnum;
10914 Elf64_Half e_shstrndx;
10916 typedef struct elf32_phdr{
10918 Elf32_Off p_offset;
10919 Elf32_Addr p_vaddr;
10920 Elf32_Addr p_paddr;
10921 Elf32_Word p_filesz;
10922 Elf32_Word p_memsz;
10923 Elf32_Word p_flags;
10924 Elf32_Word p_align;
10926 typedef struct elf64_phdr {
10928 Elf64_Word p_flags;
10929 Elf64_Off p_offset;
10930 Elf64_Addr p_vaddr;
10931 Elf64_Addr p_paddr;
10932 Elf64_Xword p_filesz;
10933 Elf64_Xword p_memsz;
10934 Elf64_Xword p_align;
10936 typedef struct elf32_shdr {
10937 Elf32_Word sh_name;
10938 Elf32_Word sh_type;
10939 Elf32_Word sh_flags;
10940 Elf32_Addr sh_addr;
10941 Elf32_Off sh_offset;
10942 Elf32_Word sh_size;
10943 Elf32_Word sh_link;
10944 Elf32_Word sh_info;
10945 Elf32_Word sh_addralign;
10946 Elf32_Word sh_entsize;
10948 typedef struct elf64_shdr {
10949 Elf64_Word sh_name;
10950 Elf64_Word sh_type;
10951 Elf64_Xword sh_flags;
10952 Elf64_Addr sh_addr;
10953 Elf64_Off sh_offset;
10954 Elf64_Xword sh_size;
10955 Elf64_Word sh_link;
10956 Elf64_Word sh_info;
10957 Elf64_Xword sh_addralign;
10958 Elf64_Xword sh_entsize;
10960 typedef struct elf32_note {
10961 Elf32_Word n_namesz;
10962 Elf32_Word n_descsz;
10965 typedef struct elf64_note {
10966 Elf64_Word n_namesz;
10967 Elf64_Word n_descsz;
10970 extern Elf32_Dyn _DYNAMIC [];
/*
 * elf_coredump_extra_notes_size - size of arch-specific extra coredump notes.
 *
 * Stub for configurations with no architecture-specific ELF notes: there is
 * never any extra note payload, so the size contribution is always zero.
 */
static inline __attribute__((always_inline)) int elf_coredump_extra_notes_size(void)
{
	return 0;
}
10972 static inline __attribute__((always_inline)) int elf_coredump_extra_notes_write(struct file *file,
10973 loff_t *foffset) { return 0; }
10976 enum kobj_ns_type {
10977 KOBJ_NS_TYPE_NONE = 0,
10981 struct kobj_ns_type_operations {
10982 enum kobj_ns_type type;
10983 void *(*grab_current_ns)(void);
10984 const void *(*netlink_ns)(struct sock *sk);
10985 const void *(*initial_ns)(void);
10986 void (*drop_ns)(void *);
10988 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
10989 int kobj_ns_type_registered(enum kobj_ns_type type);
10990 const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
10991 const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
10992 void *kobj_ns_grab_current(enum kobj_ns_type type);
10993 const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
10994 const void *kobj_ns_initial(enum kobj_ns_type type);
10995 void kobj_ns_drop(enum kobj_ns_type type, void *ns);
11002 struct lock_class_key *key;
11003 struct lock_class_key skey;
11005 struct attribute_group {
11007 mode_t (*is_visible)(struct kobject *,
11008 struct attribute *, int);
11009 struct attribute **attrs;
11012 struct vm_area_struct;
11013 struct bin_attribute {
11014 struct attribute attr;
11017 ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
11018 char *, loff_t, size_t);
11019 ssize_t (*write)(struct file *,struct kobject *, struct bin_attribute *,
11020 char *, loff_t, size_t);
11021 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
11022 struct vm_area_struct *vma);
11025 ssize_t (*show)(struct kobject *, struct attribute *,char *);
11026 ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
11028 struct sysfs_dirent;
11029 int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
11030 void *data, struct module *owner);
11031 int __attribute__((warn_unused_result)) sysfs_create_dir(struct kobject *kobj);
11032 void sysfs_remove_dir(struct kobject *kobj);
11033 int __attribute__((warn_unused_result)) sysfs_rename_dir(struct kobject *kobj, const char *new_name);
11034 int __attribute__((warn_unused_result)) sysfs_move_dir(struct kobject *kobj,
11035 struct kobject *new_parent_kobj);
11036 int __attribute__((warn_unused_result)) sysfs_create_file(struct kobject *kobj,
11037 const struct attribute *attr);
11038 int __attribute__((warn_unused_result)) sysfs_create_files(struct kobject *kobj,
11039 const struct attribute **attr);
11040 int __attribute__((warn_unused_result)) sysfs_chmod_file(struct kobject *kobj,
11041 const struct attribute *attr, mode_t mode);
11042 void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr);
11043 void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
11044 int __attribute__((warn_unused_result)) sysfs_create_bin_file(struct kobject *kobj,
11045 const struct bin_attribute *attr);
11046 void sysfs_remove_bin_file(struct kobject *kobj,
11047 const struct bin_attribute *attr);
11048 int __attribute__((warn_unused_result)) sysfs_create_link(struct kobject *kobj, struct kobject *target,
11050 int __attribute__((warn_unused_result)) sysfs_create_link_nowarn(struct kobject *kobj,
11051 struct kobject *target,
11053 void sysfs_remove_link(struct kobject *kobj, const char *name);
11054 int sysfs_rename_link(struct kobject *kobj, struct kobject *target,
11055 const char *old_name, const char *new_name);
11056 void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
11058 int __attribute__((warn_unused_result)) sysfs_create_group(struct kobject *kobj,
11059 const struct attribute_group *grp);
11060 int sysfs_update_group(struct kobject *kobj,
11061 const struct attribute_group *grp);
11062 void sysfs_remove_group(struct kobject *kobj,
11063 const struct attribute_group *grp);
11064 int sysfs_add_file_to_group(struct kobject *kobj,
11065 const struct attribute *attr, const char *group);
11066 void sysfs_remove_file_from_group(struct kobject *kobj,
11067 const struct attribute *attr, const char *group);
11068 int sysfs_merge_group(struct kobject *kobj,
11069 const struct attribute_group *grp);
11070 void sysfs_unmerge_group(struct kobject *kobj,
11071 const struct attribute_group *grp);
11072 void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
11073 void sysfs_notify_dirent(struct sysfs_dirent *sd);
11074 struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
11076 const unsigned char *name);
11077 struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
11078 void sysfs_put(struct sysfs_dirent *sd);
11079 int __attribute__((warn_unused_result)) sysfs_init(void);
11083 void kref_init(struct kref *kref);
11084 void kref_get(struct kref *kref);
11085 int kref_put(struct kref *kref, void (*release) (struct kref *kref));
11086 int kref_sub(struct kref *kref, unsigned int count,
11087 void (*release) (struct kref *kref));
11088 extern char uevent_helper[];
11089 extern u64 uevent_seqnum;
11090 enum kobject_action {
11101 struct list_head entry;
11102 struct kobject *parent;
11104 struct kobj_type *ktype;
11105 struct sysfs_dirent *sd;
11107 unsigned int state_initialized:1;
11108 unsigned int state_in_sysfs:1;
11109 unsigned int state_add_uevent_sent:1;
11110 unsigned int state_remove_uevent_sent:1;
11111 unsigned int uevent_suppress:1;
11113 extern int kobject_set_name(struct kobject *kobj, const char *name, ...)
11114 __attribute__((format(printf, 2, 3)));
11115 extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
11117 static inline __attribute__((always_inline)) const char *kobject_name(const struct kobject *kobj)
11121 extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
11122 extern int __attribute__((warn_unused_result)) kobject_add(struct kobject *kobj,
11123 struct kobject *parent,
11124 const char *fmt, ...)
11125 __attribute__((format(printf, 3, 4)));
11126 extern int __attribute__((warn_unused_result)) kobject_init_and_add(struct kobject *kobj,
11127 struct kobj_type *ktype,
11128 struct kobject *parent,
11129 const char *fmt, ...)
11130 __attribute__((format(printf, 4, 5)));
11131 extern void kobject_del(struct kobject *kobj);
11132 extern struct kobject * __attribute__((warn_unused_result)) kobject_create(void);
11133 extern struct kobject * __attribute__((warn_unused_result)) kobject_create_and_add(const char *name,
11134 struct kobject *parent);
11135 extern int __attribute__((warn_unused_result)) kobject_rename(struct kobject *, const char *new_name);
11136 extern int __attribute__((warn_unused_result)) kobject_move(struct kobject *, struct kobject *);
11137 extern struct kobject *kobject_get(struct kobject *kobj);
11138 extern void kobject_put(struct kobject *kobj);
11139 extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
11141 void (*release)(struct kobject *kobj);
11142 const struct sysfs_ops *sysfs_ops;
11143 struct attribute **default_attrs;
11144 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
11145 const void *(*namespace)(struct kobject *kobj);
11147 struct kobj_uevent_env {
11153 struct kset_uevent_ops {
11154 int (* const filter)(struct kset *kset, struct kobject *kobj);
11155 const char *(* const name)(struct kset *kset, struct kobject *kobj);
11156 int (* const uevent)(struct kset *kset, struct kobject *kobj,
11157 struct kobj_uevent_env *env);
11159 struct kobj_attribute {
11160 struct attribute attr;
11161 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
11163 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
11164 const char *buf, size_t count);
11166 extern const struct sysfs_ops kobj_sysfs_ops;
11169 struct list_head list;
11170 spinlock_t list_lock;
11171 struct kobject kobj;
11172 const struct kset_uevent_ops *uevent_ops;
11174 extern void kset_init(struct kset *kset);
11175 extern int __attribute__((warn_unused_result)) kset_register(struct kset *kset);
11176 extern void kset_unregister(struct kset *kset);
11177 extern struct kset * __attribute__((warn_unused_result)) kset_create_and_add(const char *name,
11178 const struct kset_uevent_ops *u,
11179 struct kobject *parent_kobj);
/*
 * to_kset - map a kobject embedded in a struct kset back to its kset.
 * The statement expression below is an expanded container_of(): it subtracts
 * offsetof(struct kset, kobj) from the member pointer.  Returns NULL when
 * kobj is NULL.  (Preprocessed dump: the function's brace lines are elided
 * from this listing.)
 */
11180 static inline __attribute__((always_inline)) struct kset *to_kset(struct kobject *kobj)
11182 return kobj ? ({ const typeof( ((struct kset *)0)->kobj ) *__mptr = (kobj); (struct kset *)( (char *)__mptr - __builtin_offsetof(struct kset,kobj) );}) : ((void *)0);
/*
 * kset_get - take a reference on a kset via its embedded kobject.
 * NULL-safe: returns NULL for a NULL kset, otherwise kobject_get() bumps
 * the refcount and the result is mapped back with to_kset().
 * (Preprocessed dump: brace lines elided.)
 */
11184 static inline __attribute__((always_inline)) struct kset *kset_get(struct kset *k)
11186 return k ? to_kset(kobject_get(&k->kobj)) : ((void *)0);
/*
 * kset_put - drop a reference on a kset via its embedded kobject.
 * NOTE(review): unlike kset_get() above, there is no NULL check here in the
 * visible lines — callers presumably must pass a valid kset; confirm against
 * kobject_put() semantics.  (Preprocessed dump: brace lines elided.)
 */
11188 static inline __attribute__((always_inline)) void kset_put(struct kset *k)
11190 kobject_put(&k->kobj);
/*
 * get_ktype - accessor for a kobject's type descriptor (kobj->ktype).
 * No NULL check on kobj; caller must pass a valid kobject.
 * (Preprocessed dump: brace lines elided.)
 */
11192 static inline __attribute__((always_inline)) struct kobj_type *get_ktype(struct kobject *kobj)
11194 return kobj->ktype;
11196 extern struct kobject *kset_find_obj(struct kset *, const char *);
11197 extern struct kobject *kset_find_obj_hinted(struct kset *, const char *,
11199 extern struct kobject *kernel_kobj;
11200 extern struct kobject *mm_kobj;
11201 extern struct kobject *hypervisor_kobj;
11202 extern struct kobject *power_kobj;
11203 extern struct kobject *firmware_kobj;
11204 int kobject_uevent(struct kobject *kobj, enum kobject_action action);
11205 int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
11207 int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
11208 __attribute__((format (printf, 2, 3)));
11209 int kobject_action_type(const char *buf, size_t count,
11210 enum kobject_action *type);
11211 struct kernel_param;
11212 struct kernel_param_ops {
11213 int (*set)(const char *val, const struct kernel_param *kp);
11214 int (*get)(char *buffer, const struct kernel_param *kp);
11215 void (*free)(void *arg);
11217 struct kernel_param {
11219 const struct kernel_param_ops *ops;
11224 const struct kparam_string *str;
11225 const struct kparam_array *arr;
11228 struct kparam_string {
11229 unsigned int maxlen;
11232 struct kparam_array
11235 unsigned int elemsize;
11237 const struct kernel_param_ops *ops;
/*
 * __check_old_set_param - compile-time type-check helper for legacy
 * module_param set callbacks taking (const char *, struct kernel_param *).
 * NOTE(review): the function body falls on lines elided from this
 * preprocessed listing; only the signature is visible here.
 */
11240 static inline __attribute__((always_inline)) int
11241 __check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
11245 extern void __kernel_param_lock(void);
11246 extern void __kernel_param_unlock(void);
11247 extern int parse_args(const char *name,
11249 const struct kernel_param *params,
11251 int (*unknown)(char *param, char *val));
11252 extern void destroy_params(const struct kernel_param *params, unsigned num);
11253 extern struct kernel_param_ops param_ops_byte;
11254 extern int param_set_byte(const char *val, const struct kernel_param *kp);
11255 extern int param_get_byte(char *buffer, const struct kernel_param *kp);
11256 extern struct kernel_param_ops param_ops_short;
11257 extern int param_set_short(const char *val, const struct kernel_param *kp);
11258 extern int param_get_short(char *buffer, const struct kernel_param *kp);
11259 extern struct kernel_param_ops param_ops_ushort;
11260 extern int param_set_ushort(const char *val, const struct kernel_param *kp);
11261 extern int param_get_ushort(char *buffer, const struct kernel_param *kp);
11262 extern struct kernel_param_ops param_ops_int;
11263 extern int param_set_int(const char *val, const struct kernel_param *kp);
11264 extern int param_get_int(char *buffer, const struct kernel_param *kp);
11265 extern struct kernel_param_ops param_ops_uint;
11266 extern int param_set_uint(const char *val, const struct kernel_param *kp);
11267 extern int param_get_uint(char *buffer, const struct kernel_param *kp);
11268 extern struct kernel_param_ops param_ops_long;
11269 extern int param_set_long(const char *val, const struct kernel_param *kp);
11270 extern int param_get_long(char *buffer, const struct kernel_param *kp);
11271 extern struct kernel_param_ops param_ops_ulong;
11272 extern int param_set_ulong(const char *val, const struct kernel_param *kp);
11273 extern int param_get_ulong(char *buffer, const struct kernel_param *kp);
11274 extern struct kernel_param_ops param_ops_charp;
11275 extern int param_set_charp(const char *val, const struct kernel_param *kp);
11276 extern int param_get_charp(char *buffer, const struct kernel_param *kp);
11277 extern struct kernel_param_ops param_ops_bool;
11278 extern int param_set_bool(const char *val, const struct kernel_param *kp);
11279 extern int param_get_bool(char *buffer, const struct kernel_param *kp);
11280 extern struct kernel_param_ops param_ops_invbool;
11281 extern int param_set_invbool(const char *val, const struct kernel_param *kp);
11282 extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
11283 extern struct kernel_param_ops param_array_ops;
11284 extern struct kernel_param_ops param_ops_string;
11285 extern int param_set_copystring(const char *val, const struct kernel_param *);
11286 extern int param_get_string(char *buffer, const struct kernel_param *kp);
11288 extern int module_param_sysfs_setup(struct module *mod,
11289 const struct kernel_param *kparam,
11290 unsigned int num_params);
11291 extern void module_param_sysfs_remove(struct module *mod);
11292 struct jump_label_key {
11294 struct jump_entry *entries;
11295 struct jump_label_mod *next;
11297 static inline __attribute__((always_inline)) __attribute__((always_inline)) bool arch_static_branch(struct jump_label_key *key)
11300 ".byte 0xe9 \n\t .long 0\n\t"
11301 ".pushsection __jump_table, \"aw\" \n\t"
11302 " " ".balign 4" " " "\n\t"
11303 " " ".long" " " "1b, %l[l_yes], %c0 \n\t"
11305 : : "i" (key) : : l_yes);
11310 typedef u32 jump_label_t;
11311 struct jump_entry {
11313 jump_label_t target;
11316 enum jump_label_type {
11317 JUMP_LABEL_DISABLE = 0,
/*
 * static_branch - evaluate a jump-label key; thin wrapper that delegates to
 * the architecture implementation arch_static_branch() (the asm-goto variant
 * expanded earlier in this listing).  The duplicated always_inline attribute
 * comes from macro expansion of both "static inline" and "__always_inline".
 * (Preprocessed dump: brace lines elided.)
 */
11321 static inline __attribute__((always_inline)) __attribute__((always_inline)) bool static_branch(struct jump_label_key *key)
11323 return arch_static_branch(key);
11325 extern struct jump_entry __start___jump_table[];
11326 extern struct jump_entry __stop___jump_table[];
11327 extern void jump_label_lock(void);
11328 extern void jump_label_unlock(void);
11329 extern void arch_jump_label_transform(struct jump_entry *entry,
11330 enum jump_label_type type);
11331 extern void arch_jump_label_text_poke_early(jump_label_t addr);
11332 extern int jump_label_text_reserved(void *start, void *end);
11333 extern void jump_label_inc(struct jump_label_key *key);
11334 extern void jump_label_dec(struct jump_label_key *key);
11335 extern bool jump_label_enabled(struct jump_label_key *key);
11336 extern void jump_label_apply_nops(struct module *mod);
11339 struct tracepoint_func {
11343 struct tracepoint {
11345 struct jump_label_key key;
11346 void (*regfunc)(void);
11347 void (*unregfunc)(void);
11348 struct tracepoint_func *funcs;
11350 extern int tracepoint_probe_register(const char *name, void *probe, void *data);
11352 tracepoint_probe_unregister(const char *name, void *probe, void *data);
11353 extern int tracepoint_probe_register_noupdate(const char *name, void *probe,
11355 extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
11357 extern void tracepoint_probe_update_all(void);
11358 struct tracepoint_iter {
11359 struct module *module;
11360 struct tracepoint * const *tracepoint;
11362 extern void tracepoint_iter_start(struct tracepoint_iter *iter);
11363 extern void tracepoint_iter_next(struct tracepoint_iter *iter);
11364 extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
11365 extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
11366 extern int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
11367 struct tracepoint * const *begin, struct tracepoint * const *end);
/*
 * tracepoint_synchronize_unregister - barrier after tracepoint probe removal;
 * delegates to synchronize_sched() so in-flight probe callers can drain
 * before the probe's resources are freed (presumably an RCU-sched grace
 * period — confirm against the RCU API).  (Preprocessed dump: brace lines
 * elided.)
 */
11368 static inline __attribute__((always_inline)) void tracepoint_synchronize_unregister(void)
11370 synchronize_sched();
11373 void tracepoint_update_probe_range(struct tracepoint * const *begin,
11374 struct tracepoint * const *end);
11375 struct mod_arch_specific
11379 extern struct tracepoint
11380 __tracepoint_module_load
11381 ; static inline __attribute__((always_inline)) void
11383 (struct module *mod) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_load.key)))) ? !!((static_branch(&__tracepoint_module_load.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11384 "include/trace/events/module.h"
11387 , }; ______r = !!((static_branch(&__tracepoint_module_load.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11388 "include/trace/events/module.h"
11391 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_load)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_load)->funcs))* )(*(volatile typeof(((&__tracepoint_module_load)->funcs)) *)&(((&__tracepoint_module_load)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_load)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11392 "include/trace/events/module.h"
11395 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, struct module *mod))(it_func))(__data, mod); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
11396 register_trace_module_load
11397 (void (*probe)(void *__data, struct module *mod), void *data) { return tracepoint_probe_register("module_load", (void *)probe, data); } static inline __attribute__((always_inline)) int
11398 unregister_trace_module_load
11399 (void (*probe)(void *__data, struct module *mod), void *data) { return tracepoint_probe_unregister("module_load", (void *)probe, data); } static inline __attribute__((always_inline)) void
11400 check_trace_callback_type_module_load
11401 (void (*cb)(void *__data, struct module *mod)) { }
11403 extern struct tracepoint
11404 __tracepoint_module_free
11405 ; static inline __attribute__((always_inline)) void
11407 (struct module *mod) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_free.key)))) ? !!((static_branch(&__tracepoint_module_free.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11408 "include/trace/events/module.h"
11411 , }; ______r = !!((static_branch(&__tracepoint_module_free.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11412 "include/trace/events/module.h"
11415 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_free)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_free)->funcs))* )(*(volatile typeof(((&__tracepoint_module_free)->funcs)) *)&(((&__tracepoint_module_free)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_free)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11416 "include/trace/events/module.h"
11419 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, struct module *mod))(it_func))(__data, mod); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
11420 register_trace_module_free
11421 (void (*probe)(void *__data, struct module *mod), void *data) { return tracepoint_probe_register("module_free", (void *)probe, data); } static inline __attribute__((always_inline)) int
11422 unregister_trace_module_free
11423 (void (*probe)(void *__data, struct module *mod), void *data) { return tracepoint_probe_unregister("module_free", (void *)probe, data); } static inline __attribute__((always_inline)) void
11424 check_trace_callback_type_module_free
11425 (void (*cb)(void *__data, struct module *mod)) { }
11428 extern struct tracepoint
11429 __tracepoint_module_get
11430 ; static inline __attribute__((always_inline)) void
11432 (struct module *mod, unsigned long ip) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_get.key)))) ? !!((static_branch(&__tracepoint_module_get.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11433 "include/trace/events/module.h"
11436 , }; ______r = !!((static_branch(&__tracepoint_module_get.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11437 "include/trace/events/module.h"
11440 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_get)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_get)->funcs))* )(*(volatile typeof(((&__tracepoint_module_get)->funcs)) *)&(((&__tracepoint_module_get)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_get)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11441 "include/trace/events/module.h"
11444 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, struct module *mod, unsigned long ip))(it_func))(__data, mod, ip); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
11445 register_trace_module_get
11446 (void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return tracepoint_probe_register("module_get", (void *)probe, data); } static inline __attribute__((always_inline)) int
11447 unregister_trace_module_get
11448 (void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return tracepoint_probe_unregister("module_get", (void *)probe, data); } static inline __attribute__((always_inline)) void
11449 check_trace_callback_type_module_get
11450 (void (*cb)(void *__data, struct module *mod, unsigned long ip)) { }
11452 extern struct tracepoint
11453 __tracepoint_module_put
11454 ; static inline __attribute__((always_inline)) void
11456 (struct module *mod, unsigned long ip) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_put.key)))) ? !!((static_branch(&__tracepoint_module_put.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11457 "include/trace/events/module.h"
11460 , }; ______r = !!((static_branch(&__tracepoint_module_put.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11461 "include/trace/events/module.h"
11464 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_put)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_put)->funcs))* )(*(volatile typeof(((&__tracepoint_module_put)->funcs)) *)&(((&__tracepoint_module_put)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_put)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11465 "include/trace/events/module.h"
11468 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, struct module *mod, unsigned long ip))(it_func))(__data, mod, ip); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
11469 register_trace_module_put
11470 (void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return tracepoint_probe_register("module_put", (void *)probe, data); } static inline __attribute__((always_inline)) int
11471 unregister_trace_module_put
11472 (void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return tracepoint_probe_unregister("module_put", (void *)probe, data); } static inline __attribute__((always_inline)) void
11473 check_trace_callback_type_module_put
11474 (void (*cb)(void *__data, struct module *mod, unsigned long ip)) { }
11476 extern struct tracepoint
11477 __tracepoint_module_request
11478 ; static inline __attribute__((always_inline)) void
11479 trace_module_request
11480 (char *name, bool wait, unsigned long ip) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_request.key)))) ? !!((static_branch(&__tracepoint_module_request.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11481 "include/trace/events/module.h"
11484 , }; ______r = !!((static_branch(&__tracepoint_module_request.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11485 "include/trace/events/module.h"
11488 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_request)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_request)->funcs))* )(*(volatile typeof(((&__tracepoint_module_request)->funcs)) *)&(((&__tracepoint_module_request)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_request)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
11489 "include/trace/events/module.h"
11492 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, char *name, bool wait, unsigned long ip))(it_func))(__data, name, wait, ip); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
11493 register_trace_module_request
11494 (void (*probe)(void *__data, char *name, bool wait, unsigned long ip), void *data) { return tracepoint_probe_register("module_request", (void *)probe, data); } static inline __attribute__((always_inline)) int
11495 unregister_trace_module_request
11496 (void (*probe)(void *__data, char *name, bool wait, unsigned long ip), void *data) { return tracepoint_probe_unregister("module_request", (void *)probe, data); } static inline __attribute__((always_inline)) void
11497 check_trace_callback_type_module_request
11498 (void (*cb)(void *__data, char *name, bool wait, unsigned long ip)) { }
11500 struct kernel_symbol
11502 unsigned long value;
11505 struct modversion_info
11508 char name[(64 - sizeof(unsigned long))];
11511 struct module_attribute {
11512 struct attribute attr;
11513 ssize_t (*show)(struct module_attribute *, struct module *, char *);
11514 ssize_t (*store)(struct module_attribute *, struct module *,
11515 const char *, size_t count);
11516 void (*setup)(struct module *, const char *);
11517 int (*test)(struct module *);
11518 void (*free)(struct module *);
/* Sysfs attribute object backing one module version string; pairs an embedded
 * module_attribute with the owning module's name and its version text. */
11520 struct module_version_attribute {
11521 struct module_attribute mattr; /* embedded base attribute; its show() is served by __modver_version_show (declared just below) */
11522 const char *module_name; /* NOTE(review): presumably the name of the module this version belongs to — confirm against the MODULE_VERSION() expansion */
11523 const char *version; /* the version string itself */
11524 } __attribute__ ((__aligned__(sizeof(void *)))); /* pointer-size alignment; presumably so instances pack into a uniform linker-section array — TODO confirm */
11525 extern ssize_t __modver_version_show(struct module_attribute *,
11526 struct module *, char *);
11527 struct module_kobject
11529 struct kobject kobj;
11530 struct module *mod;
11531 struct kobject *drivers_dir;
11532 struct module_param_attrs *mp;
11534 extern int init_module(void);
11535 extern void cleanup_module(void);
11536 struct exception_table_entry;
11537 const struct exception_table_entry *
11538 search_extable(const struct exception_table_entry *first,
11539 const struct exception_table_entry *last,
11540 unsigned long value);
11541 void sort_extable(struct exception_table_entry *start,
11542 struct exception_table_entry *finish);
11543 void sort_main_extable(void);
11544 void trim_init_extable(struct module *m);
11545 extern struct module __this_module;
11546 const struct exception_table_entry *search_exception_tables(unsigned long add);
11547 struct notifier_block;
11548 extern int modules_disabled;
11549 void *__symbol_get(const char *symbol);
11550 void *__symbol_get_gpl(const char *symbol);
11551 struct module_use {
11552 struct list_head source_list;
11553 struct list_head target_list;
11554 struct module *source, *target;
11559 MODULE_STATE_COMING,
11560 MODULE_STATE_GOING,
11564 enum module_state state;
11565 struct list_head list;
11566 char name[(64 - sizeof(unsigned long))];
11567 struct module_kobject mkobj;
11568 struct module_attribute *modinfo_attrs;
11569 const char *version;
11570 const char *srcversion;
11571 struct kobject *holders_dir;
11572 const struct kernel_symbol *syms;
11573 const unsigned long *crcs;
11574 unsigned int num_syms;
11575 struct kernel_param *kp;
11576 unsigned int num_kp;
11577 unsigned int num_gpl_syms;
11578 const struct kernel_symbol *gpl_syms;
11579 const unsigned long *gpl_crcs;
11580 const struct kernel_symbol *unused_syms;
11581 const unsigned long *unused_crcs;
11582 unsigned int num_unused_syms;
11583 unsigned int num_unused_gpl_syms;
11584 const struct kernel_symbol *unused_gpl_syms;
11585 const unsigned long *unused_gpl_crcs;
11586 const struct kernel_symbol *gpl_future_syms;
11587 const unsigned long *gpl_future_crcs;
11588 unsigned int num_gpl_future_syms;
11589 unsigned int num_exentries;
11590 struct exception_table_entry *extable;
11594 unsigned int init_size, core_size;
11595 unsigned int init_text_size, core_text_size;
11596 unsigned int init_ro_size, core_ro_size;
11597 struct mod_arch_specific arch;
11598 unsigned int taints;
11600 struct list_head bug_list;
11601 struct bug_entry *bug_table;
11602 Elf32_Sym *symtab, *core_symtab;
11603 unsigned int num_symtab, core_num_syms;
11604 char *strtab, *core_strtab;
11605 struct module_sect_attrs *sect_attrs;
11606 struct module_notes_attrs *notes_attrs;
11609 unsigned int percpu_size;
11610 unsigned int num_tracepoints;
11611 struct tracepoint * const *tracepoints_ptrs;
11612 struct jump_entry *jump_entries;
11613 unsigned int num_jump_entries;
11614 unsigned int num_trace_bprintk_fmt;
11615 const char **trace_bprintk_fmt_start;
11616 struct ftrace_event_call **trace_events;
11617 unsigned int num_trace_events;
11618 unsigned int num_ftrace_callsites;
11619 unsigned long *ftrace_callsites;
11620 struct list_head source_list;
11621 struct list_head target_list;
11622 struct task_struct *waiter;
11623 void (*exit)(void);
11624 struct module_ref {
11629 extern struct mutex module_mutex;
11630 static inline __attribute__((always_inline)) int module_is_live(struct module *mod)
11632 return mod->state != MODULE_STATE_GOING;
11634 struct module *__module_text_address(unsigned long addr);
11635 struct module *__module_address(unsigned long addr);
11636 bool is_module_address(unsigned long addr);
11637 bool is_module_percpu_address(unsigned long addr);
11638 bool is_module_text_address(unsigned long addr);
11639 static inline __attribute__((always_inline)) int within_module_core(unsigned long addr, struct module *mod)
11641 return (unsigned long)mod->module_core <= addr &&
11642 addr < (unsigned long)mod->module_core + mod->core_size;
11644 static inline __attribute__((always_inline)) int within_module_init(unsigned long addr, struct module *mod)
11646 return (unsigned long)mod->module_init <= addr &&
11647 addr < (unsigned long)mod->module_init + mod->init_size;
11649 struct module *find_module(const char *name);
11651 const struct kernel_symbol *start, *stop;
11652 const unsigned long *crcs;
11660 const struct kernel_symbol *find_symbol(const char *name,
11661 struct module **owner,
11662 const unsigned long **crc,
11665 bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
11666 struct module *owner,
11667 void *data), void *data);
11668 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
11669 char *name, char *module_name, int *exported);
11670 unsigned long module_kallsyms_lookup_name(const char *name);
11671 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
11672 struct module *, unsigned long),
11674 extern void __module_put_and_exit(struct module *mod, long code)
11675 __attribute__((noreturn));
11676 unsigned int module_refcount(struct module *mod);
11677 void __symbol_put(const char *symbol);
11678 void symbol_put_addr(void *addr);
11679 static inline __attribute__((always_inline)) void __module_get(struct module *module)
11681 if (__builtin_constant_p(((module))) ? !!((module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 510, }; ______r = !!((module)); ______f.miss_hit[______r]++; ______r; })) {
11682 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
11683 do { do { const void *__vpp_verify = (typeof(&(((module->refptr->incs)))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((module->refptr->incs)))) { case 1: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? 
!!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((module->refptr->incs))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((module->refptr->incs)))))); (typeof(*(&((((module->refptr->incs)))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
11684 trace_module_get(module, ({ __label__ __here; __here: (unsigned long)&&__here; }));
11685 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 514, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 514, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 514, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 514, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
11688 static inline __attribute__((always_inline)) int try_module_get(struct module *module)
11691 if (__builtin_constant_p(((module))) ? !!((module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 522, }; ______r = !!((module)); ______f.miss_hit[______r]++; ______r; })) {
11692 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
11693 if (__builtin_constant_p((((__builtin_constant_p(module_is_live(module)) ? !!(module_is_live(module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 525, }; ______r = __builtin_expect(!!(module_is_live(module)), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(module_is_live(module)) ? !!(module_is_live(module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 525, }; ______r = __builtin_expect(!!(module_is_live(module)), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 525, }; ______r = !!(((__builtin_constant_p(module_is_live(module)) ? !!(module_is_live(module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 525, }; ______r = __builtin_expect(!!(module_is_live(module)), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
11694 do { do { const void *__vpp_verify = (typeof(&(((module->refptr->incs)))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((module->refptr->incs)))) { case 1: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? 
!!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((module->refptr->incs))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((module->refptr->incs)))))); (typeof(*(&((((module->refptr->incs)))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
11695 trace_module_get(module, ({ __label__ __here; __here: (unsigned long)&&__here; }));
11698 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 531, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 531, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 531, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 531, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
11702 extern void module_put(struct module *module);
11703 int ref_module(struct module *a, struct module *b);
11704 const char *module_address_lookup(unsigned long addr,
11705 unsigned long *symbolsize,
11706 unsigned long *offset,
11709 int lookup_module_symbol_name(unsigned long addr, char *symname);
11710 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
11711 const struct exception_table_entry *search_module_extables(unsigned long addr);
11712 int register_module_notifier(struct notifier_block * nb);
11713 int unregister_module_notifier(struct notifier_block * nb);
11714 extern void print_modules(void);
11715 extern void module_update_tracepoints(void);
11716 extern int module_get_iter_tracepoints(struct tracepoint_iter *iter);
11717 extern struct kset *module_kset;
11718 extern struct kobj_type module_ktype;
11719 extern int module_sysfs_initialized;
11720 static inline __attribute__((always_inline)) void set_all_modules_text_rw(void) { }
11721 static inline __attribute__((always_inline)) void set_all_modules_text_ro(void) { }
11722 void module_bug_finalize(const Elf32_Ehdr *, const Elf32_Shdr *,
11724 void module_bug_cleanup(struct module *);
11725 void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init(void);
11726 int slab_is_available(void);
11727 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
11730 void kmem_cache_destroy(struct kmem_cache *);
11731 int kmem_cache_shrink(struct kmem_cache *);
11732 void kmem_cache_free(struct kmem_cache *, void *);
11733 unsigned int kmem_cache_size(struct kmem_cache *);
11734 void * __attribute__((warn_unused_result)) __krealloc(const void *, size_t, gfp_t);
11735 void * __attribute__((warn_unused_result)) krealloc(const void *, size_t, gfp_t);
11736 void kfree(const void *);
11737 void kzfree(const void *);
11738 size_t ksize(const void *);
11739 static inline __attribute__((always_inline)) void kmemleak_init(void)
11742 static inline __attribute__((always_inline)) void kmemleak_alloc(const void *ptr, size_t size, int min_count,
11746 static inline __attribute__((always_inline)) void kmemleak_alloc_recursive(const void *ptr, size_t size,
11747 int min_count, unsigned long flags,
11751 static inline __attribute__((always_inline)) void kmemleak_free(const void *ptr)
11754 static inline __attribute__((always_inline)) void kmemleak_free_part(const void *ptr, size_t size)
11757 static inline __attribute__((always_inline)) void kmemleak_free_recursive(const void *ptr, unsigned long flags)
11760 static inline __attribute__((always_inline)) void kmemleak_not_leak(const void *ptr)
11763 static inline __attribute__((always_inline)) void kmemleak_ignore(const void *ptr)
11766 static inline __attribute__((always_inline)) void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
11769 static inline __attribute__((always_inline)) void kmemleak_erase(void **ptr)
11772 static inline __attribute__((always_inline)) void kmemleak_no_scan(const void *ptr)
11782 FREE_REMOVE_PARTIAL,
11783 ALLOC_FROM_PARTIAL,
11790 DEACTIVATE_TO_HEAD,
11791 DEACTIVATE_TO_TAIL,
11792 DEACTIVATE_REMOTE_FREES,
11794 CMPXCHG_DOUBLE_CPU_FAIL,
11795 NR_SLUB_STAT_ITEMS };
11796 struct kmem_cache_cpu {
11802 struct kmem_cache_node {
11803 spinlock_t list_lock;
11804 unsigned long nr_partial;
11805 struct list_head partial;
11806 atomic_long_t nr_slabs;
11807 atomic_long_t total_objects;
11808 struct list_head full;
11810 struct kmem_cache_order_objects {
11813 struct kmem_cache {
11814 struct kmem_cache_cpu *cpu_slab;
11815 unsigned long flags;
11816 unsigned long min_partial;
11820 struct kmem_cache_order_objects oo;
11821 struct kmem_cache_order_objects max;
11822 struct kmem_cache_order_objects min;
11825 void (*ctor)(void *);
11830 struct list_head list;
11831 struct kobject kobj;
11832 struct kmem_cache_node *node[(1 << 0)];
11834 extern struct kmem_cache *kmalloc_caches[(12 + 2)];
11835 static inline __attribute__((always_inline)) __attribute__((always_inline)) int kmalloc_index(size_t size)
11837 if (__builtin_constant_p(((!size))) ? !!((!size)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 158, }; ______r = !!((!size)); ______f.miss_hit[______r]++; ______r; }))
11839 if (__builtin_constant_p(((size <= 8))) ? !!((size <= 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 161, }; ______r = !!((size <= 8)); ______f.miss_hit[______r]++; ______r; }))
11840 return ( __builtin_constant_p(8) ? ( (8) < 1 ? ____ilog2_NaN() : (8) & (1ULL << 63) ? 63 : (8) & (1ULL << 62) ? 62 : (8) & (1ULL << 61) ? 61 : (8) & (1ULL << 60) ? 60 : (8) & (1ULL << 59) ? 59 : (8) & (1ULL << 58) ? 58 : (8) & (1ULL << 57) ? 57 : (8) & (1ULL << 56) ? 56 : (8) & (1ULL << 55) ? 55 : (8) & (1ULL << 54) ? 54 : (8) & (1ULL << 53) ? 53 : (8) & (1ULL << 52) ? 52 : (8) & (1ULL << 51) ? 51 : (8) & (1ULL << 50) ? 50 : (8) & (1ULL << 49) ? 49 : (8) & (1ULL << 48) ? 48 : (8) & (1ULL << 47) ? 47 : (8) & (1ULL << 46) ? 46 : (8) & (1ULL << 45) ? 45 : (8) & (1ULL << 44) ? 44 : (8) & (1ULL << 43) ? 43 : (8) & (1ULL << 42) ? 42 : (8) & (1ULL << 41) ? 41 : (8) & (1ULL << 40) ? 40 : (8) & (1ULL << 39) ? 39 : (8) & (1ULL << 38) ? 38 : (8) & (1ULL << 37) ? 37 : (8) & (1ULL << 36) ? 36 : (8) & (1ULL << 35) ? 35 : (8) & (1ULL << 34) ? 34 : (8) & (1ULL << 33) ? 33 : (8) & (1ULL << 32) ? 32 : (8) & (1ULL << 31) ? 31 : (8) & (1ULL << 30) ? 30 : (8) & (1ULL << 29) ? 29 : (8) & (1ULL << 28) ? 28 : (8) & (1ULL << 27) ? 27 : (8) & (1ULL << 26) ? 26 : (8) & (1ULL << 25) ? 25 : (8) & (1ULL << 24) ? 24 : (8) & (1ULL << 23) ? 23 : (8) & (1ULL << 22) ? 22 : (8) & (1ULL << 21) ? 21 : (8) & (1ULL << 20) ? 20 : (8) & (1ULL << 19) ? 19 : (8) & (1ULL << 18) ? 18 : (8) & (1ULL << 17) ? 17 : (8) & (1ULL << 16) ? 16 : (8) & (1ULL << 15) ? 15 : (8) & (1ULL << 14) ? 14 : (8) & (1ULL << 13) ? 13 : (8) & (1ULL << 12) ? 12 : (8) & (1ULL << 11) ? 11 : (8) & (1ULL << 10) ? 10 : (8) & (1ULL << 9) ? 9 : (8) & (1ULL << 8) ? 8 : (8) & (1ULL << 7) ? 7 : (8) & (1ULL << 6) ? 6 : (8) & (1ULL << 5) ? 5 : (8) & (1ULL << 4) ? 4 : (8) & (1ULL << 3) ? 3 : (8) & (1ULL << 2) ? 2 : (8) & (1ULL << 1) ? 1 : (8) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(8) <= 4) ? __ilog2_u32(8) : __ilog2_u64(8) );
11841 if (__builtin_constant_p(((8 <= 32 && size > 64 && size <= 96))) ? !!((8 <= 32 && size > 64 && size <= 96)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 164, }; ______r = !!((8 <= 32 && size > 64 && size <= 96)); ______f.miss_hit[______r]++; ______r; }))
11843 if (__builtin_constant_p(((8 <= 64 && size > 128 && size <= 192))) ? !!((8 <= 64 && size > 128 && size <= 192)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 166, }; ______r = !!((8 <= 64 && size > 128 && size <= 192)); ______f.miss_hit[______r]++; ______r; }))
11845 if (__builtin_constant_p(((size <= 8))) ? !!((size <= 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 168, }; ______r = !!((size <= 8)); ______f.miss_hit[______r]++; ______r; })) return 3;
11846 if (__builtin_constant_p(((size <= 16))) ? !!((size <= 16)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 169, }; ______r = !!((size <= 16)); ______f.miss_hit[______r]++; ______r; })) return 4;
11847 if (__builtin_constant_p(((size <= 32))) ? !!((size <= 32)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 170, }; ______r = !!((size <= 32)); ______f.miss_hit[______r]++; ______r; })) return 5;
11848 if (__builtin_constant_p(((size <= 64))) ? !!((size <= 64)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 171, }; ______r = !!((size <= 64)); ______f.miss_hit[______r]++; ______r; })) return 6;
11849 if (__builtin_constant_p(((size <= 128))) ? !!((size <= 128)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 172, }; ______r = !!((size <= 128)); ______f.miss_hit[______r]++; ______r; })) return 7;
11850 if (__builtin_constant_p(((size <= 256))) ? !!((size <= 256)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 173, }; ______r = !!((size <= 256)); ______f.miss_hit[______r]++; ______r; })) return 8;
11851 if (__builtin_constant_p(((size <= 512))) ? !!((size <= 512)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 174, }; ______r = !!((size <= 512)); ______f.miss_hit[______r]++; ______r; })) return 9;
11852 if (__builtin_constant_p(((size <= 1024))) ? !!((size <= 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 175, }; ______r = !!((size <= 1024)); ______f.miss_hit[______r]++; ______r; })) return 10;
11853 if (__builtin_constant_p(((size <= 2 * 1024))) ? !!((size <= 2 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 176, }; ______r = !!((size <= 2 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 11;
11854 if (__builtin_constant_p(((size <= 4 * 1024))) ? !!((size <= 4 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 177, }; ______r = !!((size <= 4 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 12;
11855 if (__builtin_constant_p(((size <= 8 * 1024))) ? !!((size <= 8 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 183, }; ______r = !!((size <= 8 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 13;
11856 if (__builtin_constant_p(((size <= 16 * 1024))) ? !!((size <= 16 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 184, }; ______r = !!((size <= 16 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 14;
11857 if (__builtin_constant_p(((size <= 32 * 1024))) ? !!((size <= 32 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 185, }; ______r = !!((size <= 32 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 15;
11858 if (__builtin_constant_p(((size <= 64 * 1024))) ? !!((size <= 64 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 186, }; ______r = !!((size <= 64 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 16;
11859 if (__builtin_constant_p(((size <= 128 * 1024))) ? !!((size <= 128 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 187, }; ______r = !!((size <= 128 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 17;
11860 if (__builtin_constant_p(((size <= 256 * 1024))) ? !!((size <= 256 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 188, }; ______r = !!((size <= 256 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 18;
11861 if (__builtin_constant_p(((size <= 512 * 1024))) ? !!((size <= 512 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 189, }; ______r = !!((size <= 512 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 19;
11862 if (__builtin_constant_p(((size <= 1024 * 1024))) ? !!((size <= 1024 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 190, }; ______r = !!((size <= 1024 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 20;
11863 if (__builtin_constant_p(((size <= 2 * 1024 * 1024))) ? !!((size <= 2 * 1024 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 191, }; ______r = !!((size <= 2 * 1024 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 21;
11864 do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/slub_def.h"), "i" (192), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0);
11867 static inline __attribute__((always_inline)) __attribute__((always_inline)) struct kmem_cache *kmalloc_slab(size_t size)
11869 int index = kmalloc_index(size);
11870 if (__builtin_constant_p(((index == 0))) ? !!((index == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 214, }; ______r = !!((index == 0)); ______f.miss_hit[______r]++; ______r; }))
11871 return ((void *)0);
11872 return kmalloc_caches[index];
11874 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
11875 void *__kmalloc(size_t size, gfp_t flags);
11876 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *
11877 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
11879 void *ret = (void *) __get_free_pages(flags | (( gfp_t)0x4000u), order);
11880 kmemleak_alloc(ret, size, 1, flags);
11884 kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
11885 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
11886 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc_large(size_t size, gfp_t flags)
11888 unsigned int order = get_order(size);
11889 return kmalloc_order_trace(size, flags, order);
11891 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc(size_t size, gfp_t flags)
11893 if (__builtin_constant_p(((__builtin_constant_p(size)))) ? !!((__builtin_constant_p(size))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 257, }; ______r = !!((__builtin_constant_p(size))); ______f.miss_hit[______r]++; ______r; })) {
11894 if (__builtin_constant_p(((size > (2 * ((1UL) << 12))))) ? !!((size > (2 * ((1UL) << 12)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 258, }; ______r = !!((size > (2 * ((1UL) << 12)))); ______f.miss_hit[______r]++; ______r; }))
11895 return kmalloc_large(size, flags);
11896 if (__builtin_constant_p(((!(flags & (( gfp_t)0x01u))))) ? !!((!(flags & (( gfp_t)0x01u)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 261, }; ______r = !!((!(flags & (( gfp_t)0x01u)))); ______f.miss_hit[______r]++; ______r; })) {
11897 struct kmem_cache *s = kmalloc_slab(size);
11898 if (__builtin_constant_p(((!s))) ? !!((!s)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 264, }; ______r = !!((!s)); ______f.miss_hit[______r]++; ______r; }))
11899 return ((void *)16);
11900 return kmem_cache_alloc_trace(s, flags, size);
11903 return __kmalloc(size, flags);
11905 static inline __attribute__((always_inline)) void *kcalloc(size_t n, size_t size, gfp_t flags)
11907 if (__builtin_constant_p(((size != 0 && n > (~0UL) / size))) ? !!((size != 0 && n > (~0UL) / size)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slab.h", .line = 225, }; ______r = !!((size != 0 && n > (~0UL) / size)); ______f.miss_hit[______r]++; ______r; }))
11908 return ((void *)0);
11909 return __kmalloc(n * size, flags | (( gfp_t)0x8000u));
11911 static inline __attribute__((always_inline)) void *kmalloc_node(size_t size, gfp_t flags, int node)
11913 return kmalloc(size, flags);
11915 static inline __attribute__((always_inline)) void *__kmalloc_node(size_t size, gfp_t flags, int node)
11917 return __kmalloc(size, flags);
11919 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
11920 static inline __attribute__((always_inline)) void *kmem_cache_alloc_node(struct kmem_cache *cachep,
11921 gfp_t flags, int node)
11923 return kmem_cache_alloc(cachep, flags);
11925 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
11926 static inline __attribute__((always_inline)) void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
11928 return kmem_cache_alloc(k, flags | (( gfp_t)0x8000u));
11930 static inline __attribute__((always_inline)) void *kzalloc(size_t size, gfp_t flags)
11932 return kmalloc(size, flags | (( gfp_t)0x8000u));
11934 static inline __attribute__((always_inline)) void *kzalloc_node(size_t size, gfp_t flags, int node)
11936 return kmalloc_node(size, flags | (( gfp_t)0x8000u), node);
11938 void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init_late(void);
11939 static inline __attribute__((always_inline)) void pagefault_disable(void)
11941 add_preempt_count(1);
11942 __asm__ __volatile__("": : :"memory");
11944 static inline __attribute__((always_inline)) void pagefault_enable(void)
11946 __asm__ __volatile__("": : :"memory");
11947 sub_preempt_count(1);
11948 __asm__ __volatile__("": : :"memory");
11949 do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/uaccess.h", .line = 38, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/uaccess.h", .line = 38, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/uaccess.h", .line = 38, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/uaccess.h", .line = 38, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0);
11951 extern long probe_kernel_read(void *dst, const void *src, size_t size);
11952 extern long __probe_kernel_read(void *dst, const void *src, size_t size);
11953 extern long __attribute__((no_instrument_function)) probe_kernel_write(void *dst, const void *src, size_t size);
11954 extern long __attribute__((no_instrument_function)) __probe_kernel_write(void *dst, const void *src, size_t size);
11955 struct scatterlist;
11956 struct crypto_ablkcipher;
11957 struct crypto_async_request;
11958 struct crypto_aead;
11959 struct crypto_blkcipher;
11960 struct crypto_hash;
11963 struct crypto_type;
11964 struct aead_givcrypt_request;
11965 struct skcipher_givcrypt_request;
11966 typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
11967 struct crypto_async_request {
11968 struct list_head list;
11969 crypto_completion_t complete;
11971 struct crypto_tfm *tfm;
11974 struct ablkcipher_request {
11975 struct crypto_async_request base;
11976 unsigned int nbytes;
11978 struct scatterlist *src;
11979 struct scatterlist *dst;
11980 void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
11982 struct aead_request {
11983 struct crypto_async_request base;
11984 unsigned int assoclen;
11985 unsigned int cryptlen;
11987 struct scatterlist *assoc;
11988 struct scatterlist *src;
11989 struct scatterlist *dst;
11990 void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
11992 struct blkcipher_desc {
11993 struct crypto_blkcipher *tfm;
11997 struct cipher_desc {
11998 struct crypto_tfm *tfm;
11999 void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
12000 unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
12001 const u8 *src, unsigned int nbytes);
12005 struct crypto_hash *tfm;
12008 struct ablkcipher_alg {
12009 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
12010 unsigned int keylen);
12011 int (*encrypt)(struct ablkcipher_request *req);
12012 int (*decrypt)(struct ablkcipher_request *req);
12013 int (*givencrypt)(struct skcipher_givcrypt_request *req);
12014 int (*givdecrypt)(struct skcipher_givcrypt_request *req);
12016 unsigned int min_keysize;
12017 unsigned int max_keysize;
12018 unsigned int ivsize;
12021 int (*setkey)(struct crypto_aead *tfm, const u8 *key,
12022 unsigned int keylen);
12023 int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
12024 int (*encrypt)(struct aead_request *req);
12025 int (*decrypt)(struct aead_request *req);
12026 int (*givencrypt)(struct aead_givcrypt_request *req);
12027 int (*givdecrypt)(struct aead_givcrypt_request *req);
12029 unsigned int ivsize;
12030 unsigned int maxauthsize;
12032 struct blkcipher_alg {
12033 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
12034 unsigned int keylen);
12035 int (*encrypt)(struct blkcipher_desc *desc,
12036 struct scatterlist *dst, struct scatterlist *src,
12037 unsigned int nbytes);
12038 int (*decrypt)(struct blkcipher_desc *desc,
12039 struct scatterlist *dst, struct scatterlist *src,
12040 unsigned int nbytes);
12042 unsigned int min_keysize;
12043 unsigned int max_keysize;
12044 unsigned int ivsize;
12046 struct cipher_alg {
12047 unsigned int cia_min_keysize;
12048 unsigned int cia_max_keysize;
12049 int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
12050 unsigned int keylen);
12051 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
12052 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
12054 struct compress_alg {
12055 int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
12056 unsigned int slen, u8 *dst, unsigned int *dlen);
12057 int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
12058 unsigned int slen, u8 *dst, unsigned int *dlen);
12061 int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
12062 unsigned int dlen);
12063 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
12064 unsigned int seedsize;
12066 struct crypto_alg {
12067 struct list_head cra_list;
12068 struct list_head cra_users;
12070 unsigned int cra_blocksize;
12071 unsigned int cra_ctxsize;
12072 unsigned int cra_alignmask;
12074 atomic_t cra_refcnt;
12076 char cra_driver_name[64];
12077 const struct crypto_type *cra_type;
12079 struct ablkcipher_alg ablkcipher;
12080 struct aead_alg aead;
12081 struct blkcipher_alg blkcipher;
12082 struct cipher_alg cipher;
12083 struct compress_alg compress;
12084 struct rng_alg rng;
12086 int (*cra_init)(struct crypto_tfm *tfm);
12087 void (*cra_exit)(struct crypto_tfm *tfm);
12088 void (*cra_destroy)(struct crypto_alg *alg);
12089 struct module *cra_module;
12091 int crypto_register_alg(struct crypto_alg *alg);
12092 int crypto_unregister_alg(struct crypto_alg *alg);
12093 int crypto_has_alg(const char *name, u32 type, u32 mask);
12094 struct ablkcipher_tfm {
12095 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
12096 unsigned int keylen);
12097 int (*encrypt)(struct ablkcipher_request *req);
12098 int (*decrypt)(struct ablkcipher_request *req);
12099 int (*givencrypt)(struct skcipher_givcrypt_request *req);
12100 int (*givdecrypt)(struct skcipher_givcrypt_request *req);
12101 struct crypto_ablkcipher *base;
12102 unsigned int ivsize;
12103 unsigned int reqsize;
12106 int (*setkey)(struct crypto_aead *tfm, const u8 *key,
12107 unsigned int keylen);
12108 int (*encrypt)(struct aead_request *req);
12109 int (*decrypt)(struct aead_request *req);
12110 int (*givencrypt)(struct aead_givcrypt_request *req);
12111 int (*givdecrypt)(struct aead_givcrypt_request *req);
12112 struct crypto_aead *base;
12113 unsigned int ivsize;
12114 unsigned int authsize;
12115 unsigned int reqsize;
12117 struct blkcipher_tfm {
12119 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
12120 unsigned int keylen);
12121 int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
12122 struct scatterlist *src, unsigned int nbytes);
12123 int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
12124 struct scatterlist *src, unsigned int nbytes);
12126 struct cipher_tfm {
12127 int (*cit_setkey)(struct crypto_tfm *tfm,
12128 const u8 *key, unsigned int keylen);
12129 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
12130 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
12133 int (*init)(struct hash_desc *desc);
12134 int (*update)(struct hash_desc *desc,
12135 struct scatterlist *sg, unsigned int nsg);
12136 int (*final)(struct hash_desc *desc, u8 *out);
12137 int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
12138 unsigned int nsg, u8 *out);
12139 int (*setkey)(struct crypto_hash *tfm, const u8 *key,
12140 unsigned int keylen);
12141 unsigned int digestsize;
12143 struct compress_tfm {
12144 int (*cot_compress)(struct crypto_tfm *tfm,
12145 const u8 *src, unsigned int slen,
12146 u8 *dst, unsigned int *dlen);
12147 int (*cot_decompress)(struct crypto_tfm *tfm,
12148 const u8 *src, unsigned int slen,
12149 u8 *dst, unsigned int *dlen);
12152 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
12153 unsigned int dlen);
12154 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
12156 struct crypto_tfm {
12159 struct ablkcipher_tfm ablkcipher;
12160 struct aead_tfm aead;
12161 struct blkcipher_tfm blkcipher;
12162 struct cipher_tfm cipher;
12163 struct hash_tfm hash;
12164 struct compress_tfm compress;
12165 struct rng_tfm rng;
12167 void (*exit)(struct crypto_tfm *tfm);
12168 struct crypto_alg *__crt_alg;
12169 void *__crt_ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
12171 struct crypto_ablkcipher {
12172 struct crypto_tfm base;
12174 struct crypto_aead {
12175 struct crypto_tfm base;
12177 struct crypto_blkcipher {
12178 struct crypto_tfm base;
12180 struct crypto_cipher {
12181 struct crypto_tfm base;
12183 struct crypto_comp {
12184 struct crypto_tfm base;
12186 struct crypto_hash {
12187 struct crypto_tfm base;
12189 struct crypto_rng {
12190 struct crypto_tfm base;
12199 struct crypto_attr_alg {
12202 struct crypto_attr_type {
12206 struct crypto_attr_u32 {
12209 struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
12210 void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
12211 static inline __attribute__((always_inline)) void crypto_free_tfm(struct crypto_tfm *tfm)
12213 return crypto_destroy_tfm(tfm, tfm);
12215 int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
12216 static inline __attribute__((always_inline)) const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
12218 return tfm->__crt_alg->cra_name;
12220 static inline __attribute__((always_inline)) const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
12222 return tfm->__crt_alg->cra_driver_name;
12224 static inline __attribute__((always_inline)) int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
12226 return tfm->__crt_alg->cra_priority;
12228 static inline __attribute__((always_inline)) const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm)
12230 return ({ struct module *__mod = (tfm->__crt_alg->cra_module); __mod ? __mod->name : "kernel"; });
12232 static inline __attribute__((always_inline)) u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
12234 return tfm->__crt_alg->cra_flags & 0x0000000f;
12236 static inline __attribute__((always_inline)) unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
12238 return tfm->__crt_alg->cra_blocksize;
12240 static inline __attribute__((always_inline)) unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
12242 return tfm->__crt_alg->cra_alignmask;
12244 static inline __attribute__((always_inline)) u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
12246 return tfm->crt_flags;
12248 static inline __attribute__((always_inline)) void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
12250 tfm->crt_flags |= flags;
12252 static inline __attribute__((always_inline)) void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
12254 tfm->crt_flags &= ~flags;
12256 static inline __attribute__((always_inline)) void *crypto_tfm_ctx(struct crypto_tfm *tfm)
12258 return tfm->__crt_ctx;
12260 static inline __attribute__((always_inline)) unsigned int crypto_tfm_ctx_alignment(void)
12262 struct crypto_tfm *tfm;
12263 return __alignof__(tfm->__crt_ctx);
12265 static inline __attribute__((always_inline)) struct crypto_ablkcipher *__crypto_ablkcipher_cast(
12266 struct crypto_tfm *tfm)
12268 return (struct crypto_ablkcipher *)tfm;
12270 static inline __attribute__((always_inline)) u32 crypto_skcipher_type(u32 type)
12272 type &= ~(0x0000000f | 0x00000200);
12273 type |= 0x00000004;
12276 static inline __attribute__((always_inline)) u32 crypto_skcipher_mask(u32 mask)
12278 mask &= ~(0x0000000f | 0x00000200);
12279 mask |= 0x0000000c;
12282 struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
12283 u32 type, u32 mask);
12284 static inline __attribute__((always_inline)) struct crypto_tfm *crypto_ablkcipher_tfm(
12285 struct crypto_ablkcipher *tfm)
12289 static inline __attribute__((always_inline)) void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
12291 crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
12293 static inline __attribute__((always_inline)) int crypto_has_ablkcipher(const char *alg_name, u32 type,
12296 return crypto_has_alg(alg_name, crypto_skcipher_type(type),
12297 crypto_skcipher_mask(mask));
12299 static inline __attribute__((always_inline)) struct ablkcipher_tfm *crypto_ablkcipher_crt(
12300 struct crypto_ablkcipher *tfm)
12302 return &crypto_ablkcipher_tfm(tfm)->crt_u.ablkcipher;
12304 static inline __attribute__((always_inline)) unsigned int crypto_ablkcipher_ivsize(
12305 struct crypto_ablkcipher *tfm)
12307 return crypto_ablkcipher_crt(tfm)->ivsize;
12309 static inline __attribute__((always_inline)) unsigned int crypto_ablkcipher_blocksize(
12310 struct crypto_ablkcipher *tfm)
12312 return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
12314 static inline __attribute__((always_inline)) unsigned int crypto_ablkcipher_alignmask(
12315 struct crypto_ablkcipher *tfm)
12317 return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
12319 static inline __attribute__((always_inline)) u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
12321 return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
12323 static inline __attribute__((always_inline)) void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
12326 crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
12328 static inline __attribute__((always_inline)) void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
12331 crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
12333 static inline __attribute__((always_inline)) int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
12334 const u8 *key, unsigned int keylen)
12336 struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
12337 return crt->setkey(crt->base, key, keylen);
12339 static inline __attribute__((always_inline)) struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
12340 struct ablkcipher_request *req)
12342 return __crypto_ablkcipher_cast(req->base.tfm);
12344 static inline __attribute__((always_inline)) int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
12346 struct ablkcipher_tfm *crt =
12347 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
12348 return crt->encrypt(req);
12350 static inline __attribute__((always_inline)) int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
12352 struct ablkcipher_tfm *crt =
12353 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
12354 return crt->decrypt(req);
12356 static inline __attribute__((always_inline)) unsigned int crypto_ablkcipher_reqsize(
12357 struct crypto_ablkcipher *tfm)
12359 return crypto_ablkcipher_crt(tfm)->reqsize;
12361 static inline __attribute__((always_inline)) void ablkcipher_request_set_tfm(
12362 struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
12364 req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
12366 static inline __attribute__((always_inline)) struct ablkcipher_request *ablkcipher_request_cast(
12367 struct crypto_async_request *req)
12369 return ({ const typeof( ((struct ablkcipher_request *)0)->base ) *__mptr = (req); (struct ablkcipher_request *)( (char *)__mptr - __builtin_offsetof(struct ablkcipher_request,base) );});
12371 static inline __attribute__((always_inline)) struct ablkcipher_request *ablkcipher_request_alloc(
12372 struct crypto_ablkcipher *tfm, gfp_t gfp)
12374 struct ablkcipher_request *req;
12375 req = kmalloc(sizeof(struct ablkcipher_request) +
12376 crypto_ablkcipher_reqsize(tfm), gfp);
12377 if (__builtin_constant_p((((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 693, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 693, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 693, }; ______r = !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 693, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
12378 ablkcipher_request_set_tfm(req, tfm);
12381 static inline __attribute__((always_inline)) void ablkcipher_request_free(struct ablkcipher_request *req)
12385 static inline __attribute__((always_inline)) void ablkcipher_request_set_callback(
12386 struct ablkcipher_request *req,
12387 u32 flags, crypto_completion_t complete, void *data)
12389 req->base.complete = complete;
12390 req->base.data = data;
12391 req->base.flags = flags;
12393 static inline __attribute__((always_inline)) void ablkcipher_request_set_crypt(
12394 struct ablkcipher_request *req,
12395 struct scatterlist *src, struct scatterlist *dst,
12396 unsigned int nbytes, void *iv)
12400 req->nbytes = nbytes;
12403 static inline __attribute__((always_inline)) struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
12405 return (struct crypto_aead *)tfm;
12407 struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
12408 static inline __attribute__((always_inline)) struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
12412 static inline __attribute__((always_inline)) void crypto_free_aead(struct crypto_aead *tfm)
12414 crypto_free_tfm(crypto_aead_tfm(tfm));
12416 static inline __attribute__((always_inline)) struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
12418 return &crypto_aead_tfm(tfm)->crt_u.aead;
12420 static inline __attribute__((always_inline)) unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
12422 return crypto_aead_crt(tfm)->ivsize;
12424 static inline __attribute__((always_inline)) unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
12426 return crypto_aead_crt(tfm)->authsize;
12428 static inline __attribute__((always_inline)) unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
12430 return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
12432 static inline __attribute__((always_inline)) unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
12434 return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
12436 static inline __attribute__((always_inline)) u32 crypto_aead_get_flags(struct crypto_aead *tfm)
12438 return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
12440 static inline __attribute__((always_inline)) void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
12442 crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
12444 static inline __attribute__((always_inline)) void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
12446 crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
12448 static inline __attribute__((always_inline)) int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
12449 unsigned int keylen)
12451 struct aead_tfm *crt = crypto_aead_crt(tfm);
12452 return crt->setkey(crt->base, key, keylen);
12454 int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
12455 static inline __attribute__((always_inline)) struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
12457 return __crypto_aead_cast(req->base.tfm);
12459 static inline __attribute__((always_inline)) int crypto_aead_encrypt(struct aead_request *req)
12461 return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
12463 static inline __attribute__((always_inline)) int crypto_aead_decrypt(struct aead_request *req)
12465 return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
12467 static inline __attribute__((always_inline)) unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
12469 return crypto_aead_crt(tfm)->reqsize;
12471 static inline __attribute__((always_inline)) void aead_request_set_tfm(struct aead_request *req,
12472 struct crypto_aead *tfm)
12474 req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
12476 static inline __attribute__((always_inline)) struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
12479 struct aead_request *req;
12480 req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
12481 if (__builtin_constant_p((((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 824, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 824, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 824, }; ______r = !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 824, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
12482 aead_request_set_tfm(req, tfm);
12485 static inline __attribute__((always_inline)) void aead_request_free(struct aead_request *req)
12489 static inline __attribute__((always_inline)) void aead_request_set_callback(struct aead_request *req,
12491 crypto_completion_t complete,
12494 req->base.complete = complete;
12495 req->base.data = data;
12496 req->base.flags = flags;
12498 static inline __attribute__((always_inline)) void aead_request_set_crypt(struct aead_request *req,
12499 struct scatterlist *src,
12500 struct scatterlist *dst,
12501 unsigned int cryptlen, u8 *iv)
12505 req->cryptlen = cryptlen;
12508 static inline __attribute__((always_inline)) void aead_request_set_assoc(struct aead_request *req,
12509 struct scatterlist *assoc,
12510 unsigned int assoclen)
12512 req->assoc = assoc;
12513 req->assoclen = assoclen;
12515 static inline __attribute__((always_inline)) struct crypto_blkcipher *__crypto_blkcipher_cast(
12516 struct crypto_tfm *tfm)
12518 return (struct crypto_blkcipher *)tfm;
12520 static inline __attribute__((always_inline)) struct crypto_blkcipher *crypto_blkcipher_cast(
12521 struct crypto_tfm *tfm)
12523 do { if (__builtin_constant_p((((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000004) ? !!(crypto_tfm_alg_type(tfm) != 0x00000004) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 873, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000004), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000004) ? !!(crypto_tfm_alg_type(tfm) != 0x00000004) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 873, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000004), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 873, }; ______r = !!(((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000004) ? !!(crypto_tfm_alg_type(tfm) != 0x00000004) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 873, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000004), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/crypto.h"), "i" (873), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
12524 return __crypto_blkcipher_cast(tfm);
12526 static inline __attribute__((always_inline)) struct crypto_blkcipher *crypto_alloc_blkcipher(
12527 const char *alg_name, u32 type, u32 mask)
12529 type &= ~0x0000000f;
12530 type |= 0x00000004;
12531 mask |= 0x0000000f;
12532 return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
12534 static inline __attribute__((always_inline)) struct crypto_tfm *crypto_blkcipher_tfm(
12535 struct crypto_blkcipher *tfm)
12539 static inline __attribute__((always_inline)) void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
12541 crypto_free_tfm(crypto_blkcipher_tfm(tfm));
12543 static inline __attribute__((always_inline)) int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
12545 type &= ~0x0000000f;
12546 type |= 0x00000004;
12547 mask |= 0x0000000f;
12548 return crypto_has_alg(alg_name, type, mask);
12550 static inline __attribute__((always_inline)) const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
12552 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
12554 static inline __attribute__((always_inline)) struct blkcipher_tfm *crypto_blkcipher_crt(
12555 struct crypto_blkcipher *tfm)
12557 return &crypto_blkcipher_tfm(tfm)->crt_u.blkcipher;
12559 static inline __attribute__((always_inline)) struct blkcipher_alg *crypto_blkcipher_alg(
12560 struct crypto_blkcipher *tfm)
12562 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_u.blkcipher;
12564 static inline __attribute__((always_inline)) unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
12566 return crypto_blkcipher_alg(tfm)->ivsize;
12568 static inline __attribute__((always_inline)) unsigned int crypto_blkcipher_blocksize(
12569 struct crypto_blkcipher *tfm)
12571 return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
12573 static inline __attribute__((always_inline)) unsigned int crypto_blkcipher_alignmask(
12574 struct crypto_blkcipher *tfm)
12576 return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
12578 static inline __attribute__((always_inline)) u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
12580 return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
12582 static inline __attribute__((always_inline)) void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
12585 crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
12587 static inline __attribute__((always_inline)) void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
12590 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
12592 static inline __attribute__((always_inline)) int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
12593 const u8 *key, unsigned int keylen)
12595 return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
12598 static inline __attribute__((always_inline)) int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
12599 struct scatterlist *dst,
12600 struct scatterlist *src,
12601 unsigned int nbytes)
12603 desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
12604 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
12606 static inline __attribute__((always_inline)) int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
12607 struct scatterlist *dst,
12608 struct scatterlist *src,
12609 unsigned int nbytes)
12611 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
12613 static inline __attribute__((always_inline)) int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
12614 struct scatterlist *dst,
12615 struct scatterlist *src,
12616 unsigned int nbytes)
12618 desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
12619 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
12621 static inline __attribute__((always_inline)) int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
12622 struct scatterlist *dst,
12623 struct scatterlist *src,
12624 unsigned int nbytes)
12626 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
12628 static inline __attribute__((always_inline)) void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
12629 const u8 *src, unsigned int len)
12631 __builtin_memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
12633 static inline __attribute__((always_inline)) void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
12634 u8 *dst, unsigned int len)
12636 __builtin_memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
12638 static inline __attribute__((always_inline)) struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
12640 return (struct crypto_cipher *)tfm;
12642 static inline __attribute__((always_inline)) struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
12644 do { if (__builtin_constant_p((((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000001) ? !!(crypto_tfm_alg_type(tfm) != 0x00000001) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1018, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000001), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000001) ? !!(crypto_tfm_alg_type(tfm) != 0x00000001) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1018, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000001), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1018, }; ______r = !!(((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000001) ? !!(crypto_tfm_alg_type(tfm) != 0x00000001) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1018, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000001), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/crypto.h"), "i" (1018), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
12645 return __crypto_cipher_cast(tfm);
12647 static inline __attribute__((always_inline)) struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
12648 u32 type, u32 mask)
12650 type &= ~0x0000000f;
12651 type |= 0x00000001;
12652 mask |= 0x0000000f;
12653 return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
12655 static inline __attribute__((always_inline)) struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
12659 static inline __attribute__((always_inline)) void crypto_free_cipher(struct crypto_cipher *tfm)
12661 crypto_free_tfm(crypto_cipher_tfm(tfm));
12663 static inline __attribute__((always_inline)) int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
12665 type &= ~0x0000000f;
12666 type |= 0x00000001;
12667 mask |= 0x0000000f;
12668 return crypto_has_alg(alg_name, type, mask);
12670 static inline __attribute__((always_inline)) struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
12672 return &crypto_cipher_tfm(tfm)->crt_u.cipher;
12674 static inline __attribute__((always_inline)) unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
12676 return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
12678 static inline __attribute__((always_inline)) unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
12680 return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
12682 static inline __attribute__((always_inline)) u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
12684 return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
12686 static inline __attribute__((always_inline)) void crypto_cipher_set_flags(struct crypto_cipher *tfm,
12689 crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
12691 static inline __attribute__((always_inline)) void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
12694 crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
12696 static inline __attribute__((always_inline)) int crypto_cipher_setkey(struct crypto_cipher *tfm,
12697 const u8 *key, unsigned int keylen)
12699 return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
12702 static inline __attribute__((always_inline)) void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
12703 u8 *dst, const u8 *src)
12705 crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
12708 static inline __attribute__((always_inline)) void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
12709 u8 *dst, const u8 *src)
12711 crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
12714 static inline __attribute__((always_inline)) struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
12716 return (struct crypto_hash *)tfm;
12718 static inline __attribute__((always_inline)) struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
12720 do { if (__builtin_constant_p((((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1112, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1112, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
12721 "include/linux/crypto.h"
12724 , }; ______r = !!(((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1112, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" (
12725 "include/linux/crypto.h"
12728 ), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0)
12730 return __crypto_hash_cast(tfm);
12732 static inline __attribute__((always_inline)) struct crypto_hash *crypto_alloc_hash(const char *alg_name,
12733 u32 type, u32 mask)
12735 type &= ~0x0000000f;
12736 mask &= ~0x0000000f;
12737 type |= 0x00000008;
12738 mask |= 0x0000000e;
12739 return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
12741 static inline __attribute__((always_inline)) struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
12745 static inline __attribute__((always_inline)) void crypto_free_hash(struct crypto_hash *tfm)
12747 crypto_free_tfm(crypto_hash_tfm(tfm));
12749 static inline __attribute__((always_inline)) int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
12751 type &= ~0x0000000f;
12752 mask &= ~0x0000000f;
12753 type |= 0x00000008;
12754 mask |= 0x0000000e;
12755 return crypto_has_alg(alg_name, type, mask);
12757 static inline __attribute__((always_inline)) struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
12759 return &crypto_hash_tfm(tfm)->crt_u.hash;
12761 static inline __attribute__((always_inline)) unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
12763 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
12765 static inline __attribute__((always_inline)) unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
12767 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
12769 static inline __attribute__((always_inline)) unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
12771 return crypto_hash_crt(tfm)->digestsize;
12773 static inline __attribute__((always_inline)) u32 crypto_hash_get_flags(struct crypto_hash *tfm)
12775 return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
12777 static inline __attribute__((always_inline)) void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
12779 crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
12781 static inline __attribute__((always_inline)) void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
12783 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
12785 static inline __attribute__((always_inline)) int crypto_hash_init(struct hash_desc *desc)
12787 return crypto_hash_crt(desc->tfm)->init(desc);
12789 static inline __attribute__((always_inline)) int crypto_hash_update(struct hash_desc *desc,
12790 struct scatterlist *sg,
12791 unsigned int nbytes)
12793 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
12795 static inline __attribute__((always_inline)) int crypto_hash_final(struct hash_desc *desc, u8 *out)
12797 return crypto_hash_crt(desc->tfm)->final(desc, out);
12799 static inline __attribute__((always_inline)) int crypto_hash_digest(struct hash_desc *desc,
12800 struct scatterlist *sg,
12801 unsigned int nbytes, u8 *out)
12803 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
12805 static inline __attribute__((always_inline)) int crypto_hash_setkey(struct crypto_hash *hash,
12806 const u8 *key, unsigned int keylen)
12808 return crypto_hash_crt(hash)->setkey(hash, key, keylen);
12810 static inline __attribute__((always_inline)) struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
12812 return (struct crypto_comp *)tfm;
12814 static inline __attribute__((always_inline)) struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
12816 do { if (__builtin_constant_p((((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1220, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1220, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
12817 "include/linux/crypto.h"
12820 , }; ______r = !!(((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1220, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" (
12821 "include/linux/crypto.h"
12824 ), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0)
12826 return __crypto_comp_cast(tfm);
12828 static inline __attribute__((always_inline)) struct crypto_comp *crypto_alloc_comp(const char *alg_name,
12829 u32 type, u32 mask)
12831 type &= ~0x0000000f;
12832 type |= 0x00000002;
12833 mask |= 0x0000000f;
12834 return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
12836 static inline __attribute__((always_inline)) struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
12840 static inline __attribute__((always_inline)) void crypto_free_comp(struct crypto_comp *tfm)
12842 crypto_free_tfm(crypto_comp_tfm(tfm));
12844 static inline __attribute__((always_inline)) int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
12846 type &= ~0x0000000f;
12847 type |= 0x00000002;
12848 mask |= 0x0000000f;
12849 return crypto_has_alg(alg_name, type, mask);
12851 static inline __attribute__((always_inline)) const char *crypto_comp_name(struct crypto_comp *tfm)
12853 return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
12855 static inline __attribute__((always_inline)) struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
12857 return &crypto_comp_tfm(tfm)->crt_u.compress;
12859 static inline __attribute__((always_inline)) int crypto_comp_compress(struct crypto_comp *tfm,
12860 const u8 *src, unsigned int slen,
12861 u8 *dst, unsigned int *dlen)
12863 return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
12864 src, slen, dst, dlen);
12866 static inline __attribute__((always_inline)) int crypto_comp_decompress(struct crypto_comp *tfm,
12867 const u8 *src, unsigned int slen,
12868 u8 *dst, unsigned int *dlen)
12870 return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
12871 src, slen, dst, dlen);
12876 struct crypto_type {
12877 unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
12878 unsigned int (*extsize)(struct crypto_alg *alg);
12879 int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
12880 int (*init_tfm)(struct crypto_tfm *tfm);
12881 void (*show)(struct seq_file *m, struct crypto_alg *alg);
12882 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
12884 unsigned int maskclear;
12885 unsigned int maskset;
12886 unsigned int tfmsize;
12888 struct crypto_instance {
12889 struct crypto_alg alg;
12890 struct crypto_template *tmpl;
12891 struct hlist_node list;
12892 void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
12894 struct crypto_template {
12895 struct list_head list;
12896 struct hlist_head instances;
12897 struct module *module;
12898 struct crypto_instance *(*alloc)(struct rtattr **tb);
12899 void (*free)(struct crypto_instance *inst);
12900 int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
12903 struct crypto_spawn {
12904 struct list_head list;
12905 struct crypto_alg *alg;
12906 struct crypto_instance *inst;
12907 const struct crypto_type *frontend;
12910 struct crypto_queue {
12911 struct list_head list;
12912 struct list_head *backlog;
12914 unsigned int max_qlen;
12916 struct scatter_walk {
12917 struct scatterlist *sg;
12918 unsigned int offset;
12920 struct blkcipher_walk {
12924 unsigned long offset;
12931 struct scatter_walk in;
12932 unsigned int nbytes;
12933 struct scatter_walk out;
12934 unsigned int total;
12939 unsigned int blocksize;
12941 struct ablkcipher_walk {
12944 unsigned int offset;
12946 struct scatter_walk in;
12947 unsigned int nbytes;
12948 struct scatter_walk out;
12949 unsigned int total;
12950 struct list_head buffers;
12954 unsigned int blocksize;
12956 extern const struct crypto_type crypto_ablkcipher_type;
12957 extern const struct crypto_type crypto_aead_type;
12958 extern const struct crypto_type crypto_blkcipher_type;
12959 void crypto_mod_put(struct crypto_alg *alg);
12960 int crypto_register_template(struct crypto_template *tmpl);
12961 void crypto_unregister_template(struct crypto_template *tmpl);
12962 struct crypto_template *crypto_lookup_template(const char *name);
12963 int crypto_register_instance(struct crypto_template *tmpl,
12964 struct crypto_instance *inst);
12965 int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
12966 struct crypto_instance *inst, u32 mask);
12967 int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
12968 struct crypto_instance *inst,
12969 const struct crypto_type *frontend);
12970 void crypto_drop_spawn(struct crypto_spawn *spawn);
12971 struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
12973 void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
12974 static inline __attribute__((always_inline)) void crypto_set_spawn(struct crypto_spawn *spawn,
12975 struct crypto_instance *inst)
12977 spawn->inst = inst;
12979 struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
12980 int crypto_check_attr_type(struct rtattr **tb, u32 type);
12981 const char *crypto_attr_alg_name(struct rtattr *rta);
12982 struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
12983 const struct crypto_type *frontend,
12984 u32 type, u32 mask);
12985 static inline __attribute__((always_inline)) struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
12986 u32 type, u32 mask)
12988 return crypto_attr_alg2(rta, ((void *)0), type, mask);
12990 int crypto_attr_u32(struct rtattr *rta, u32 *num);
12991 void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
12992 unsigned int head);
12993 struct crypto_instance *crypto_alloc_instance(const char *name,
12994 struct crypto_alg *alg);
12995 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
12996 int crypto_enqueue_request(struct crypto_queue *queue,
12997 struct crypto_async_request *request);
12998 void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
12999 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
13000 int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
13001 void crypto_inc(u8 *a, unsigned int size);
13002 void crypto_xor(u8 *dst, const u8 *src, unsigned int size);
13003 int blkcipher_walk_done(struct blkcipher_desc *desc,
13004 struct blkcipher_walk *walk, int err);
13005 int blkcipher_walk_virt(struct blkcipher_desc *desc,
13006 struct blkcipher_walk *walk);
13007 int blkcipher_walk_phys(struct blkcipher_desc *desc,
13008 struct blkcipher_walk *walk);
13009 int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
13010 struct blkcipher_walk *walk,
13011 unsigned int blocksize);
13012 int ablkcipher_walk_done(struct ablkcipher_request *req,
13013 struct ablkcipher_walk *walk, int err);
13014 int ablkcipher_walk_phys(struct ablkcipher_request *req,
13015 struct ablkcipher_walk *walk);
13016 void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
13017 static inline __attribute__((always_inline)) void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
13019 return ((typeof(crypto_tfm_ctx(tfm)))(((((unsigned long)(crypto_tfm_ctx(tfm)))) + ((typeof(((unsigned long)(crypto_tfm_ctx(tfm)))))(((crypto_tfm_alg_alignmask(tfm) + 1))) - 1)) & ~((typeof(((unsigned long)(crypto_tfm_ctx(tfm)))))(((crypto_tfm_alg_alignmask(tfm) + 1))) - 1)))
13022 static inline __attribute__((always_inline)) struct crypto_instance *crypto_tfm_alg_instance(
13023 struct crypto_tfm *tfm)
13025 return ({ const typeof( ((struct crypto_instance *)0)->alg ) *__mptr = (tfm->__crt_alg); (struct crypto_instance *)( (char *)__mptr - __builtin_offsetof(struct crypto_instance,alg) );});
13027 static inline __attribute__((always_inline)) void *crypto_instance_ctx(struct crypto_instance *inst)
13029 return inst->__ctx;
13031 static inline __attribute__((always_inline)) struct ablkcipher_alg *crypto_ablkcipher_alg(
13032 struct crypto_ablkcipher *tfm)
13034 return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_u.ablkcipher;
13036 static inline __attribute__((always_inline)) void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
13038 return crypto_tfm_ctx(&tfm->base);
13040 static inline __attribute__((always_inline)) void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
13042 return crypto_tfm_ctx_aligned(&tfm->base);
13044 static inline __attribute__((always_inline)) struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
13046 return &crypto_aead_tfm(tfm)->__crt_alg->cra_u.aead;
13048 static inline __attribute__((always_inline)) void *crypto_aead_ctx(struct crypto_aead *tfm)
13050 return crypto_tfm_ctx(&tfm->base);
13052 static inline __attribute__((always_inline)) struct crypto_instance *crypto_aead_alg_instance(
13053 struct crypto_aead *aead)
13055 return crypto_tfm_alg_instance(&aead->base);
13057 static inline __attribute__((always_inline)) struct crypto_blkcipher *crypto_spawn_blkcipher(
13058 struct crypto_spawn *spawn)
13060 u32 type = 0x00000004;
13061 u32 mask = 0x0000000f;
13062 return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
13064 static inline __attribute__((always_inline)) void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
13066 return crypto_tfm_ctx(&tfm->base);
13068 static inline __attribute__((always_inline)) void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
13070 return crypto_tfm_ctx_aligned(&tfm->base);
13072 static inline __attribute__((always_inline)) struct crypto_cipher *crypto_spawn_cipher(
13073 struct crypto_spawn *spawn)
13075 u32 type = 0x00000001;
13076 u32 mask = 0x0000000f;
13077 return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
13079 static inline __attribute__((always_inline)) struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
13081 return &crypto_cipher_tfm(tfm)->__crt_alg->cra_u.cipher;
13083 static inline __attribute__((always_inline)) struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn)
13085 u32 type = 0x00000008;
13086 u32 mask = 0x0000000e;
13087 return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask));
13089 static inline __attribute__((always_inline)) void *crypto_hash_ctx(struct crypto_hash *tfm)
13091 return crypto_tfm_ctx(&tfm->base);
13093 static inline __attribute__((always_inline)) void *crypto_hash_ctx_aligned(struct crypto_hash *tfm)
13095 return crypto_tfm_ctx_aligned(&tfm->base);
13097 static inline __attribute__((always_inline)) void blkcipher_walk_init(struct blkcipher_walk *walk,
13098 struct scatterlist *dst,
13099 struct scatterlist *src,
13100 unsigned int nbytes)
13103 walk->out.sg = dst;
13104 walk->total = nbytes;
13106 static inline __attribute__((always_inline)) void ablkcipher_walk_init(struct ablkcipher_walk *walk,
13107 struct scatterlist *dst,
13108 struct scatterlist *src,
13109 unsigned int nbytes)
13112 walk->out.sg = dst;
13113 walk->total = nbytes;
13114 INIT_LIST_HEAD(&walk->buffers);
13116 static inline __attribute__((always_inline)) void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
13118 if (__builtin_constant_p((((__builtin_constant_p(!list_empty(&walk->buffers)) ? !!(!list_empty(&walk->buffers)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/algapi.h", .line = 322, }; ______r = __builtin_expect(!!(!list_empty(&walk->buffers)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!list_empty(&walk->buffers)) ? !!(!list_empty(&walk->buffers)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/algapi.h", .line = 322, }; ______r = __builtin_expect(!!(!list_empty(&walk->buffers)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/algapi.h", .line = 322, }; ______r = !!(((__builtin_constant_p(!list_empty(&walk->buffers)) ? !!(!list_empty(&walk->buffers)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/algapi.h", .line = 322, }; ______r = __builtin_expect(!!(!list_empty(&walk->buffers)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
13119 __ablkcipher_walk_complete(walk);
13121 static inline __attribute__((always_inline)) struct crypto_async_request *crypto_get_backlog(
13122 struct crypto_queue *queue)
13124 return queue->backlog == &queue->list ? ((void *)0) :
13125 ({ const typeof( ((struct crypto_async_request *)0)->list ) *__mptr = (queue->backlog); (struct crypto_async_request *)( (char *)__mptr - __builtin_offsetof(struct crypto_async_request,list) );});
13127 static inline __attribute__((always_inline)) int ablkcipher_enqueue_request(struct crypto_queue *queue,
13128 struct ablkcipher_request *request)
13130 return crypto_enqueue_request(queue, &request->base);
13132 static inline __attribute__((always_inline)) struct ablkcipher_request *ablkcipher_dequeue_request(
13133 struct crypto_queue *queue)
13135 return ablkcipher_request_cast(crypto_dequeue_request(queue));
13137 static inline __attribute__((always_inline)) void *ablkcipher_request_ctx(struct ablkcipher_request *req)
13141 static inline __attribute__((always_inline)) int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
13142 struct crypto_ablkcipher *tfm)
13144 return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
13146 static inline __attribute__((always_inline)) void *aead_request_ctx(struct aead_request *req)
13150 static inline __attribute__((always_inline)) void aead_request_complete(struct aead_request *req, int err)
13152 req->base.complete(&req->base, err);
13154 static inline __attribute__((always_inline)) u32 aead_request_flags(struct aead_request *req)
13156 return req->base.flags;
13158 static inline __attribute__((always_inline)) struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
13159 u32 type, u32 mask)
13161 return crypto_attr_alg(tb[1], type, mask);
13163 static inline __attribute__((always_inline)) int crypto_requires_sync(u32 type, u32 mask)
13165 return (type ^ 0x00000080) & mask & 0x00000080;
13167 struct crypto_aes_ctx {
13168 u32 key_enc[((15 * 16) / sizeof(u32))];
13169 u32 key_dec[((15 * 16) / sizeof(u32))];
13172 extern const u32 crypto_ft_tab[4][256];
13173 extern const u32 crypto_fl_tab[4][256];
13174 extern const u32 crypto_it_tab[4][256];
13175 extern const u32 crypto_il_tab[4][256];
13176 int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
13177 unsigned int key_len);
13178 int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
13179 unsigned int key_len);
13180 struct crypto_ahash;
13181 struct hash_alg_common {
13182 unsigned int digestsize;
13183 unsigned int statesize;
13184 struct crypto_alg base;
13186 struct ahash_request {
13187 struct crypto_async_request base;
13188 unsigned int nbytes;
13189 struct scatterlist *src;
13192 void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
13195 int (*init)(struct ahash_request *req);
13196 int (*update)(struct ahash_request *req);
13197 int (*final)(struct ahash_request *req);
13198 int (*finup)(struct ahash_request *req);
13199 int (*digest)(struct ahash_request *req);
13200 int (*export)(struct ahash_request *req, void *out);
13201 int (*import)(struct ahash_request *req, const void *in);
13202 int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
13203 unsigned int keylen);
13204 struct hash_alg_common halg;
13206 struct shash_desc {
13207 struct crypto_shash *tfm;
13209 void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
13212 int (*init)(struct shash_desc *desc);
13213 int (*update)(struct shash_desc *desc, const u8 *data,
13215 int (*final)(struct shash_desc *desc, u8 *out);
13216 int (*finup)(struct shash_desc *desc, const u8 *data,
13217 unsigned int len, u8 *out);
13218 int (*digest)(struct shash_desc *desc, const u8 *data,
13219 unsigned int len, u8 *out);
13220 int (*export)(struct shash_desc *desc, void *out);
13221 int (*import)(struct shash_desc *desc, const void *in);
13222 int (*setkey)(struct crypto_shash *tfm, const u8 *key,
13223 unsigned int keylen);
13224 unsigned int descsize;
13225 unsigned int digestsize
13226 __attribute__ ((aligned(__alignof__(struct hash_alg_common))));
13227 unsigned int statesize;
13228 struct crypto_alg base;
13230 struct crypto_ahash {
13231 int (*init)(struct ahash_request *req);
13232 int (*update)(struct ahash_request *req);
13233 int (*final)(struct ahash_request *req);
13234 int (*finup)(struct ahash_request *req);
13235 int (*digest)(struct ahash_request *req);
13236 int (*export)(struct ahash_request *req, void *out);
13237 int (*import)(struct ahash_request *req, const void *in);
13238 int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
13239 unsigned int keylen);
13240 unsigned int reqsize;
13241 struct crypto_tfm base;
13243 struct crypto_shash {
13244 unsigned int descsize;
13245 struct crypto_tfm base;
/*
 * NOTE(review): this file is line-numbered, cpp-expanded kernel header output
 * (include/crypto/hash.h); intervening lines (braces, some bodies) were lost
 * in extraction, so functions below appear as header + statement only.
 *
 * Asynchronous hash (ahash) transform accessors.
 */
/* Downcast a generic crypto_tfm to its enclosing crypto_ahash (container_of). */
13247 static inline __attribute__((always_inline)) struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
13249 return ({ const typeof( ((struct crypto_ahash *)0)->base ) *__mptr = (tfm); (struct crypto_ahash *)( (char *)__mptr - __builtin_offsetof(struct crypto_ahash,base) );});
13251 struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
13253 static inline __attribute__((always_inline)) struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
/* Release an ahash transform via the generic tfm destructor. */
13257 static inline __attribute__((always_inline)) void crypto_free_ahash(struct crypto_ahash *tfm)
13259 crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
13261 static inline __attribute__((always_inline)) unsigned int crypto_ahash_alignmask(
13262 struct crypto_ahash *tfm)
13264 return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
/* container_of from the embedded base alg back to hash_alg_common. */
13266 static inline __attribute__((always_inline)) struct hash_alg_common *__crypto_hash_alg_common(
13267 struct crypto_alg *alg)
13269 return ({ const typeof( ((struct hash_alg_common *)0)->base ) *__mptr = (alg); (struct hash_alg_common *)( (char *)__mptr - __builtin_offsetof(struct hash_alg_common,base) );});
13271 static inline __attribute__((always_inline)) struct hash_alg_common *crypto_hash_alg_common(
13272 struct crypto_ahash *tfm)
13274 return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
/* Digest and exported-state sizes come from the algorithm descriptor. */
13276 static inline __attribute__((always_inline)) unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
13278 return crypto_hash_alg_common(tfm)->digestsize;
13280 static inline __attribute__((always_inline)) unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
13282 return crypto_hash_alg_common(tfm)->statesize;
/* Flag accessors delegate to the generic crypto_tfm flag helpers. */
13284 static inline __attribute__((always_inline)) u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
13286 return crypto_tfm_get_flags(crypto_ahash_tfm(tfm));
13288 static inline __attribute__((always_inline)) void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags)
13290 crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags);
13292 static inline __attribute__((always_inline)) void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
13294 crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
/* Recover the transform that owns a request. */
13296 static inline __attribute__((always_inline)) struct crypto_ahash *crypto_ahash_reqtfm(
13297 struct ahash_request *req)
13299 return __crypto_ahash_cast(req->base.tfm);
/* Per-request private context size declared by the ahash implementation. */
13301 static inline __attribute__((always_inline)) unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
13303 return tfm->reqsize;
/*
 * ahash request-level API: out-of-line prototypes plus inline wrappers that
 * dispatch through the function pointers stored on the transform itself.
 * NOTE(review): braces/bodies elided by extraction; code kept byte-identical.
 */
13305 static inline __attribute__((always_inline)) void *ahash_request_ctx(struct ahash_request *req)
13309 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
13310 unsigned int keylen);
13311 int crypto_ahash_finup(struct ahash_request *req);
13312 int crypto_ahash_final(struct ahash_request *req);
13313 int crypto_ahash_digest(struct ahash_request *req);
/* export/import/init/update all forward to the owning tfm's ops. */
13314 static inline __attribute__((always_inline)) int crypto_ahash_export(struct ahash_request *req, void *out)
13316 return crypto_ahash_reqtfm(req)->export(req, out);
13318 static inline __attribute__((always_inline)) int crypto_ahash_import(struct ahash_request *req, const void *in)
13320 return crypto_ahash_reqtfm(req)->import(req, in);
13322 static inline __attribute__((always_inline)) int crypto_ahash_init(struct ahash_request *req)
13324 return crypto_ahash_reqtfm(req)->init(req);
13326 static inline __attribute__((always_inline)) int crypto_ahash_update(struct ahash_request *req)
13328 return crypto_ahash_reqtfm(req)->update(req);
/* Bind a request to a transform (stores the generic tfm pointer). */
13330 static inline __attribute__((always_inline)) void ahash_request_set_tfm(struct ahash_request *req,
13331 struct crypto_ahash *tfm)
13333 req->base.tfm = crypto_ahash_tfm(tfm);
13335 static inline __attribute__((always_inline)) struct ahash_request *ahash_request_alloc(
13336 struct crypto_ahash *tfm, gfp_t gfp)
13338 struct ahash_request *req;
13339 req = kmalloc(sizeof(struct ahash_request) +
13340 crypto_ahash_reqsize(tfm), gfp);
13341 if (__builtin_constant_p((((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/hash.h", .line = 222, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/hash.h", .line = 222, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/hash.h", .line = 222, }; ______r = !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/hash.h", .line = 222, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
13342 ahash_request_set_tfm(req, tfm);
/* Request lifetime and parameter helpers (bodies partially elided by extraction). */
13345 static inline __attribute__((always_inline)) void ahash_request_free(struct ahash_request *req)
/* container_of from the embedded async base back to the ahash request. */
13349 static inline __attribute__((always_inline)) struct ahash_request *ahash_request_cast(
13350 struct crypto_async_request *req)
13352 return ({ const typeof( ((struct ahash_request *)0)->base ) *__mptr = (req); (struct ahash_request *)( (char *)__mptr - __builtin_offsetof(struct ahash_request,base) );});
/* Install the async completion callback, opaque data and request flags. */
13354 static inline __attribute__((always_inline)) void ahash_request_set_callback(struct ahash_request *req,
13356 crypto_completion_t complete,
13359 req->base.complete = complete;
13360 req->base.data = data;
13361 req->base.flags = flags;
/* Set the scatterlist source, output buffer and byte count for the request. */
13363 static inline __attribute__((always_inline)) void ahash_request_set_crypt(struct ahash_request *req,
13364 struct scatterlist *src, u8 *result,
13365 unsigned int nbytes)
13368 req->nbytes = nbytes;
13369 req->result = result;
/*
 * Synchronous hash (shash) API: accessors mirror the ahash ones above but
 * dispatch through the shash_alg descriptor rather than per-tfm pointers.
 * NOTE(review): braces/bodies elided by extraction; code kept byte-identical.
 */
13371 struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
13373 static inline __attribute__((always_inline)) struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
13377 static inline __attribute__((always_inline)) void crypto_free_shash(struct crypto_shash *tfm)
13379 crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
13381 static inline __attribute__((always_inline)) unsigned int crypto_shash_alignmask(
13382 struct crypto_shash *tfm)
13384 return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
13386 static inline __attribute__((always_inline)) unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
13388 return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
/* container_of from the embedded base alg back to the shash_alg descriptor. */
13390 static inline __attribute__((always_inline)) struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
13392 return ({ const typeof( ((struct shash_alg *)0)->base ) *__mptr = (alg); (struct shash_alg *)( (char *)__mptr - __builtin_offsetof(struct shash_alg,base) );});
13394 static inline __attribute__((always_inline)) struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
13396 return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
13398 static inline __attribute__((always_inline)) unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
13400 return crypto_shash_alg(tfm)->digestsize;
13402 static inline __attribute__((always_inline)) unsigned int crypto_shash_statesize(struct crypto_shash *tfm)
13404 return crypto_shash_alg(tfm)->statesize;
13406 static inline __attribute__((always_inline)) u32 crypto_shash_get_flags(struct crypto_shash *tfm)
13408 return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
13410 static inline __attribute__((always_inline)) void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags)
13412 crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags);
13414 static inline __attribute__((always_inline)) void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
13416 crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
/* Size of the caller-allocated shash_desc private context. */
13418 static inline __attribute__((always_inline)) unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
13420 return tfm->descsize;
13422 static inline __attribute__((always_inline)) void *shash_desc_ctx(struct shash_desc *desc)
13424 return desc->__ctx;
13426 int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
13427 unsigned int keylen);
13428 int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
13429 unsigned int len, u8 *out);
/* export/import/init dispatch via the algorithm descriptor of desc->tfm. */
13430 static inline __attribute__((always_inline)) int crypto_shash_export(struct shash_desc *desc, void *out)
13432 return crypto_shash_alg(desc->tfm)->export(desc, out);
13434 static inline __attribute__((always_inline)) int crypto_shash_import(struct shash_desc *desc, const void *in)
13436 return crypto_shash_alg(desc->tfm)->import(desc, in);
13438 static inline __attribute__((always_inline)) int crypto_shash_init(struct shash_desc *desc)
13440 return crypto_shash_alg(desc->tfm)->init(desc);
13442 int crypto_shash_update(struct shash_desc *desc, const u8 *data,
13444 int crypto_shash_final(struct shash_desc *desc, u8 *out);
13445 int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
13446 unsigned int len, u8 *out);
/*
 * cryptd: wrapper types for transforms run asynchronously in a workqueue.
 * Each wrapper embeds its base type as the first member, so the *_cast
 * helpers are plain pointer casts (layout-compatible by construction).
 * NOTE(review): struct bodies partially elided by extraction.
 */
13447 struct cryptd_ablkcipher {
13448 struct crypto_ablkcipher base;
13450 static inline __attribute__((always_inline)) struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
13451 struct crypto_ablkcipher *tfm)
13453 return (struct cryptd_ablkcipher *)tfm;
13455 struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
13456 u32 type, u32 mask);
13457 struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
13458 void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
13459 struct cryptd_ahash {
13460 struct crypto_ahash base;
13462 static inline __attribute__((always_inline)) struct cryptd_ahash *__cryptd_ahash_cast(
13463 struct crypto_ahash *tfm)
13465 return (struct cryptd_ahash *)tfm;
13467 struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
13468 u32 type, u32 mask);
13469 struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
13470 struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
13471 void cryptd_free_ahash(struct cryptd_ahash *tfm);
13472 struct cryptd_aead {
13473 struct crypto_aead base;
13475 static inline __attribute__((always_inline)) struct cryptd_aead *__cryptd_aead_cast(
13476 struct crypto_aead *tfm)
13478 return (struct cryptd_aead *)tfm;
13480 struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
13481 u32 type, u32 mask);
13482 struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
13483 void cryptd_free_aead(struct cryptd_aead *tfm);
13484 struct sched_param {
13485 int sched_priority;
13487 struct task_struct;
13488 typedef struct __user_cap_header_struct {
13491 } *cap_user_header_t;
13492 typedef struct __user_cap_data_struct {
13496 } *cap_user_data_t;
13497 struct vfs_cap_data {
13501 __le32 inheritable;
13504 extern int file_caps_enabled;
13505 typedef struct kernel_cap_struct {
13508 struct cpu_vfs_cap_data {
13510 kernel_cap_t permitted;
13511 kernel_cap_t inheritable;
13514 struct user_namespace;
13515 struct user_namespace *current_user_ns(void);
13516 extern const kernel_cap_t __cap_empty_set;
13517 extern const kernel_cap_t __cap_full_set;
13518 extern const kernel_cap_t __cap_init_eff_set;
/*
 * Kernel capability-set helpers (expanded from linux/capability.h).
 * kernel_cap_t is a 2-word (64-bit) bitmask; the CAP_FOR_EACH_U32 macro
 * expanded here to "for (__capi = 0; __capi < 2; ++__capi)".
 * NOTE(review): braces/bodies partially elided by extraction.
 */
13519 static inline __attribute__((always_inline)) kernel_cap_t cap_combine(const kernel_cap_t a,
13520 const kernel_cap_t b)
13523 do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] | b.cap[__capi]; } } while (0);
13526 static inline __attribute__((always_inline)) kernel_cap_t cap_intersect(const kernel_cap_t a,
13527 const kernel_cap_t b)
13530 do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] & b.cap[__capi]; } } while (0);
13533 static inline __attribute__((always_inline)) kernel_cap_t cap_drop(const kernel_cap_t a,
13534 const kernel_cap_t drop)
13537 do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] &~ drop.cap[__capi]; } } while (0);
13540 static inline __attribute__((always_inline)) kernel_cap_t cap_invert(const kernel_cap_t c)
13543 do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = ~ c.cap[__capi]; } } while (0);
13546 static inline __attribute__((always_inline)) int cap_isclear(const kernel_cap_t a)
13549 for (__capi = 0; __capi < 2; ++__capi) {
13550 if (__builtin_constant_p(((a.cap[__capi] != 0))) ? !!((a.cap[__capi] != 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/capability.h", .line = 486, }; ______r = !!((a.cap[__capi] != 0)); ______f.miss_hit[______r]++; ______r; }))
/* a is a subset of set iff dropping set's bits from a leaves nothing. */
13555 static inline __attribute__((always_inline)) int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
13558 dest = cap_drop(a, set);
13559 return cap_isclear(dest);
13561 static inline __attribute__((always_inline)) int cap_is_fs_cap(int cap)
13563 const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
13564 return !!((1 << ((cap) & 31)) & __cap_fs_set.cap[((cap) >> 5)]);
13566 static inline __attribute__((always_inline)) kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
13568 const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
13569 return cap_drop(a, __cap_fs_set);
13571 static inline __attribute__((always_inline)) kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
13572 const kernel_cap_t permitted)
13574 const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
13575 return cap_combine(a,
13576 cap_intersect(permitted, __cap_fs_set));
/* NOTE(review): the nfsd variant's local is named __cap_fs_set here but
 * holds the nfsd mask (bit 24 instead of 9) — a known upstream naming quirk,
 * kept byte-identical. */
13578 static inline __attribute__((always_inline)) kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
13580 const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } });
13581 return cap_drop(a, __cap_fs_set);
13583 static inline __attribute__((always_inline)) kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
13584 const kernel_cap_t permitted)
13586 const kernel_cap_t __cap_nfsd_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } });
13587 return cap_combine(a,
13588 cap_intersect(permitted, __cap_nfsd_set));
/* Permission-check entry points implemented in kernel/capability.c. */
13590 extern bool has_capability(struct task_struct *t, int cap);
13591 extern bool has_ns_capability(struct task_struct *t,
13592 struct user_namespace *ns, int cap);
13593 extern bool has_capability_noaudit(struct task_struct *t, int cap);
13594 extern bool capable(int cap);
13595 extern bool ns_capable(struct user_namespace *ns, int cap);
13596 extern bool task_ns_capable(struct task_struct *t, int cap);
13597 extern bool nsown_capable(int cap);
13598 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
13601 unsigned long rb_parent_color;
13602 struct rb_node *rb_right;
13603 struct rb_node *rb_left;
13604 } __attribute__((aligned(sizeof(long))));
13607 struct rb_node *rb_node;
/*
 * Red-black tree helpers (expanded from linux/rbtree.h).
 * rb_parent_color packs the parent pointer and the node color into one
 * unsigned long: low bit = color, remaining bits = parent address.
 * NOTE(review): braces partially elided by extraction.
 */
13609 static inline __attribute__((always_inline)) void rb_set_parent(struct rb_node *rb, struct rb_node *p)
13611 rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p;
13613 static inline __attribute__((always_inline)) void rb_set_color(struct rb_node *rb, int color)
13615 rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
/* Initialize a node as detached: NULL children, parent pointing at itself. */
13617 static inline __attribute__((always_inline)) void rb_init_node(struct rb_node *rb)
13619 rb->rb_parent_color = 0;
13620 rb->rb_right = ((void *)0);
13621 rb->rb_left = ((void *)0);
13622 (rb_set_parent(rb, rb));
13624 extern void rb_insert_color(struct rb_node *, struct rb_root *);
13625 extern void rb_erase(struct rb_node *, struct rb_root *);
13626 typedef void (*rb_augment_f)(struct rb_node *node, void *data);
13627 extern void rb_augment_insert(struct rb_node *node,
13628 rb_augment_f func, void *data);
13629 extern struct rb_node *rb_augment_erase_begin(struct rb_node *node);
13630 extern void rb_augment_erase_end(struct rb_node *node,
13631 rb_augment_f func, void *data);
/* In-order traversal and node replacement, implemented in lib/rbtree.c. */
13632 extern struct rb_node *rb_next(const struct rb_node *);
13633 extern struct rb_node *rb_prev(const struct rb_node *);
13634 extern struct rb_node *rb_first(const struct rb_root *);
13635 extern struct rb_node *rb_last(const struct rb_root *);
13636 extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
13637 struct rb_root *root);
/* Link a new node under parent at *rb_link; caller must then rebalance
 * with rb_insert_color(). New node starts with color bits zeroed. */
13638 static inline __attribute__((always_inline)) void rb_link_node(struct rb_node * node, struct rb_node * parent,
13639 struct rb_node ** rb_link)
13641 node->rb_parent_color = (unsigned long )parent;
13642 node->rb_left = node->rb_right = ((void *)0);
13645 struct raw_prio_tree_node {
13646 struct prio_tree_node *left;
13647 struct prio_tree_node *right;
13648 struct prio_tree_node *parent;
13650 struct prio_tree_node {
13651 struct prio_tree_node *left;
13652 struct prio_tree_node *right;
13653 struct prio_tree_node *parent;
13654 unsigned long start;
13655 unsigned long last;
13657 struct prio_tree_root {
13658 struct prio_tree_node *prio_tree_node;
13659 unsigned short index_bits;
13660 unsigned short raw;
13662 struct prio_tree_iter {
13663 struct prio_tree_node *cur;
13664 unsigned long mask;
13665 unsigned long value;
13667 struct prio_tree_root *root;
13668 unsigned long r_index;
13669 unsigned long h_index;
/*
 * Priority-search-tree helpers (expanded from linux/prio_tree.h).
 * Empty/leaf conditions are encoded by self- or NULL-pointing links.
 * NOTE(review): braces partially elided by extraction.
 */
13671 static inline __attribute__((always_inline)) void prio_tree_iter_init(struct prio_tree_iter *iter,
13672 struct prio_tree_root *root, unsigned long r_index, unsigned long h_index)
13675 iter->r_index = r_index;
13676 iter->h_index = h_index;
13677 iter->cur = ((void *)0);
13679 static inline __attribute__((always_inline)) int prio_tree_empty(const struct prio_tree_root *root)
13681 return root->prio_tree_node == ((void *)0);
/* A root node is its own parent; empty child slots point back at the node. */
13683 static inline __attribute__((always_inline)) int prio_tree_root(const struct prio_tree_node *node)
13685 return node->parent == node;
13687 static inline __attribute__((always_inline)) int prio_tree_left_empty(const struct prio_tree_node *node)
13689 return node->left == node;
13691 static inline __attribute__((always_inline)) int prio_tree_right_empty(const struct prio_tree_node *node)
13693 return node->right == node;
13695 struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root,
13696 struct prio_tree_node *old, struct prio_tree_node *node);
13697 struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
13698 struct prio_tree_node *node);
13699 void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node);
13700 struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter);
13701 enum page_debug_flags {
13702 PAGE_DEBUG_FLAG_POISON,
13704 struct address_space;
13706 unsigned long flags;
13709 atomic_t _mapcount;
13717 unsigned long private;
13718 struct address_space *mapping;
13720 struct kmem_cache *slab;
13721 struct page *first_page;
13724 unsigned long index;
13727 struct list_head lru;
13729 typedef unsigned long vm_flags_t;
13731 struct rb_node vm_rb;
13732 vm_flags_t vm_flags;
13733 unsigned long vm_start;
13734 unsigned long vm_end;
13735 unsigned long vm_top;
13736 unsigned long vm_pgoff;
13737 struct file *vm_file;
13739 bool vm_icache_flushed : 1;
13741 struct vm_area_struct {
13742 struct mm_struct * vm_mm;
13743 unsigned long vm_start;
13744 unsigned long vm_end;
13745 struct vm_area_struct *vm_next, *vm_prev;
13746 pgprot_t vm_page_prot;
13747 unsigned long vm_flags;
13748 struct rb_node vm_rb;
13751 struct list_head list;
13753 struct vm_area_struct *head;
13755 struct raw_prio_tree_node prio_tree_node;
13757 struct list_head anon_vma_chain;
13758 struct anon_vma *anon_vma;
13759 const struct vm_operations_struct *vm_ops;
13760 unsigned long vm_pgoff;
13761 struct file * vm_file;
13762 void * vm_private_data;
13764 struct core_thread {
13765 struct task_struct *task;
13766 struct core_thread *next;
13768 struct core_state {
13769 atomic_t nr_threads;
13770 struct core_thread dumper;
13771 struct completion startup;
13779 struct mm_rss_stat {
13780 atomic_long_t count[NR_MM_COUNTERS];
13783 struct vm_area_struct * mmap;
13784 struct rb_root mm_rb;
13785 struct vm_area_struct * mmap_cache;
13786 unsigned long (*get_unmapped_area) (struct file *filp,
13787 unsigned long addr, unsigned long len,
13788 unsigned long pgoff, unsigned long flags);
13789 void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
13790 unsigned long mmap_base;
13791 unsigned long task_size;
13792 unsigned long cached_hole_size;
13793 unsigned long free_area_cache;
13798 spinlock_t page_table_lock;
13799 struct rw_semaphore mmap_sem;
13800 struct list_head mmlist;
13801 unsigned long hiwater_rss;
13802 unsigned long hiwater_vm;
13803 unsigned long total_vm, locked_vm, shared_vm, exec_vm;
13804 unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
13805 unsigned long start_code, end_code, start_data, end_data;
13806 unsigned long start_brk, brk, start_stack;
13807 unsigned long arg_start, arg_end, env_start, env_end;
13808 unsigned long saved_auxv[(2*(2 + 19 + 1))];
13809 struct mm_rss_stat rss_stat;
13810 struct linux_binfmt *binfmt;
13811 cpumask_var_t cpu_vm_mask_var;
13812 mm_context_t context;
13813 unsigned int faultstamp;
13814 unsigned int token_priority;
13815 unsigned int last_interval;
13816 atomic_t oom_disable_count;
13817 unsigned long flags;
13818 struct core_state *core_state;
13819 spinlock_t ioctx_lock;
13820 struct hlist_head ioctx_list;
13821 struct file *exe_file;
13822 unsigned long num_exe_file_vmas;
13823 struct mmu_notifier_mm *mmu_notifier_mm;
13824 pgtable_t pmd_huge_pte;
13826 static inline __attribute__((always_inline)) void mm_init_cpumask(struct mm_struct *mm)
13829 static inline __attribute__((always_inline)) cpumask_t *mm_cpumask(struct mm_struct *mm)
13831 return mm->cpu_vm_mask_var;
13833 typedef unsigned long cputime_t;
13834 typedef u64 cputime64_t;
13837 __kernel_key_t key;
13838 __kernel_uid_t uid;
13839 __kernel_gid_t gid;
13840 __kernel_uid_t cuid;
13841 __kernel_gid_t cgid;
13842 __kernel_mode_t mode;
13843 unsigned short seq;
13845 struct ipc64_perm {
13846 __kernel_key_t key;
13847 __kernel_uid32_t uid;
13848 __kernel_gid32_t gid;
13849 __kernel_uid32_t cuid;
13850 __kernel_gid32_t cgid;
13851 __kernel_mode_t mode;
13852 unsigned char __pad1[4 - sizeof(__kernel_mode_t)];
13853 unsigned short seq;
13854 unsigned short __pad2;
13855 unsigned long __unused1;
13856 unsigned long __unused2;
13858 struct ipc_kludge {
13859 struct msgbuf *msgp;
13862 struct kern_ipc_perm
13877 struct ipc_perm sem_perm;
13878 __kernel_time_t sem_otime;
13879 __kernel_time_t sem_ctime;
13880 struct sem *sem_base;
13881 struct sem_queue *sem_pending;
13882 struct sem_queue **sem_pending_last;
13883 struct sem_undo *undo;
13884 unsigned short sem_nsems;
13886 struct semid64_ds {
13887 struct ipc64_perm sem_perm;
13888 __kernel_time_t sem_otime;
13889 unsigned long __unused1;
13890 __kernel_time_t sem_ctime;
13891 unsigned long __unused2;
13892 unsigned long sem_nsems;
13893 unsigned long __unused3;
13894 unsigned long __unused4;
13897 unsigned short sem_num;
13903 struct semid_ds *buf;
13904 unsigned short *array;
13905 struct seminfo *__buf;
13920 struct task_struct;
13924 struct list_head sem_pending;
13927 struct kern_ipc_perm __attribute__((__aligned__((1 << (6)))))
13931 struct sem *sem_base;
13932 struct list_head sem_pending;
13933 struct list_head list_id;
13938 struct list_head simple_list;
13939 struct list_head list;
13940 struct task_struct *sleeper;
13941 struct sem_undo *undo;
13944 struct sembuf *sops;
13949 struct list_head list_proc;
13950 struct rcu_head rcu;
13951 struct sem_undo_list *ulp;
13952 struct list_head list_id;
13956 struct sem_undo_list {
13959 struct list_head list_proc;
13962 struct sem_undo_list *undo_list;
13964 extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
13965 extern void exit_sem(struct task_struct *tsk);
13967 typedef unsigned long old_sigset_t;
13969 unsigned long sig[(64 / 32)];
13971 typedef void __signalfn_t(int);
13972 typedef __signalfn_t *__sighandler_t;
13973 typedef void __restorefn_t(void);
13974 typedef __restorefn_t *__sigrestore_t;
13975 extern void do_notify_resume(struct pt_regs *, void *, __u32);
13976 struct old_sigaction {
13977 __sighandler_t sa_handler;
13978 old_sigset_t sa_mask;
13979 unsigned long sa_flags;
13980 __sigrestore_t sa_restorer;
13983 __sighandler_t sa_handler;
13984 unsigned long sa_flags;
13985 __sigrestore_t sa_restorer;
13988 struct k_sigaction {
13989 struct sigaction sa;
13991 typedef struct sigaltstack {
13996 static inline __attribute__((always_inline)) void __gen_sigaddset(sigset_t *set, int _sig)
13998 asm("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
14000 static inline __attribute__((always_inline)) void __const_sigaddset(sigset_t *set, int _sig)
14002 unsigned long sig = _sig - 1;
14003 set->sig[sig / 32] |= 1 << (sig % 32);
14005 static inline __attribute__((always_inline)) void __gen_sigdelset(sigset_t *set, int _sig)
14007 asm("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
14009 static inline __attribute__((always_inline)) void __const_sigdelset(sigset_t *set, int _sig)
14011 unsigned long sig = _sig - 1;
14012 set->sig[sig / 32] &= ~(1 << (sig % 32));
14014 static inline __attribute__((always_inline)) int __const_sigismember(sigset_t *set, int _sig)
14016 unsigned long sig = _sig - 1;
14017 return 1 & (set->sig[sig / 32] >> (sig % 32));
14019 static inline __attribute__((always_inline)) int __gen_sigismember(sigset_t *set, int _sig)
14022 asm("btl %2,%1\n\tsbbl %0,%0"
14023 : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
14026 static inline __attribute__((always_inline)) int sigfindinword(unsigned long word)
14028 asm("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
14032 typedef union sigval {
14036 typedef struct siginfo {
14041 int _pad[((128 - (3 * sizeof(int))) / sizeof(int))];
14043 __kernel_pid_t _pid;
14044 __kernel_uid32_t _uid;
14047 __kernel_timer_t _tid;
14049 char _pad[sizeof( __kernel_uid32_t) - sizeof(int)];
14054 __kernel_pid_t _pid;
14055 __kernel_uid32_t _uid;
14059 __kernel_pid_t _pid;
14060 __kernel_uid32_t _uid;
14062 __kernel_clock_t _utime;
14063 __kernel_clock_t _stime;
14075 typedef struct sigevent {
14076 sigval_t sigev_value;
14080 int _pad[((64 - (sizeof(int) * 2 + sizeof(sigval_t))) / sizeof(int))];
14083 void (*_function)(sigval_t);
14089 void do_schedule_next_timer(struct siginfo *info);
14090 static inline __attribute__((always_inline)) void copy_siginfo(struct siginfo *to, struct siginfo *from)
14092 if (__builtin_constant_p(((from->si_code < 0))) ? !!((from->si_code < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/siginfo.h", .line = 289, }; ______r = !!((from->si_code < 0)); ______f.miss_hit[______r]++; ______r; }))
14093 __builtin_memcpy(to, from, sizeof(*to));
14095 __builtin_memcpy(to, from, (3 * sizeof(int)) + sizeof(from->_sifields._sigchld));
14097 extern int copy_siginfo_to_user(struct siginfo *to, struct siginfo *from);
14098 struct task_struct;
14099 extern int print_fatal_signals;
14101 struct list_head list;
14104 struct user_struct *user;
14106 struct sigpending {
14107 struct list_head list;
/*
 * sigset_t manipulation helpers (expanded from linux/signal.h).
 * _NSIG_WORDS expanded to (64 / 32) == 2 words on this configuration; the
 * switch cases for 4 words remain from the generic macro expansion.
 * NOTE(review): braces/case labels partially elided by extraction.
 */
14110 static inline __attribute__((always_inline)) int sigisemptyset(sigset_t *set)
14112 extern void _NSIG_WORDS_is_unsupported_size(void);
14113 switch ((64 / 32)) {
14115 return (set->sig[3] | set->sig[2] |
14116 set->sig[1] | set->sig[0]) == 0;
14118 return (set->sig[1] | set->sig[0]) == 0;
14120 return set->sig[0] == 0;
14122 _NSIG_WORDS_is_unsupported_size();
/* Word-wise set union / intersection / difference / complement; generated
 * by the _SIG_SET_OP macros, hence the single-line bodies. */
14126 static inline __attribute__((always_inline)) void sigorsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) | (b3)); r->sig[2] = ((a2) | (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) | (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) | (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
14127 static inline __attribute__((always_inline)) void sigandsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & (b3)); r->sig[2] = ((a2) & (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
14128 static inline __attribute__((always_inline)) void sigandnsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & ~(b3)); r->sig[2] = ((a2) & ~(b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & ~(b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & ~(b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
14129 static inline __attribute__((always_inline)) void signotset(sigset_t *set) { extern void _NSIG_WORDS_is_unsupported_size(void); switch ((64 / 32)) { case 4: set->sig[3] = (~(set->sig[3])); set->sig[2] = (~(set->sig[2])); case 2: set->sig[1] = (~(set->sig[1])); case 1: set->sig[0] = (~(set->sig[0])); break; default: _NSIG_WORDS_is_unsupported_size(); } }
/* Clear / fill the whole set (memset fast path for the 4-word case). */
14130 static inline __attribute__((always_inline)) void sigemptyset(sigset_t *set)
14132 switch ((64 / 32)) {
14134 __builtin_memset(set, 0, sizeof(sigset_t));
14136 case 2: set->sig[1] = 0;
14137 case 1: set->sig[0] = 0;
14141 static inline __attribute__((always_inline)) void sigfillset(sigset_t *set)
14143 switch ((64 / 32)) {
14145 __builtin_memset(set, -1, sizeof(sigset_t));
14147 case 2: set->sig[1] = -1;
14148 case 1: set->sig[0] = -1;
/* Mask-based helpers operate on the first word (signals 1..32) only. */
14152 static inline __attribute__((always_inline)) void sigaddsetmask(sigset_t *set, unsigned long mask)
14154 set->sig[0] |= mask;
14156 static inline __attribute__((always_inline)) void sigdelsetmask(sigset_t *set, unsigned long mask)
14158 set->sig[0] &= ~mask;
14160 static inline __attribute__((always_inline)) int sigtestsetmask(sigset_t *set, unsigned long mask)
14162 return (set->sig[0] & mask) != 0;
/* siginitset(inv): word 0 from mask, remaining words zeroed (or all-ones). */
14164 static inline __attribute__((always_inline)) void siginitset(sigset_t *set, unsigned long mask)
14166 set->sig[0] = mask;
14167 switch ((64 / 32)) {
14169 __builtin_memset(&set->sig[1], 0, sizeof(long)*((64 / 32)-1));
14171 case 2: set->sig[1] = 0;
14175 static inline __attribute__((always_inline)) void siginitsetinv(sigset_t *set, unsigned long mask)
14177 set->sig[0] = ~mask;
14178 switch ((64 / 32)) {
14180 __builtin_memset(&set->sig[1], -1, sizeof(long)*((64 / 32)-1));
14182 case 2: set->sig[1] = -1;
14186 static inline __attribute__((always_inline)) void init_sigpending(struct sigpending *sig)
14188 sigemptyset(&sig->signal);
14189 INIT_LIST_HEAD(&sig->list);
14191 extern void flush_sigqueue(struct sigpending *queue);
/* Signal numbers are 1..64 (_NSIG) on this configuration. */
14192 static inline __attribute__((always_inline)) int valid_signal(unsigned long sig)
14194 return sig <= 64 ? 1 : 0;
14198 extern int next_signal(struct sigpending *pending, sigset_t *mask);
14199 extern int do_send_sig_info(int sig, struct siginfo *info,
14200 struct task_struct *p, bool group);
14201 extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
14202 extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
14203 extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
14205 extern long do_sigpending(void *, unsigned long);
14206 extern int do_sigtimedwait(const sigset_t *, siginfo_t *,
14207 const struct timespec *);
14208 extern int sigprocmask(int, sigset_t *, sigset_t *);
14209 extern void set_current_blocked(const sigset_t *);
14210 extern int show_unhandled_signals;
14211 extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
14212 extern void exit_signals(struct task_struct *tsk);
14213 extern struct kmem_cache *sighand_cachep;
14214 int unhandled_signal(struct task_struct *tsk, int sig);
14215 void signals_init(void);
14225 struct pid_namespace *ns;
14226 struct hlist_node pid_chain;
14231 unsigned int level;
14232 struct hlist_head tasks[PIDTYPE_MAX];
14233 struct rcu_head rcu;
14234 struct upid numbers[1];
14236 extern struct pid init_struct_pid;
14239 struct hlist_node node;
14242 static inline __attribute__((always_inline)) struct pid *get_pid(struct pid *pid)
14244 if (__builtin_constant_p(((pid))) ? !!((pid)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/pid.h", .line = 77, }; ______r = !!((pid)); ______f.miss_hit[______r]++; ______r; }))
14245 atomic_inc(&pid->count);
14248 extern void put_pid(struct pid *pid);
14249 extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
14250 extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);
14251 extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
14252 extern void attach_pid(struct task_struct *task, enum pid_type type,
14254 extern void detach_pid(struct task_struct *task, enum pid_type);
14255 extern void change_pid(struct task_struct *task, enum pid_type,
14257 extern void transfer_pid(struct task_struct *old, struct task_struct *new,
14259 struct pid_namespace;
14260 extern struct pid_namespace init_pid_ns;
14261 extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns);
14262 extern struct pid *find_vpid(int nr);
14263 extern struct pid *find_get_pid(int nr);
14264 extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
14265 int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
14266 extern struct pid *alloc_pid(struct pid_namespace *ns);
14267 extern void free_pid(struct pid *pid);
14268 static inline __attribute__((always_inline)) struct pid_namespace *ns_of_pid(struct pid *pid)
14270 struct pid_namespace *ns = ((void *)0);
14271 if (__builtin_constant_p(((pid))) ? !!((pid)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/pid.h", .line = 138, }; ______r = !!((pid)); ______f.miss_hit[______r]++; ______r; }))
14272 ns = pid->numbers[pid->level].ns;
14275 static inline __attribute__((always_inline)) bool is_child_reaper(struct pid *pid)
14277 return pid->numbers[pid->level].nr == 1;
14279 static inline __attribute__((always_inline)) pid_t pid_nr(struct pid *pid)
14282 if (__builtin_constant_p(((pid))) ? !!((pid)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/pid.h", .line = 168, }; ______r = !!((pid)); ______f.miss_hit[______r]++; ______r; }))
14283 nr = pid->numbers[0].nr;
14286 pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
14287 pid_t pid_vnr(struct pid *pid);
14288 struct percpu_counter {
14291 struct list_head list;
14294 extern int percpu_counter_batch;
14295 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
14296 struct lock_class_key *key);
14297 void percpu_counter_destroy(struct percpu_counter *fbc);
14298 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
14299 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
14300 s64 __percpu_counter_sum(struct percpu_counter *fbc);
14301 int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
14302 static inline __attribute__((always_inline)) void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
14304 __percpu_counter_add(fbc, amount, percpu_counter_batch);
14306 static inline __attribute__((always_inline)) s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
14308 s64 ret = __percpu_counter_sum(fbc);
14309 return ret < 0 ? 0 : ret;
14311 static inline __attribute__((always_inline)) s64 percpu_counter_sum(struct percpu_counter *fbc)
14313 return __percpu_counter_sum(fbc);
14315 static inline __attribute__((always_inline)) s64 percpu_counter_read(struct percpu_counter *fbc)
14319 static inline __attribute__((always_inline)) s64 percpu_counter_read_positive(struct percpu_counter *fbc)
14321 s64 ret = fbc->count;
14322 __asm__ __volatile__("": : :"memory");
14323 if (__builtin_constant_p(((ret >= 0))) ? !!((ret >= 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/percpu_counter.h", .line = 76, }; ______r = !!((ret >= 0)); ______f.miss_hit[______r]++; ______r; }))
14327 static inline __attribute__((always_inline)) int percpu_counter_initialized(struct percpu_counter *fbc)
14329 return (fbc->counters != ((void *)0));
14331 static inline __attribute__((always_inline)) void percpu_counter_inc(struct percpu_counter *fbc)
14333 percpu_counter_add(fbc, 1);
14335 static inline __attribute__((always_inline)) void percpu_counter_dec(struct percpu_counter *fbc)
14337 percpu_counter_add(fbc, -1);
14339 static inline __attribute__((always_inline)) void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
14341 percpu_counter_add(fbc, -amount);
14343 struct prop_global {
14345 struct percpu_counter events;
14347 struct prop_descriptor {
14349 struct prop_global pg[2];
14350 struct mutex mutex;
14352 int prop_descriptor_init(struct prop_descriptor *pd, int shift);
14353 void prop_change_shift(struct prop_descriptor *pd, int new_shift);
14354 struct prop_local_percpu {
14355 struct percpu_counter events;
14357 unsigned long period;
14360 int prop_local_init_percpu(struct prop_local_percpu *pl);
14361 void prop_local_destroy_percpu(struct prop_local_percpu *pl);
14362 void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
14363 void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
14364 long *numerator, long *denominator);
14365 static inline __attribute__((always_inline))
14366 void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
14368 unsigned long flags;
14369 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
14370 __prop_inc_percpu(pd, pl);
14371 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/proportions.h", .line = 77, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
14373 void __prop_inc_percpu_max(struct prop_descriptor *pd,
14374 struct prop_local_percpu *pl, long frac);
14375 struct prop_local_single {
14376 unsigned long events;
14377 unsigned long period;
14381 int prop_local_init_single(struct prop_local_single *pl);
14382 void prop_local_destroy_single(struct prop_local_single *pl);
14383 void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
14384 void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
14385 long *numerator, long *denominator);
14386 static inline __attribute__((always_inline))
14387 void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
14389 unsigned long flags;
14390 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
14391 __prop_inc_single(pd, pl);
14392 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/proportions.h", .line = 129, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
14394 typedef struct { int mode; } seccomp_t;
14395 extern void __secure_computing(int);
14396 static inline __attribute__((always_inline)) void secure_computing(int this_syscall)
14398 if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 8)) ? !!(test_ti_thread_flag(current_thread_info(), 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seccomp.h", .line = 15, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 8)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 8)) ? !!(test_ti_thread_flag(current_thread_info(), 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seccomp.h", .line = 15, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 8)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/seccomp.h", .line = 15, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 8)) ? !!(test_ti_thread_flag(current_thread_info(), 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seccomp.h", .line = 15, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 8)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
14399 __secure_computing(this_syscall);
14401 extern long prctl_get_seccomp(void);
14402 extern long prctl_set_seccomp(unsigned long);
14403 static inline __attribute__((always_inline)) void __list_add_rcu(struct list_head *new,
14404 struct list_head *prev, struct list_head *next)
14408 ({ if (__builtin_constant_p(((!__builtin_constant_p((new)) || (((new)) != ((void *)0))))) ? !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 38, }; ______r = !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct list_head **)(&(prev)->next))))) = (typeof(*(new)) *)((new)); });
14411 static inline __attribute__((always_inline)) void list_add_rcu(struct list_head *new, struct list_head *head)
14413 __list_add_rcu(new, head, head->next);
14415 static inline __attribute__((always_inline)) void list_add_tail_rcu(struct list_head *new,
14416 struct list_head *head)
14418 __list_add_rcu(new, head->prev, head);
14420 static inline __attribute__((always_inline)) void list_del_rcu(struct list_head *entry)
14422 __list_del(entry->prev, entry->next);
14423 entry->prev = ((void *) 0x00200200 + (0x0UL));
14425 static inline __attribute__((always_inline)) void hlist_del_init_rcu(struct hlist_node *n)
14427 if (__builtin_constant_p(((!hlist_unhashed(n)))) ? !!((!hlist_unhashed(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 137, }; ______r = !!((!hlist_unhashed(n))); ______f.miss_hit[______r]++; ______r; })) {
14429 n->pprev = ((void *)0);
14432 static inline __attribute__((always_inline)) void list_replace_rcu(struct list_head *old,
14433 struct list_head *new)
14435 new->next = old->next;
14436 new->prev = old->prev;
14437 ({ if (__builtin_constant_p(((!__builtin_constant_p((new)) || (((new)) != ((void *)0))))) ? !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 156, }; ______r = !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct list_head **)(&(new->prev)->next))))) = (typeof(*(new)) *)((new)); });
14438 new->next->prev = new;
14439 old->prev = ((void *) 0x00200200 + (0x0UL));
14441 static inline __attribute__((always_inline)) void list_splice_init_rcu(struct list_head *list,
14442 struct list_head *head,
14443 void (*sync)(void))
14445 struct list_head *first = list->next;
14446 struct list_head *last = list->prev;
14447 struct list_head *at = head->next;
14448 if (__builtin_constant_p(((list_empty(head)))) ? !!((list_empty(head))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 186, }; ______r = !!((list_empty(head))); ______f.miss_hit[______r]++; ______r; }))
14450 INIT_LIST_HEAD(list);
14453 ({ if (__builtin_constant_p(((!__builtin_constant_p((first)) || (((first)) != ((void *)0))))) ? !!((!__builtin_constant_p((first)) || (((first)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 211, }; ______r = !!((!__builtin_constant_p((first)) || (((first)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct list_head **)(&(head)->next))))) = (typeof(*(first)) *)((first)); });
14454 first->prev = head;
14457 static inline __attribute__((always_inline)) void hlist_del_rcu(struct hlist_node *n)
14460 n->pprev = ((void *) 0x00200200 + (0x0UL));
14462 static inline __attribute__((always_inline)) void hlist_replace_rcu(struct hlist_node *old,
14463 struct hlist_node *new)
14465 struct hlist_node *next = old->next;
14467 new->pprev = old->pprev;
14468 ({ if (__builtin_constant_p(((!__builtin_constant_p((new)) || (((new)) != ((void *)0))))) ? !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 329, }; ______r = !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); ((*(struct hlist_node **)new->pprev)) = (typeof(*(new)) *)((new)); });
14469 if (__builtin_constant_p(((next))) ? !!((next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 330, }; ______r = !!((next)); ______f.miss_hit[______r]++; ______r; }))
14470 new->next->pprev = &new->next;
14471 old->pprev = ((void *) 0x00200200 + (0x0UL));
14473 static inline __attribute__((always_inline)) void hlist_add_head_rcu(struct hlist_node *n,
14474 struct hlist_head *h)
14476 struct hlist_node *first = h->first;
14478 n->pprev = &h->first;
14479 ({ if (__builtin_constant_p(((!__builtin_constant_p((n)) || (((n)) != ((void *)0))))) ? !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 368, }; ______r = !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct hlist_node **)(&(h)->first))))) = (typeof(*(n)) *)((n)); });
14480 if (__builtin_constant_p(((first))) ? !!((first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 369, }; ______r = !!((first)); ______f.miss_hit[______r]++; ______r; }))
14481 first->pprev = &n->next;
14483 static inline __attribute__((always_inline)) void hlist_add_before_rcu(struct hlist_node *n,
14484 struct hlist_node *next)
14486 n->pprev = next->pprev;
14488 ({ if (__builtin_constant_p(((!__builtin_constant_p((n)) || (((n)) != ((void *)0))))) ? !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 396, }; ______r = !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct hlist_node **)((n)->pprev))))) = (typeof(*(n)) *)((n)); });
14489 next->pprev = &n->next;
14491 static inline __attribute__((always_inline)) void hlist_add_after_rcu(struct hlist_node *prev,
14492 struct hlist_node *n)
14494 n->next = prev->next;
14495 n->pprev = &prev->next;
14496 ({ if (__builtin_constant_p(((!__builtin_constant_p((n)) || (((n)) != ((void *)0))))) ? !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 423, }; ______r = !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct hlist_node **)(&(prev)->next))))) = (typeof(*(n)) *)((n)); });
14497 if (__builtin_constant_p(((n->next))) ? !!((n->next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 424, }; ______r = !!((n->next)); ______f.miss_hit[______r]++; ______r; }))
14498 n->next->pprev = &n->next;
14500 struct plist_head {
14501 struct list_head node_list;
14502 raw_spinlock_t *rawlock;
14503 spinlock_t *spinlock;
14505 struct plist_node {
14507 struct list_head prio_list;
14508 struct list_head node_list;
14510 static inline __attribute__((always_inline)) void
14511 plist_head_init(struct plist_head *head, spinlock_t *lock)
14513 INIT_LIST_HEAD(&head->node_list);
14514 head->spinlock = lock;
14515 head->rawlock = ((void *)0);
14517 static inline __attribute__((always_inline)) void
14518 plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
14520 INIT_LIST_HEAD(&head->node_list);
14521 head->rawlock = lock;
14522 head->spinlock = ((void *)0);
14524 static inline __attribute__((always_inline)) void plist_node_init(struct plist_node *node, int prio)
14527 INIT_LIST_HEAD(&node->prio_list);
14528 INIT_LIST_HEAD(&node->node_list);
14530 extern void plist_add(struct plist_node *node, struct plist_head *head);
14531 extern void plist_del(struct plist_node *node, struct plist_head *head);
14532 static inline __attribute__((always_inline)) int plist_head_empty(const struct plist_head *head)
14534 return list_empty(&head->node_list);
14536 static inline __attribute__((always_inline)) int plist_node_empty(const struct plist_node *node)
14538 return list_empty(&node->node_list);
14540 static inline __attribute__((always_inline)) struct plist_node *plist_first(const struct plist_head *head)
14542 return ({ const typeof( ((struct plist_node *)0)->node_list ) *__mptr = (head->node_list.next); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct plist_node,node_list) );})
14545 static inline __attribute__((always_inline)) struct plist_node *plist_last(const struct plist_head *head)
14547 return ({ const typeof( ((struct plist_node *)0)->node_list ) *__mptr = (head->node_list.prev); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct plist_node,node_list) );})
14550 extern int max_lock_depth;
14552 raw_spinlock_t wait_lock;
14553 struct plist_head wait_list;
14554 struct task_struct *owner;
14556 const char *name, *file;
14560 struct rt_mutex_waiter;
14561 struct hrtimer_sleeper;
14562 extern int rt_mutex_debug_check_no_locks_freed(const void *from,
14563 unsigned long len);
14564 extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
14565 extern void rt_mutex_debug_task_free(struct task_struct *tsk);
14566 static inline __attribute__((always_inline)) int rt_mutex_is_locked(struct rt_mutex *lock)
14568 return lock->owner != ((void *)0);
14570 extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
14571 extern void rt_mutex_destroy(struct rt_mutex *lock);
14572 extern void rt_mutex_lock(struct rt_mutex *lock);
14573 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
14574 int detect_deadlock);
14575 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
14576 struct hrtimer_sleeper *timeout,
14577 int detect_deadlock);
14578 extern int rt_mutex_trylock(struct rt_mutex *lock);
14579 extern void rt_mutex_unlock(struct rt_mutex *lock);
14581 struct timeval ru_utime;
14582 struct timeval ru_stime;
14599 unsigned long rlim_cur;
14600 unsigned long rlim_max;
14606 struct task_struct;
14607 int getrusage(struct task_struct *p, int who, struct rusage *ru);
14608 int do_prlimit(struct task_struct *tsk, unsigned int resource,
14609 struct rlimit *new_rlim, struct rlimit *old_rlim);
14610 struct timerqueue_node {
14611 struct rb_node node;
14614 struct timerqueue_head {
14615 struct rb_root head;
14616 struct timerqueue_node *next;
14618 extern void timerqueue_add(struct timerqueue_head *head,
14619 struct timerqueue_node *node);
14620 extern void timerqueue_del(struct timerqueue_head *head,
14621 struct timerqueue_node *node);
14622 extern struct timerqueue_node *timerqueue_iterate_next(
14623 struct timerqueue_node *node);
14624 static inline __attribute__((always_inline))
14625 struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
14629 static inline __attribute__((always_inline)) void timerqueue_init(struct timerqueue_node *node)
14631 rb_init_node(&node->node);
14633 static inline __attribute__((always_inline)) void timerqueue_init_head(struct timerqueue_head *head)
14635 head->head = (struct rb_root) { ((void *)0), };
14636 head->next = ((void *)0);
14638 struct hrtimer_clock_base;
14639 struct hrtimer_cpu_base;
14640 enum hrtimer_mode {
14641 HRTIMER_MODE_ABS = 0x0,
14642 HRTIMER_MODE_REL = 0x1,
14643 HRTIMER_MODE_PINNED = 0x02,
14644 HRTIMER_MODE_ABS_PINNED = 0x02,
14645 HRTIMER_MODE_REL_PINNED = 0x03,
14647 enum hrtimer_restart {
14652 struct timerqueue_node node;
14653 ktime_t _softexpires;
14654 enum hrtimer_restart (*function)(struct hrtimer *);
14655 struct hrtimer_clock_base *base;
14656 unsigned long state;
14659 char start_comm[16];
14661 struct hrtimer_sleeper {
14662 struct hrtimer timer;
14663 struct task_struct *task;
14665 struct hrtimer_clock_base {
14666 struct hrtimer_cpu_base *cpu_base;
14669 struct timerqueue_head active;
14670 ktime_t resolution;
14671 ktime_t (*get_time)(void);
14672 ktime_t softirq_time;
14675 enum hrtimer_base_type {
14676 HRTIMER_BASE_MONOTONIC,
14677 HRTIMER_BASE_REALTIME,
14678 HRTIMER_BASE_BOOTTIME,
14679 HRTIMER_MAX_CLOCK_BASES,
14681 struct hrtimer_cpu_base {
14682 raw_spinlock_t lock;
14683 unsigned long active_bases;
14684 ktime_t expires_next;
14687 unsigned long nr_events;
14688 unsigned long nr_retries;
14689 unsigned long nr_hangs;
14690 ktime_t max_hang_time;
14691 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
14693 static inline __attribute__((always_inline)) void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
14695 timer->node.expires = time;
14696 timer->_softexpires = time;
14698 static inline __attribute__((always_inline)) void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
14700 timer->_softexpires = time;
14701 timer->node.expires = ktime_add_safe(time, delta);
14703 static inline __attribute__((always_inline)) void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
14705 timer->_softexpires = time;
14706 timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
14708 static inline __attribute__((always_inline)) void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
14710 timer->node.expires.tv64 = tv64;
14711 timer->_softexpires.tv64 = tv64;
14713 static inline __attribute__((always_inline)) void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
14715 timer->node.expires = ktime_add_safe(timer->node.expires, time);
14716 timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
14718 static inline __attribute__((always_inline)) void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
14720 timer->node.expires = ({ (ktime_t){ .tv64 = (timer->node.expires).tv64 + (ns) }; });
14721 timer->_softexpires = ({ (ktime_t){ .tv64 = (timer->_softexpires).tv64 + (ns) }; });
14723 static inline __attribute__((always_inline)) ktime_t hrtimer_get_expires(const struct hrtimer *timer)
14725 return timer->node.expires;
14727 static inline __attribute__((always_inline)) ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
14729 return timer->_softexpires;
14731 static inline __attribute__((always_inline)) s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
14733 return timer->node.expires.tv64;
14735 static inline __attribute__((always_inline)) s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
14737 return timer->_softexpires.tv64;
14739 static inline __attribute__((always_inline)) s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
14741 return ((timer->node.expires).tv64);
14743 static inline __attribute__((always_inline)) ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
14745 return ({ (ktime_t){ .tv64 = (timer->node.expires).tv64 - (timer->base->get_time()).tv64 }; });
14747 struct clock_event_device;
14748 extern void hrtimer_interrupt(struct clock_event_device *dev);
14749 static inline __attribute__((always_inline)) ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
14751 return timer->base->get_time();
14753 static inline __attribute__((always_inline)) int hrtimer_is_hres_active(struct hrtimer *timer)
14755 return timer->base->cpu_base->hres_active;
14757 extern void hrtimer_peek_ahead_timers(void);
14758 extern void clock_was_set(void);
14759 extern void timerfd_clock_was_set(void);
14760 extern void hrtimers_resume(void);
14761 extern ktime_t ktime_get(void);
14762 extern ktime_t ktime_get_real(void);
14763 extern ktime_t ktime_get_boottime(void);
14764 extern ktime_t ktime_get_monotonic_offset(void);
14765 extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tick_device) tick_cpu_device;
14766 extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
14767 enum hrtimer_mode mode);
14768 static inline __attribute__((always_inline)) void hrtimer_init_on_stack(struct hrtimer *timer,
14769 clockid_t which_clock,
14770 enum hrtimer_mode mode)
14772 hrtimer_init(timer, which_clock, mode);
/*
 * destroy_hrtimer_on_stack() - intentional no-op stub.
 *
 * NOTE(review): presumably this is the !CONFIG_DEBUG_OBJECTS_TIMERS
 * variant (the debug build would unregister the on-stack timer from the
 * debug-objects tracker); with that option off the call compiles away
 * entirely.  TODO: confirm against include/linux/hrtimer.h for this
 * kernel version.
 */
14774 static inline __attribute__((always_inline)) void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
14775 extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
14776 const enum hrtimer_mode mode);
14777 extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
14778 unsigned long range_ns, const enum hrtimer_mode mode);
14780 __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
14781 unsigned long delta_ns,
14782 const enum hrtimer_mode mode, int wakeup);
14783 extern int hrtimer_cancel(struct hrtimer *timer);
14784 extern int hrtimer_try_to_cancel(struct hrtimer *timer);
14785 static inline __attribute__((always_inline)) int hrtimer_start_expires(struct hrtimer *timer,
14786 enum hrtimer_mode mode)
14788 unsigned long delta;
14789 ktime_t soft, hard;
14790 soft = hrtimer_get_softexpires(timer);
14791 hard = hrtimer_get_expires(timer);
14792 delta = ((({ (ktime_t){ .tv64 = (hard).tv64 - (soft).tv64 }; })).tv64);
14793 return hrtimer_start_range_ns(timer, soft, delta, mode);
14795 static inline __attribute__((always_inline)) int hrtimer_restart(struct hrtimer *timer)
14797 return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
14799 extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
14800 extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
14801 extern ktime_t hrtimer_get_next_event(void);
14802 static inline __attribute__((always_inline)) int hrtimer_active(const struct hrtimer *timer)
14804 return timer->state != 0x00;
14806 static inline __attribute__((always_inline)) int hrtimer_is_queued(struct hrtimer *timer)
14808 return timer->state & 0x01;
14810 static inline __attribute__((always_inline)) int hrtimer_callback_running(struct hrtimer *timer)
14812 return timer->state & 0x02;
14815 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
14816 static inline __attribute__((always_inline)) u64 hrtimer_forward_now(struct hrtimer *timer,
14819 return hrtimer_forward(timer, timer->base->get_time(), interval);
14821 extern long hrtimer_nanosleep(struct timespec *rqtp,
14822 struct timespec *rmtp,
14823 const enum hrtimer_mode mode,
14824 const clockid_t clockid);
14825 extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
14826 extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
14827 struct task_struct *tsk);
14828 extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
14829 const enum hrtimer_mode mode);
14830 extern int schedule_hrtimeout_range_clock(ktime_t *expires,
14831 unsigned long delta, const enum hrtimer_mode mode, int clock);
14832 extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
14833 extern void hrtimer_run_queues(void);
14834 extern void hrtimer_run_pending(void);
14835 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) hrtimers_init(void);
14836 extern u64 ktime_divns(const ktime_t kt, s64 div);
14837 extern void sysrq_timer_list_show(void);
14838 struct task_io_accounting {
14845 u64 cancelled_write_bytes;
14847 struct latency_record {
14848 unsigned long backtrace[12];
14849 unsigned int count;
14850 unsigned long time;
14853 struct task_struct;
14854 extern int latencytop_enabled;
14855 void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
14856 static inline __attribute__((always_inline)) void
14857 account_scheduler_latency(struct task_struct *task, int usecs, int inter)
14859 if (__builtin_constant_p((((__builtin_constant_p(latencytop_enabled) ? !!(latencytop_enabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/latencytop.h", .line = 33, }; ______r = __builtin_expect(!!(latencytop_enabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(latencytop_enabled) ? !!(latencytop_enabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/latencytop.h", .line = 33, }; ______r = __builtin_expect(!!(latencytop_enabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/latencytop.h", .line = 33, }; ______r = !!(((__builtin_constant_p(latencytop_enabled) ? !!(latencytop_enabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/latencytop.h", .line = 33, }; ______r = __builtin_expect(!!(latencytop_enabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
14860 __account_scheduler_latency(task, usecs, inter);
14862 void clear_all_latency_tracing(struct task_struct *p);
14863 typedef int32_t key_serial_t;
14864 typedef uint32_t key_perm_t;
14866 struct selinux_audit_rule;
14867 struct audit_context;
14868 struct kern_ipc_perm;
14869 static inline __attribute__((always_inline)) bool selinux_is_enabled(void)
14873 struct user_struct;
14876 struct group_info {
14880 gid_t small_block[32];
14883 static inline __attribute__((always_inline)) struct group_info *get_group_info(struct group_info *gi)
14885 atomic_inc(&gi->usage);
14888 extern struct group_info *groups_alloc(int);
14889 extern struct group_info init_groups;
14890 extern void groups_free(struct group_info *);
14891 extern int set_current_groups(struct group_info *);
14892 extern int set_groups(struct cred *, struct group_info *);
14893 extern int groups_search(const struct group_info *, gid_t);
14894 extern int in_group_p(gid_t);
14895 extern int in_egroup_p(gid_t);
14906 unsigned securebits;
14907 kernel_cap_t cap_inheritable;
14908 kernel_cap_t cap_permitted;
14909 kernel_cap_t cap_effective;
14910 kernel_cap_t cap_bset;
14912 struct user_struct *user;
14913 struct user_namespace *user_ns;
14914 struct group_info *group_info;
14915 struct rcu_head rcu;
14917 extern void __put_cred(struct cred *);
14918 extern void exit_creds(struct task_struct *);
14919 extern int copy_creds(struct task_struct *, unsigned long);
14920 extern const struct cred *get_task_cred(struct task_struct *);
14921 extern struct cred *cred_alloc_blank(void);
14922 extern struct cred *prepare_creds(void);
14923 extern struct cred *prepare_exec_creds(void);
14924 extern int commit_creds(struct cred *);
14925 extern void abort_creds(struct cred *);
14926 extern const struct cred *override_creds(const struct cred *);
14927 extern void revert_creds(const struct cred *);
14928 extern struct cred *prepare_kernel_cred(struct task_struct *);
14929 extern int change_create_files_as(struct cred *, struct inode *);
14930 extern int set_security_override(struct cred *, u32);
14931 extern int set_security_override_from_ctx(struct cred *, const char *);
14932 extern int set_create_files_as(struct cred *, struct inode *);
14933 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) cred_init(void);
14934 static inline __attribute__((always_inline)) void validate_creds(const struct cred *cred)
14937 static inline __attribute__((always_inline)) void validate_creds_for_do_exit(struct task_struct *tsk)
14940 static inline __attribute__((always_inline)) void validate_process_creds(void)
14943 static inline __attribute__((always_inline)) struct cred *get_new_cred(struct cred *cred)
14945 atomic_inc(&cred->usage);
14948 static inline __attribute__((always_inline)) const struct cred *get_cred(const struct cred *cred)
14950 struct cred *nonconst_cred = (struct cred *) cred;
14951 validate_creds(cred);
14952 return get_new_cred(nonconst_cred);
14954 static inline __attribute__((always_inline)) void put_cred(const struct cred *_cred)
14956 struct cred *cred = (struct cred *) _cred;
14957 validate_creds(cred);
14958 if (__builtin_constant_p(((atomic_dec_and_test(&(cred)->usage)))) ? !!((atomic_dec_and_test(&(cred)->usage))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/cred.h", .line = 261, }; ______r = !!((atomic_dec_and_test(&(cred)->usage))); ______f.miss_hit[______r]++; ______r; }))
14961 extern struct user_namespace init_user_ns;
14962 struct exec_domain;
14963 struct futex_pi_state;
14964 struct robust_list_head;
14967 struct perf_event_context;
14969 extern unsigned long avenrun[];
14970 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
14971 extern unsigned long total_forks;
14972 extern int nr_threads;
14973 extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) process_counts;
14974 extern int nr_processes(void);
14975 extern unsigned long nr_running(void);
14976 extern unsigned long nr_uninterruptible(void);
14977 extern unsigned long nr_iowait(void);
14978 extern unsigned long nr_iowait_cpu(int cpu);
14979 extern unsigned long this_cpu_load(void);
14980 extern void calc_global_load(unsigned long ticks);
14981 extern unsigned long get_parent_ip(unsigned long addr);
14985 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
14986 extern void proc_sched_set_task(struct task_struct *p);
14988 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
14989 extern char ___assert_task_state[1 - 2*!!(
14990 sizeof("RSDTtZXxKW")-1 != ( __builtin_constant_p(512) ? ( (512) < 1 ? ____ilog2_NaN() : (512) & (1ULL << 63) ? 63 : (512) & (1ULL << 62) ? 62 : (512) & (1ULL << 61) ? 61 : (512) & (1ULL << 60) ? 60 : (512) & (1ULL << 59) ? 59 : (512) & (1ULL << 58) ? 58 : (512) & (1ULL << 57) ? 57 : (512) & (1ULL << 56) ? 56 : (512) & (1ULL << 55) ? 55 : (512) & (1ULL << 54) ? 54 : (512) & (1ULL << 53) ? 53 : (512) & (1ULL << 52) ? 52 : (512) & (1ULL << 51) ? 51 : (512) & (1ULL << 50) ? 50 : (512) & (1ULL << 49) ? 49 : (512) & (1ULL << 48) ? 48 : (512) & (1ULL << 47) ? 47 : (512) & (1ULL << 46) ? 46 : (512) & (1ULL << 45) ? 45 : (512) & (1ULL << 44) ? 44 : (512) & (1ULL << 43) ? 43 : (512) & (1ULL << 42) ? 42 : (512) & (1ULL << 41) ? 41 : (512) & (1ULL << 40) ? 40 : (512) & (1ULL << 39) ? 39 : (512) & (1ULL << 38) ? 38 : (512) & (1ULL << 37) ? 37 : (512) & (1ULL << 36) ? 36 : (512) & (1ULL << 35) ? 35 : (512) & (1ULL << 34) ? 34 : (512) & (1ULL << 33) ? 33 : (512) & (1ULL << 32) ? 32 : (512) & (1ULL << 31) ? 31 : (512) & (1ULL << 30) ? 30 : (512) & (1ULL << 29) ? 29 : (512) & (1ULL << 28) ? 28 : (512) & (1ULL << 27) ? 27 : (512) & (1ULL << 26) ? 26 : (512) & (1ULL << 25) ? 25 : (512) & (1ULL << 24) ? 24 : (512) & (1ULL << 23) ? 23 : (512) & (1ULL << 22) ? 22 : (512) & (1ULL << 21) ? 21 : (512) & (1ULL << 20) ? 20 : (512) & (1ULL << 19) ? 19 : (512) & (1ULL << 18) ? 18 : (512) & (1ULL << 17) ? 17 : (512) & (1ULL << 16) ? 16 : (512) & (1ULL << 15) ? 15 : (512) & (1ULL << 14) ? 14 : (512) & (1ULL << 13) ? 13 : (512) & (1ULL << 12) ? 12 : (512) & (1ULL << 11) ? 11 : (512) & (1ULL << 10) ? 10 : (512) & (1ULL << 9) ? 9 : (512) & (1ULL << 8) ? 8 : (512) & (1ULL << 7) ? 7 : (512) & (1ULL << 6) ? 6 : (512) & (1ULL << 5) ? 5 : (512) & (1ULL << 4) ? 4 : (512) & (1ULL << 3) ? 3 : (512) & (1ULL << 2) ? 2 : (512) & (1ULL << 1) ? 1 : (512) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(512) <= 4) ? __ilog2_u32(512) : __ilog2_u64(512) )+1)];
14991 extern rwlock_t tasklist_lock;
14992 extern spinlock_t mmlist_lock;
14993 struct task_struct;
14994 extern void sched_init(void);
14995 extern void sched_init_smp(void);
14996 extern __attribute__((regparm(0))) void schedule_tail(struct task_struct *prev);
14997 extern void init_idle(struct task_struct *idle, int cpu);
14998 extern void init_idle_bootup_task(struct task_struct *idle);
14999 extern int runqueue_is_locked(int cpu);
15000 extern cpumask_var_t nohz_cpu_mask;
/*
 * No-op stub: nothing to do when selecting a NO_HZ idle load balancer in
 * this configuration. NOTE(review): inferred from the empty body — the
 * functional variant is presumably gated by CONFIG_NO_HZ; confirm.
 */
15001 static inline __attribute__((always_inline)) void select_nohz_load_balancer(int stop_tick) { }
15002 extern void show_state_filter(unsigned long state_filter);
15003 static inline __attribute__((always_inline)) void show_state(void)
15005 show_state_filter(0);
15007 extern void show_regs(struct pt_regs *);
15008 extern void show_stack(struct task_struct *task, unsigned long *sp);
15009 void io_schedule(void);
15010 long io_schedule_timeout(long timeout);
15011 extern void cpu_init (void);
15012 extern void trap_init(void);
15013 extern void update_process_times(int user);
15014 extern void scheduler_tick(void);
15015 extern void sched_show_task(struct task_struct *p);
15016 extern void touch_softlockup_watchdog(void);
15017 extern void touch_softlockup_watchdog_sync(void);
15018 extern void touch_all_softlockup_watchdogs(void);
15019 extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
15021 size_t *lenp, loff_t *ppos);
15022 extern unsigned int softlockup_panic;
15023 void lockup_detector_init(void);
15024 extern unsigned int sysctl_hung_task_panic;
15025 extern unsigned long sysctl_hung_task_check_count;
15026 extern unsigned long sysctl_hung_task_timeout_secs;
15027 extern unsigned long sysctl_hung_task_warnings;
15028 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
15030 size_t *lenp, loff_t *ppos);
15031 extern char __sched_text_start[], __sched_text_end[];
15032 extern int in_sched_functions(unsigned long addr);
15033 extern signed long schedule_timeout(signed long timeout);
15034 extern signed long schedule_timeout_interruptible(signed long timeout);
15035 extern signed long schedule_timeout_killable(signed long timeout);
15036 extern signed long schedule_timeout_uninterruptible(signed long timeout);
15037 __attribute__((regparm(0))) void schedule(void);
15038 extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
15040 struct user_namespace;
15041 extern int sysctl_max_map_count;
15042 typedef unsigned long aio_context_t;
15044 IOCB_CMD_PREAD = 0,
15045 IOCB_CMD_PWRITE = 1,
15046 IOCB_CMD_FSYNC = 2,
15047 IOCB_CMD_FDSYNC = 3,
15049 IOCB_CMD_PREADV = 7,
15050 IOCB_CMD_PWRITEV = 8,
15060 __u32 aio_key, aio_reserved1;
15061 __u16 aio_lio_opcode;
15067 __u64 aio_reserved2;
15074 __kernel_size_t iov_len;
15080 static inline __attribute__((always_inline)) size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
15084 for (seg = 0; seg < nr_segs; seg++)
15085 ret += iov[seg].iov_len;
15088 unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
15091 struct list_head ki_run_list;
15092 unsigned long ki_flags;
15095 struct file *ki_filp;
15096 struct kioctx *ki_ctx;
15097 int (*ki_cancel)(struct kiocb *, struct io_event *);
15098 ssize_t (*ki_retry)(struct kiocb *);
15099 void (*ki_dtor)(struct kiocb *);
15102 struct task_struct *tsk;
15104 __u64 ki_user_data;
15107 unsigned short ki_opcode;
15111 struct iovec ki_inline_vec;
15112 struct iovec *ki_iovec;
15113 unsigned long ki_nr_segs;
15114 unsigned long ki_cur_seg;
15115 struct list_head ki_list;
15116 struct eventfd_ctx *ki_eventfd;
15124 unsigned compat_features;
15125 unsigned incompat_features;
15126 unsigned header_length;
15127 struct io_event io_events[0];
15129 struct aio_ring_info {
15130 unsigned long mmap_base;
15131 unsigned long mmap_size;
15132 struct page **ring_pages;
15133 spinlock_t ring_lock;
15136 struct page *internal_pages[8];
15141 struct mm_struct *mm;
15142 unsigned long user_id;
15143 struct hlist_node list;
15144 wait_queue_head_t wait;
15145 spinlock_t ctx_lock;
15147 struct list_head active_reqs;
15148 struct list_head run_list;
15150 struct aio_ring_info ring_info;
15151 struct delayed_work wq;
15152 struct rcu_head rcu_head;
15154 extern unsigned aio_max_size;
15155 extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
15156 extern int aio_put_req(struct kiocb *iocb);
15157 extern void kick_iocb(struct kiocb *iocb);
15158 extern int aio_complete(struct kiocb *iocb, long res, long res2);
15160 extern void exit_aio(struct mm_struct *mm);
15161 extern long do_io_submit(aio_context_t ctx_id, long nr,
15162 struct iocb * *iocbpp, bool compat);
15163 static inline __attribute__((always_inline)) struct kiocb *list_kiocb(struct list_head *h)
15165 return ({ const typeof( ((struct kiocb *)0)->ki_list ) *__mptr = (h); (struct kiocb *)( (char *)__mptr - __builtin_offsetof(struct kiocb,ki_list) );});
15167 extern unsigned long aio_nr;
15168 extern unsigned long aio_max_nr;
15169 extern void arch_pick_mmap_layout(struct mm_struct *mm);
15170 extern unsigned long
15171 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
15172 unsigned long, unsigned long);
15173 extern unsigned long
15174 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
15175 unsigned long len, unsigned long pgoff,
15176 unsigned long flags);
15177 extern void arch_unmap_area(struct mm_struct *, unsigned long);
15178 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
15179 extern void set_dumpable(struct mm_struct *mm, int value);
15180 extern int get_dumpable(struct mm_struct *mm);
15181 struct sighand_struct {
15183 struct k_sigaction action[64];
15184 spinlock_t siglock;
15185 wait_queue_head_t signalfd_wqh;
15187 struct pacct_struct {
15190 unsigned long ac_mem;
15191 cputime_t ac_utime, ac_stime;
15192 unsigned long ac_minflt, ac_majflt;
15194 struct cpu_itimer {
15200 struct task_cputime {
15203 unsigned long long sum_exec_runtime;
15205 struct thread_group_cputimer {
15206 struct task_cputime cputime;
15211 struct signal_struct {
15215 wait_queue_head_t wait_chldexit;
15216 struct task_struct *curr_target;
15217 struct sigpending shared_pending;
15218 int group_exit_code;
15220 struct task_struct *group_exit_task;
15221 int group_stop_count;
15222 unsigned int flags;
15223 struct list_head posix_timers;
15224 struct hrtimer real_timer;
15225 struct pid *leader_pid;
15226 ktime_t it_real_incr;
15227 struct cpu_itimer it[2];
15228 struct thread_group_cputimer cputimer;
15229 struct task_cputime cputime_expires;
15230 struct list_head cpu_timers[3];
15231 struct pid *tty_old_pgrp;
15233 struct tty_struct *tty;
15234 struct autogroup *autogroup;
15235 cputime_t utime, stime, cutime, cstime;
15238 cputime_t prev_utime, prev_stime;
15239 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
15240 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
15241 unsigned long inblock, oublock, cinblock, coublock;
15242 unsigned long maxrss, cmaxrss;
15243 struct task_io_accounting ioac;
15244 unsigned long long sum_sched_runtime;
15245 struct rlimit rlim[16];
15246 struct pacct_struct pacct;
15247 struct taskstats *stats;
15248 unsigned audit_tty;
15249 struct tty_audit_buf *tty_audit_buf;
15250 struct rw_semaphore threadgroup_fork_lock;
15253 int oom_score_adj_min;
15254 struct mutex cred_guard_mutex;
15256 static inline __attribute__((always_inline)) int signal_group_exit(const struct signal_struct *sig)
15258 return (sig->flags & 0x00000004) ||
15259 (sig->group_exit_task != ((void *)0));
15261 struct user_struct {
15263 atomic_t processes;
15265 atomic_t sigpending;
15266 atomic_t inotify_watches;
15267 atomic_t inotify_devs;
15268 atomic_t fanotify_listeners;
15269 atomic_long_t epoll_watches;
15270 unsigned long mq_bytes;
15271 unsigned long locked_shm;
15272 struct hlist_node uidhash_node;
15274 struct user_namespace *user_ns;
15275 atomic_long_t locked_vm;
15277 extern int uids_sysfs_init(void);
15278 extern struct user_struct *find_user(uid_t);
15279 extern struct user_struct root_user;
15280 struct backing_dev_info;
15281 struct reclaim_state;
15282 struct sched_info {
15283 unsigned long pcount;
15284 unsigned long long run_delay;
15285 unsigned long long last_arrival,
15288 struct task_delay_info {
15290 unsigned int flags;
15291 struct timespec blkio_start, blkio_end;
15296 struct timespec freepages_start, freepages_end;
15297 u64 freepages_delay;
15298 u32 freepages_count;
15300 static inline __attribute__((always_inline)) int sched_info_on(void)
15304 enum cpu_idle_type {
15310 enum powersavings_balance_level {
15311 POWERSAVINGS_BALANCE_NONE = 0,
15312 POWERSAVINGS_BALANCE_BASIC,
15313 POWERSAVINGS_BALANCE_WAKEUP,
15314 MAX_POWERSAVINGS_BALANCE_LEVELS
15316 extern int sched_mc_power_savings, sched_smt_power_savings;
15317 static inline __attribute__((always_inline)) int sd_balance_for_mc_power(void)
15319 if (__builtin_constant_p(((sched_smt_power_savings))) ? !!((sched_smt_power_savings)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 864, }; ______r = !!((sched_smt_power_savings)); ______f.miss_hit[______r]++; ______r; }))
15321 if (__builtin_constant_p(((!sched_mc_power_savings))) ? !!((!sched_mc_power_savings)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 867, }; ______r = !!((!sched_mc_power_savings)); ______f.miss_hit[______r]++; ______r; }))
15325 static inline __attribute__((always_inline)) int sd_balance_for_package_power(void)
15327 if (__builtin_constant_p(((sched_mc_power_savings | sched_smt_power_savings))) ? !!((sched_mc_power_savings | sched_smt_power_savings)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 875, }; ______r = !!((sched_mc_power_savings | sched_smt_power_savings)); ______f.miss_hit[______r]++; ______r; }))
15331 extern int __attribute__((weak)) arch_sd_sibiling_asym_packing(void);
15332 static inline __attribute__((always_inline)) int sd_power_saving_flags(void)
15334 if (__builtin_constant_p(((sched_mc_power_savings | sched_smt_power_savings))) ? !!((sched_mc_power_savings | sched_smt_power_savings)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 891, }; ______r = !!((sched_mc_power_savings | sched_smt_power_savings)); ______f.miss_hit[______r]++; ______r; }))
15338 struct sched_group_power {
15340 unsigned int power, power_orig;
15342 struct sched_group {
15343 struct sched_group *next;
15345 unsigned int group_weight;
15346 struct sched_group_power *sgp;
15347 unsigned long cpumask[0];
15349 static inline __attribute__((always_inline)) struct cpumask *sched_group_cpus(struct sched_group *sg)
15351 return ((struct cpumask *)(1 ? (sg->cpumask) : (void *)sizeof(__check_is_bitmap(sg->cpumask))));
15353 struct sched_domain_attr {
15354 int relax_domain_level;
15356 extern int sched_domain_level_max;
15357 struct sched_domain {
15358 struct sched_domain *parent;
15359 struct sched_domain *child;
15360 struct sched_group *groups;
15361 unsigned long min_interval;
15362 unsigned long max_interval;
15363 unsigned int busy_factor;
15364 unsigned int imbalance_pct;
15365 unsigned int cache_nice_tries;
15366 unsigned int busy_idx;
15367 unsigned int idle_idx;
15368 unsigned int newidle_idx;
15369 unsigned int wake_idx;
15370 unsigned int forkexec_idx;
15371 unsigned int smt_gain;
15374 unsigned long last_balance;
15375 unsigned int balance_interval;
15376 unsigned int nr_balance_failed;
15378 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
15379 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
15380 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
15381 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
15382 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
15383 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
15384 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
15385 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
15386 unsigned int alb_count;
15387 unsigned int alb_failed;
15388 unsigned int alb_pushed;
15389 unsigned int sbe_count;
15390 unsigned int sbe_balanced;
15391 unsigned int sbe_pushed;
15392 unsigned int sbf_count;
15393 unsigned int sbf_balanced;
15394 unsigned int sbf_pushed;
15395 unsigned int ttwu_wake_remote;
15396 unsigned int ttwu_move_affine;
15397 unsigned int ttwu_move_balance;
15401 struct rcu_head rcu;
15403 unsigned int span_weight;
15404 unsigned long span[0];
15406 static inline __attribute__((always_inline)) struct cpumask *sched_domain_span(struct sched_domain *sd)
15408 return ((struct cpumask *)(1 ? (sd->span) : (void *)sizeof(__check_is_bitmap(sd->span))));
15410 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
15411 struct sched_domain_attr *dattr_new);
15412 cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
15413 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
15414 static inline __attribute__((always_inline)) int test_sd_parent(struct sched_domain *sd, int flag)
15416 if (__builtin_constant_p(((sd->parent && (sd->parent->flags & flag)))) ? !!((sd->parent && (sd->parent->flags & flag))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1029, }; ______r = !!((sd->parent && (sd->parent->flags & flag))); ______f.miss_hit[______r]++; ______r; }))
15420 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
15421 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
/*
 * No-op stub: this architecture/configuration provides no kernel-stack
 * prefetch hint for the next task, so the scheduler's call site costs
 * nothing here.
 */
15423 static inline __attribute__((always_inline)) void prefetch_stack(struct task_struct *t) { }
15424 struct audit_context;
15426 struct pipe_inode_info;
15427 struct uts_namespace;
15429 struct sched_domain;
15430 struct sched_class {
15431 const struct sched_class *next;
15432 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
15433 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
15434 void (*yield_task) (struct rq *rq);
15435 bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
15436 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
15437 struct task_struct * (*pick_next_task) (struct rq *rq);
15438 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
15439 int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
15440 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
15441 void (*post_schedule) (struct rq *this_rq);
15442 void (*task_waking) (struct task_struct *task);
15443 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
15444 void (*set_cpus_allowed)(struct task_struct *p,
15445 const struct cpumask *newmask);
15446 void (*rq_online)(struct rq *rq);
15447 void (*rq_offline)(struct rq *rq);
15448 void (*set_curr_task) (struct rq *rq);
15449 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
15450 void (*task_fork) (struct task_struct *p);
15451 void (*switched_from) (struct rq *this_rq, struct task_struct *task);
15452 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
15453 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
15455 unsigned int (*get_rr_interval) (struct rq *rq,
15456 struct task_struct *task);
15457 void (*task_move_group) (struct task_struct *p, int on_rq);
15459 struct load_weight {
15460 unsigned long weight, inv_weight;
15462 struct sched_statistics {
15471 s64 sum_sleep_runtime;
15476 u64 nr_migrations_cold;
15477 u64 nr_failed_migrations_affine;
15478 u64 nr_failed_migrations_running;
15479 u64 nr_failed_migrations_hot;
15480 u64 nr_forced_migrations;
15482 u64 nr_wakeups_sync;
15483 u64 nr_wakeups_migrate;
15484 u64 nr_wakeups_local;
15485 u64 nr_wakeups_remote;
15486 u64 nr_wakeups_affine;
15487 u64 nr_wakeups_affine_attempts;
15488 u64 nr_wakeups_passive;
15489 u64 nr_wakeups_idle;
15491 struct sched_entity {
15492 struct load_weight load;
15493 struct rb_node run_node;
15494 struct list_head group_node;
15495 unsigned int on_rq;
15497 u64 sum_exec_runtime;
15499 u64 prev_sum_exec_runtime;
15501 struct sched_statistics statistics;
15502 struct sched_entity *parent;
15503 struct cfs_rq *cfs_rq;
15504 struct cfs_rq *my_q;
15506 struct sched_rt_entity {
15507 struct list_head run_list;
15508 unsigned long timeout;
15509 unsigned int time_slice;
15510 int nr_cpus_allowed;
15511 struct sched_rt_entity *back;
15514 enum perf_event_task_context {
15515 perf_invalid_context = -1,
15516 perf_hw_context = 0,
15518 perf_nr_task_contexts,
15520 struct task_struct {
15521 volatile long state;
15524 unsigned int flags;
15525 unsigned int ptrace;
15526 struct task_struct *wake_entry;
15529 int prio, static_prio, normal_prio;
15530 unsigned int rt_priority;
15531 const struct sched_class *sched_class;
15532 struct sched_entity se;
15533 struct sched_rt_entity rt;
15534 struct hlist_head preempt_notifiers;
15535 unsigned char fpu_counter;
15536 unsigned int btrace_seq;
15537 unsigned int policy;
15538 cpumask_t cpus_allowed;
15539 int rcu_read_lock_nesting;
15540 char rcu_read_unlock_special;
15542 struct list_head rcu_node_entry;
15543 struct rcu_node *rcu_blocked_node;
15544 struct rt_mutex *rcu_boost_mutex;
15545 struct sched_info sched_info;
15546 struct list_head tasks;
15547 struct plist_node pushable_tasks;
15548 struct mm_struct *mm, *active_mm;
15550 int exit_code, exit_signal;
15552 unsigned int group_stop;
15553 unsigned int personality;
15554 unsigned did_exec:1;
15555 unsigned in_execve:1;
15556 unsigned in_iowait:1;
15557 unsigned sched_reset_on_fork:1;
15558 unsigned sched_contributes_to_load:1;
15561 unsigned long stack_canary;
15562 struct task_struct *real_parent;
15563 struct task_struct *parent;
15564 struct list_head children;
15565 struct list_head sibling;
15566 struct task_struct *group_leader;
15567 struct list_head ptraced;
15568 struct list_head ptrace_entry;
15569 struct pid_link pids[PIDTYPE_MAX];
15570 struct list_head thread_group;
15571 struct completion *vfork_done;
15572 int *set_child_tid;
15573 int *clear_child_tid;
15574 cputime_t utime, stime, utimescaled, stimescaled;
15576 cputime_t prev_utime, prev_stime;
15577 unsigned long nvcsw, nivcsw;
15578 struct timespec start_time;
15579 struct timespec real_start_time;
15580 unsigned long min_flt, maj_flt;
15581 struct task_cputime cputime_expires;
15582 struct list_head cpu_timers[3];
15583 const struct cred *real_cred;
15584 const struct cred *cred;
15585 struct cred *replacement_session_keyring;
15587 int link_count, total_link_count;
15588 struct sysv_sem sysvsem;
15589 unsigned long last_switch_count;
15590 struct thread_struct thread;
15591 struct fs_struct *fs;
15592 struct files_struct *files;
15593 struct nsproxy *nsproxy;
15594 struct signal_struct *signal;
15595 struct sighand_struct *sighand;
15596 sigset_t blocked, real_blocked;
15597 sigset_t saved_sigmask;
15598 struct sigpending pending;
15599 unsigned long sas_ss_sp;
15600 size_t sas_ss_size;
15601 int (*notifier)(void *priv);
15602 void *notifier_data;
15603 sigset_t *notifier_mask;
15604 struct audit_context *audit_context;
15606 unsigned int sessionid;
15608 u32 parent_exec_id;
15610 spinlock_t alloc_lock;
15611 struct irqaction *irqaction;
15612 raw_spinlock_t pi_lock;
15613 struct plist_head pi_waiters;
15614 struct rt_mutex_waiter *pi_blocked_on;
15615 struct mutex_waiter *blocked_on;
15616 unsigned int irq_events;
15617 unsigned long hardirq_enable_ip;
15618 unsigned long hardirq_disable_ip;
15619 unsigned int hardirq_enable_event;
15620 unsigned int hardirq_disable_event;
15621 int hardirqs_enabled;
15622 int hardirq_context;
15623 unsigned long softirq_disable_ip;
15624 unsigned long softirq_enable_ip;
15625 unsigned int softirq_disable_event;
15626 unsigned int softirq_enable_event;
15627 int softirqs_enabled;
15628 int softirq_context;
15629 u64 curr_chain_key;
15631 unsigned int lockdep_recursion;
15632 struct held_lock held_locks[48UL];
15633 gfp_t lockdep_reclaim_gfp;
15634 void *journal_info;
15635 struct bio_list *bio_list;
15636 struct blk_plug *plug;
15637 struct reclaim_state *reclaim_state;
15638 struct backing_dev_info *backing_dev_info;
15639 struct io_context *io_context;
15640 unsigned long ptrace_message;
15641 siginfo_t *last_siginfo;
15642 struct task_io_accounting ioac;
15645 cputime_t acct_timexpd;
15646 struct css_set *cgroups;
15647 struct list_head cg_list;
15648 struct robust_list_head *robust_list;
15649 struct list_head pi_state_list;
15650 struct futex_pi_state *pi_state_cache;
15651 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
15652 struct mutex perf_event_mutex;
15653 struct list_head perf_event_list;
15655 struct rcu_head rcu;
15656 struct pipe_inode_info *splice_pipe;
15657 struct task_delay_info *delays;
15658 struct prop_local_single dirties;
15659 int latency_record_count;
15660 struct latency_record latency_record[32];
15661 unsigned long timer_slack_ns;
15662 unsigned long default_timer_slack_ns;
15663 struct list_head *scm_work_list;
15664 int curr_ret_stack;
15665 struct ftrace_ret_stack *ret_stack;
15666 unsigned long long ftrace_timestamp;
15667 atomic_t trace_overrun;
15668 atomic_t tracing_graph_pause;
15669 unsigned long trace;
15670 unsigned long trace_recursion;
15671 atomic_t ptrace_bp_refcnt;
15673 static inline __attribute__((always_inline)) int rt_prio(int prio)
15675 if (__builtin_constant_p((((__builtin_constant_p(prio < 100) ? !!(prio < 100) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1599, }; ______r = __builtin_expect(!!(prio < 100), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(prio < 100) ? !!(prio < 100) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1599, }; ______r = __builtin_expect(!!(prio < 100), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1599, }; ______r = !!(((__builtin_constant_p(prio < 100) ? !!(prio < 100) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1599, }; ______r = __builtin_expect(!!(prio < 100), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
15679 static inline __attribute__((always_inline)) int rt_task(struct task_struct *p)
15681 return rt_prio(p->prio);
15683 static inline __attribute__((always_inline)) struct pid *task_pid(struct task_struct *task)
15685 return task->pids[PIDTYPE_PID].pid;
15687 static inline __attribute__((always_inline)) struct pid *task_tgid(struct task_struct *task)
15689 return task->group_leader->pids[PIDTYPE_PID].pid;
15691 static inline __attribute__((always_inline)) struct pid *task_pgrp(struct task_struct *task)
15693 return task->group_leader->pids[PIDTYPE_PGID].pid;
15695 static inline __attribute__((always_inline)) struct pid *task_session(struct task_struct *task)
15697 return task->group_leader->pids[PIDTYPE_SID].pid;
15699 struct pid_namespace;
15700 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
15701 struct pid_namespace *ns);
15702 static inline __attribute__((always_inline)) pid_t task_pid_nr(struct task_struct *tsk)
15706 static inline __attribute__((always_inline)) pid_t task_pid_nr_ns(struct task_struct *tsk,
15707 struct pid_namespace *ns)
15709 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
15711 static inline __attribute__((always_inline)) pid_t task_pid_vnr(struct task_struct *tsk)
15713 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ((void *)0));
15715 static inline __attribute__((always_inline)) pid_t task_tgid_nr(struct task_struct *tsk)
15719 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
15720 static inline __attribute__((always_inline)) pid_t task_tgid_vnr(struct task_struct *tsk)
15722 return pid_vnr(task_tgid(tsk));
15724 static inline __attribute__((always_inline)) pid_t task_pgrp_nr_ns(struct task_struct *tsk,
15725 struct pid_namespace *ns)
15727 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
15729 static inline __attribute__((always_inline)) pid_t task_pgrp_vnr(struct task_struct *tsk)
15731 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ((void *)0));
15733 static inline __attribute__((always_inline)) pid_t task_session_nr_ns(struct task_struct *tsk,
15734 struct pid_namespace *ns)
15736 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
15738 static inline __attribute__((always_inline)) pid_t task_session_vnr(struct task_struct *tsk)
15740 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ((void *)0));
15742 static inline __attribute__((always_inline)) pid_t task_pgrp_nr(struct task_struct *tsk)
15744 return task_pgrp_nr_ns(tsk, &init_pid_ns);
15746 static inline __attribute__((always_inline)) int pid_alive(struct task_struct *p)
15748 return p->pids[PIDTYPE_PID].pid != ((void *)0);
15750 static inline __attribute__((always_inline)) int is_global_init(struct task_struct *tsk)
15752 return tsk->pid == 1;
15754 extern int is_container_init(struct task_struct *tsk);
15755 extern struct pid *cad_pid;
15756 extern void free_task(struct task_struct *tsk);
15757 extern void __put_task_struct(struct task_struct *t);
15758 static inline __attribute__((always_inline)) void put_task_struct(struct task_struct *t)
15760 if (__builtin_constant_p(((atomic_dec_and_test(&t->usage)))) ? !!((atomic_dec_and_test(&t->usage))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1750, }; ______r = !!((atomic_dec_and_test(&t->usage))); ______f.miss_hit[______r]++; ______r; }))
15761 __put_task_struct(t);
15763 extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
15764 extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
15765 extern void task_clear_group_stop_pending(struct task_struct *task);
15766 static inline __attribute__((always_inline)) void rcu_copy_process(struct task_struct *p)
15768 p->rcu_read_lock_nesting = 0;
15769 p->rcu_read_unlock_special = 0;
15770 p->rcu_blocked_node = ((void *)0);
15771 p->rcu_boost_mutex = ((void *)0);
15772 INIT_LIST_HEAD(&p->rcu_node_entry);
15774 extern void do_set_cpus_allowed(struct task_struct *p,
15775 const struct cpumask *new_mask);
15776 extern int set_cpus_allowed_ptr(struct task_struct *p,
15777 const struct cpumask *new_mask);
15778 static inline __attribute__((always_inline)) int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
15780 return set_cpus_allowed_ptr(p, &new_mask);
15782 extern unsigned long long __attribute__((no_instrument_function)) sched_clock(void);
15783 extern u64 cpu_clock(int cpu);
15784 extern u64 local_clock(void);
15785 extern u64 sched_clock_cpu(int cpu);
15786 extern void sched_clock_init(void);
15787 extern int sched_clock_stable;
15788 extern void sched_clock_tick(void);
15789 extern void sched_clock_idle_sleep_event(void);
15790 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
15791 extern void enable_sched_clock_irqtime(void);
15792 extern void disable_sched_clock_irqtime(void);
15793 extern unsigned long long
15794 task_sched_runtime(struct task_struct *task);
15795 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
15796 extern void sched_exec(void);
15797 extern void sched_clock_idle_sleep_event(void);
15798 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
15799 extern void idle_task_exit(void);
15800 static inline __attribute__((always_inline)) void wake_up_idle_cpu(int cpu) { }
15801 extern unsigned int sysctl_sched_latency;
15802 extern unsigned int sysctl_sched_min_granularity;
15803 extern unsigned int sysctl_sched_wakeup_granularity;
15804 extern unsigned int sysctl_sched_child_runs_first;
15805 enum sched_tunable_scaling {
15806 SCHED_TUNABLESCALING_NONE,
15807 SCHED_TUNABLESCALING_LOG,
15808 SCHED_TUNABLESCALING_LINEAR,
15809 SCHED_TUNABLESCALING_END,
15811 extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
15812 extern unsigned int sysctl_sched_migration_cost;
15813 extern unsigned int sysctl_sched_nr_migrate;
15814 extern unsigned int sysctl_sched_time_avg;
15815 extern unsigned int sysctl_timer_migration;
15816 extern unsigned int sysctl_sched_shares_window;
15817 int sched_proc_update_handler(struct ctl_table *table, int write,
15818 void *buffer, size_t *length,
15820 static inline __attribute__((always_inline)) unsigned int get_sysctl_timer_migration(void)
15822 return sysctl_timer_migration;
15824 extern unsigned int sysctl_sched_rt_period;
15825 extern int sysctl_sched_rt_runtime;
15826 int sched_rt_handler(struct ctl_table *table, int write,
15827 void *buffer, size_t *lenp,
15829 extern unsigned int sysctl_sched_autogroup_enabled;
15830 extern void sched_autogroup_create_attach(struct task_struct *p);
15831 extern void sched_autogroup_detach(struct task_struct *p);
15832 extern void sched_autogroup_fork(struct signal_struct *sig);
15833 extern void sched_autogroup_exit(struct signal_struct *sig);
15834 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
15835 extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
15836 extern int rt_mutex_getprio(struct task_struct *p);
15837 extern void rt_mutex_setprio(struct task_struct *p, int prio);
15838 extern void rt_mutex_adjust_pi(struct task_struct *p);
15839 extern bool yield_to(struct task_struct *p, bool preempt);
15840 extern void set_user_nice(struct task_struct *p, long nice);
15841 extern int task_prio(const struct task_struct *p);
15842 extern int task_nice(const struct task_struct *p);
15843 extern int can_nice(const struct task_struct *p, const int nice);
15844 extern int task_curr(const struct task_struct *p);
15845 extern int idle_cpu(int cpu);
15846 extern int sched_setscheduler(struct task_struct *, int,
15847 const struct sched_param *);
15848 extern int sched_setscheduler_nocheck(struct task_struct *, int,
15849 const struct sched_param *);
15850 extern struct task_struct *idle_task(int cpu);
15851 extern struct task_struct *curr_task(int cpu);
15852 extern void set_curr_task(int cpu, struct task_struct *p);
15854 extern struct exec_domain default_exec_domain;
15855 union thread_union {
15856 struct thread_info thread_info;
15857 unsigned long stack[(((1UL) << 12) << 1)/sizeof(long)];
15859 static inline __attribute__((always_inline)) int kstack_end(void *addr)
15861 return !(((unsigned long)addr+sizeof(void*)-1) & ((((1UL) << 12) << 1)-sizeof(void*)));
15863 extern union thread_union init_thread_union;
15864 extern struct task_struct init_task;
15865 extern struct mm_struct init_mm;
15866 extern struct pid_namespace init_pid_ns;
15867 extern struct task_struct *find_task_by_vpid(pid_t nr);
15868 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
15869 struct pid_namespace *ns);
15870 extern void __set_special_pids(struct pid *pid);
15871 extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
15872 static inline __attribute__((always_inline)) struct user_struct *get_uid(struct user_struct *u)
15874 atomic_inc(&u->__count);
15877 extern void free_uid(struct user_struct *);
15878 extern void release_uids(struct user_namespace *ns);
15879 extern void xtime_update(unsigned long ticks);
15880 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
15881 extern int wake_up_process(struct task_struct *tsk);
15882 extern void wake_up_new_task(struct task_struct *tsk);
15883 extern void kick_process(struct task_struct *tsk);
15884 extern void sched_fork(struct task_struct *p);
15885 extern void sched_dead(struct task_struct *p);
15886 extern void proc_caches_init(void);
15887 extern void flush_signals(struct task_struct *);
15888 extern void __flush_signals(struct task_struct *);
15889 extern void ignore_signals(struct task_struct *);
15890 extern void flush_signal_handlers(struct task_struct *, int force_default);
15891 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
15892 static inline __attribute__((always_inline)) int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
15894 unsigned long flags;
15896 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&tsk->sighand->siglock)); } while (0); } while (0);
15897 ret = dequeue_signal(tsk, mask, info);
15898 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
15901 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
15903 extern void unblock_all_signals(void);
15904 extern void release_task(struct task_struct * p);
15905 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
15906 extern int force_sigsegv(int, struct task_struct *);
15907 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
15908 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
15909 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
15910 extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
15911 extern int kill_pgrp(struct pid *pid, int sig, int priv);
15912 extern int kill_pid(struct pid *pid, int sig, int priv);
15913 extern int kill_proc_info(int, struct siginfo *, pid_t);
15914 extern int do_notify_parent(struct task_struct *, int);
15915 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
15916 extern void force_sig(int, struct task_struct *);
15917 extern int send_sig(int, struct task_struct *, int);
15918 extern int zap_other_threads(struct task_struct *p);
15919 extern struct sigqueue *sigqueue_alloc(void);
15920 extern void sigqueue_free(struct sigqueue *);
15921 extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
15922 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
15923 extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
15924 static inline __attribute__((always_inline)) int kill_cad_pid(int sig, int priv)
15926 return kill_pid(cad_pid, sig, priv);
15928 static inline __attribute__((always_inline)) int on_sig_stack(unsigned long sp)
15930 return sp > get_current()->sas_ss_sp &&
15931 sp - get_current()->sas_ss_sp <= get_current()->sas_ss_size;
15933 static inline __attribute__((always_inline)) int sas_ss_flags(unsigned long sp)
15935 return (get_current()->sas_ss_size == 0 ? 2
15936 : on_sig_stack(sp) ? 1 : 0);
15938 extern struct mm_struct * mm_alloc(void);
15939 extern void __mmdrop(struct mm_struct *);
15940 static inline __attribute__((always_inline)) void mmdrop(struct mm_struct * mm)
15942 if (__builtin_constant_p((((__builtin_constant_p(atomic_dec_and_test(&mm->mm_count)) ? !!(atomic_dec_and_test(&mm->mm_count)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2204, }; ______r = __builtin_expect(!!(atomic_dec_and_test(&mm->mm_count)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(atomic_dec_and_test(&mm->mm_count)) ? !!(atomic_dec_and_test(&mm->mm_count)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2204, }; ______r = __builtin_expect(!!(atomic_dec_and_test(&mm->mm_count)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2204, }; ______r = !!(((__builtin_constant_p(atomic_dec_and_test(&mm->mm_count)) ? !!(atomic_dec_and_test(&mm->mm_count)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2204, }; ______r = __builtin_expect(!!(atomic_dec_and_test(&mm->mm_count)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
15945 extern void mmput(struct mm_struct *);
15946 extern struct mm_struct *get_task_mm(struct task_struct *task);
15947 extern void mm_release(struct task_struct *, struct mm_struct *);
15948 extern struct mm_struct *dup_mm(struct task_struct *tsk);
15949 extern int copy_thread(unsigned long, unsigned long, unsigned long,
15950 struct task_struct *, struct pt_regs *);
15951 extern void flush_thread(void);
15952 extern void exit_thread(void);
15953 extern void exit_files(struct task_struct *);
15954 extern void __cleanup_sighand(struct sighand_struct *);
15955 extern void exit_itimers(struct signal_struct *);
15956 extern void flush_itimer_signals(void);
15957 extern void do_group_exit(int);
15958 extern void daemonize(const char *, ...);
15959 extern int allow_signal(int);
15960 extern int disallow_signal(int);
15961 extern int do_execve(const char *,
15962 const char * const *,
15963 const char * const *, struct pt_regs *);
15964 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int *, int *);
15965 struct task_struct *fork_idle(int);
15966 extern void set_task_comm(struct task_struct *tsk, char *from);
15967 extern char *get_task_comm(char *to, struct task_struct *tsk);
15968 void scheduler_ipi(void);
15969 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
15970 extern bool current_is_single_threaded(void);
15971 static inline __attribute__((always_inline)) int get_nr_threads(struct task_struct *tsk)
15973 return tsk->signal->nr_threads;
15975 static inline __attribute__((always_inline)) int has_group_leader_pid(struct task_struct *p)
15977 return p->pid == p->tgid;
15979 static inline __attribute__((always_inline))
15980 int same_thread_group(struct task_struct *p1, struct task_struct *p2)
15982 return p1->tgid == p2->tgid;
15984 static inline __attribute__((always_inline)) struct task_struct *next_thread(const struct task_struct *p)
15986 return ({typeof (*p->thread_group.next) *__ptr = (typeof (*p->thread_group.next) *)p->thread_group.next; ({ const typeof( ((struct task_struct *)0)->thread_group ) *__mptr = ((typeof(p->thread_group.next))({ typeof(*(__ptr)) *_________p1 = (typeof(*(__ptr))* )(*(volatile typeof((__ptr)) *)&((__ptr))); do { } while (0); ; do { } while (0); ((typeof(*(__ptr)) *)(_________p1)); })); (struct task_struct *)( (char *)__mptr - __builtin_offsetof(struct task_struct,thread_group) );}); })
15989 static inline __attribute__((always_inline)) int thread_group_empty(struct task_struct *p)
15991 return list_empty(&p->thread_group);
15993 static inline __attribute__((always_inline)) int task_detached(struct task_struct *p)
15995 return p->exit_signal == -1;
15997 static inline __attribute__((always_inline)) void task_lock(struct task_struct *p)
15999 spin_lock(&p->alloc_lock);
16001 static inline __attribute__((always_inline)) void task_unlock(struct task_struct *p)
16003 spin_unlock(&p->alloc_lock);
16005 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
16006 unsigned long *flags);
16007 static inline __attribute__((always_inline)) void unlock_task_sighand(struct task_struct *tsk,
16008 unsigned long *flags)
16010 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
16012 static inline __attribute__((always_inline)) void threadgroup_fork_read_lock(struct task_struct *tsk)
16014 down_read(&tsk->signal->threadgroup_fork_lock);
16016 static inline __attribute__((always_inline)) void threadgroup_fork_read_unlock(struct task_struct *tsk)
16018 up_read(&tsk->signal->threadgroup_fork_lock);
16020 static inline __attribute__((always_inline)) void threadgroup_fork_write_lock(struct task_struct *tsk)
16022 down_write(&tsk->signal->threadgroup_fork_lock);
16024 static inline __attribute__((always_inline)) void threadgroup_fork_write_unlock(struct task_struct *tsk)
16026 up_write(&tsk->signal->threadgroup_fork_lock);
16028 static inline __attribute__((always_inline)) void setup_thread_stack(struct task_struct *p, struct task_struct *org)
16030 *((struct thread_info *)(p)->stack) = *((struct thread_info *)(org)->stack);
16031 ((struct thread_info *)(p)->stack)->task = p;
16033 static inline __attribute__((always_inline)) unsigned long *end_of_stack(struct task_struct *p)
16035 return (unsigned long *)(((struct thread_info *)(p)->stack) + 1);
16037 static inline __attribute__((always_inline)) int object_is_on_stack(void *obj)
16039 void *stack = ((get_current())->stack);
16040 return (obj >= stack) && (obj < (stack + (((1UL) << 12) << 1)));
16042 extern void thread_info_cache_init(void);
16043 static inline __attribute__((always_inline)) void set_tsk_thread_flag(struct task_struct *tsk, int flag)
16045 set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
16047 static inline __attribute__((always_inline)) void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
16049 clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
16051 static inline __attribute__((always_inline)) int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
16053 return test_and_set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
16055 static inline __attribute__((always_inline)) int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
16057 return test_and_clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
16059 static inline __attribute__((always_inline)) int test_tsk_thread_flag(struct task_struct *tsk, int flag)
16061 return test_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
16063 static inline __attribute__((always_inline)) void set_tsk_need_resched(struct task_struct *tsk)
16065 set_tsk_thread_flag(tsk,3);
16067 static inline __attribute__((always_inline)) void clear_tsk_need_resched(struct task_struct *tsk)
16069 clear_tsk_thread_flag(tsk,3);
16071 static inline __attribute__((always_inline)) int test_tsk_need_resched(struct task_struct *tsk)
16073 return (__builtin_constant_p(test_tsk_thread_flag(tsk,3)) ? !!(test_tsk_thread_flag(tsk,3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2458, }; ______r = __builtin_expect(!!(test_tsk_thread_flag(tsk,3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
16075 static inline __attribute__((always_inline)) int restart_syscall(void)
16077 set_tsk_thread_flag(get_current(), 2);
16080 static inline __attribute__((always_inline)) int signal_pending(struct task_struct *p)
16082 return (__builtin_constant_p(test_tsk_thread_flag(p,2)) ? !!(test_tsk_thread_flag(p,2)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2469, }; ______r = __builtin_expect(!!(test_tsk_thread_flag(p,2)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
16084 static inline __attribute__((always_inline)) int __fatal_signal_pending(struct task_struct *p)
16086 return (__builtin_constant_p((__builtin_constant_p(9) ? __const_sigismember((&p->pending.signal), (9)) : __gen_sigismember((&p->pending.signal), (9)))) ? !!((__builtin_constant_p(9) ? __const_sigismember((&p->pending.signal), (9)) : __gen_sigismember((&p->pending.signal), (9)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2474, }; ______r = __builtin_expect(!!((__builtin_constant_p(9) ? __const_sigismember((&p->pending.signal), (9)) : __gen_sigismember((&p->pending.signal), (9)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
16088 static inline __attribute__((always_inline)) int fatal_signal_pending(struct task_struct *p)
16090 return signal_pending(p) && __fatal_signal_pending(p);
16092 static inline __attribute__((always_inline)) int signal_pending_state(long state, struct task_struct *p)
16094 if (__builtin_constant_p(((!(state & (1 | 128))))) ? !!((!(state & (1 | 128)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2484, }; ______r = !!((!(state & (1 | 128)))); ______f.miss_hit[______r]++; ______r; }))
16096 if (__builtin_constant_p(((!signal_pending(p)))) ? !!((!signal_pending(p))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2486, }; ______r = !!((!signal_pending(p))); ______f.miss_hit[______r]++; ______r; }))
16098 return (state & 1) || __fatal_signal_pending(p);
16100 static inline __attribute__((always_inline)) int need_resched(void)
16102 return (__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2494, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
16104 extern int _cond_resched(void);
16105 extern int __cond_resched_lock(spinlock_t *lock);
16106 extern int __cond_resched_softirq(void);
16107 static inline __attribute__((always_inline)) int spin_needbreak(spinlock_t *lock)
16109 return spin_is_contended(lock);
16111 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
16112 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
16113 static inline __attribute__((always_inline)) void thread_group_cputime_init(struct signal_struct *sig)
16115 do { spinlock_check(&sig->cputimer.lock); do { static struct lock_class_key __key; __raw_spin_lock_init((&(&sig->cputimer.lock)->rlock), "&(&sig->cputimer.lock)->rlock", &__key); } while (0); } while (0);
16117 extern void recalc_sigpending_and_wake(struct task_struct *t);
16118 extern void recalc_sigpending(void);
16119 extern void signal_wake_up(struct task_struct *t, int resume_stopped);
16120 static inline __attribute__((always_inline)) unsigned int task_cpu(const struct task_struct *p)
16122 return ((struct thread_info *)(p)->stack)->cpu;
16124 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
16125 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
16126 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
16127 extern void normalize_rt_tasks(void);
16128 extern struct task_group root_task_group;
16129 extern struct task_group *sched_create_group(struct task_group *parent);
16130 extern void sched_destroy_group(struct task_group *tg);
16131 extern void sched_move_task(struct task_struct *tsk);
16132 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
16133 extern unsigned long sched_group_shares(struct task_group *tg);
16134 extern int task_can_switch_user(struct user_struct *up,
16135 struct task_struct *tsk);
16136 static inline __attribute__((always_inline)) void add_rchar(struct task_struct *tsk, ssize_t amt)
16138 tsk->ioac.rchar += amt;
16140 static inline __attribute__((always_inline)) void add_wchar(struct task_struct *tsk, ssize_t amt)
16142 tsk->ioac.wchar += amt;
16144 static inline __attribute__((always_inline)) void inc_syscr(struct task_struct *tsk)
16148 static inline __attribute__((always_inline)) void inc_syscw(struct task_struct *tsk)
16152 static inline __attribute__((always_inline)) void mm_update_next_owner(struct mm_struct *mm)
16155 static inline __attribute__((always_inline)) void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
16158 static inline __attribute__((always_inline)) unsigned long task_rlimit(const struct task_struct *tsk,
16159 unsigned int limit)
16161 return (*(volatile typeof(tsk->signal->rlim[limit].rlim_cur) *)&(tsk->signal->rlim[limit].rlim_cur));
16163 static inline __attribute__((always_inline)) unsigned long task_rlimit_max(const struct task_struct *tsk,
16164 unsigned int limit)
16166 return (*(volatile typeof(tsk->signal->rlim[limit].rlim_max) *)&(tsk->signal->rlim[limit].rlim_max));
16168 static inline __attribute__((always_inline)) unsigned long rlimit(unsigned int limit)
16170 return task_rlimit(get_current(), limit);
16172 static inline __attribute__((always_inline)) unsigned long rlimit_max(unsigned int limit)
16174 return task_rlimit_max(get_current(), limit);
16177 struct softirq_action;
16178 extern struct tracepoint
16179 __tracepoint_irq_handler_entry
16180 ; static inline __attribute__((always_inline)) void
16181 trace_irq_handler_entry
16182 (int irq, struct irqaction *action) { if (__builtin_constant_p(((static_branch(&__tracepoint_irq_handler_entry.key)))) ? !!((static_branch(&__tracepoint_irq_handler_entry.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16183 "include/trace/events/irq.h"
16186 , }; ______r = !!((static_branch(&__tracepoint_irq_handler_entry.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16187 "include/trace/events/irq.h"
16190 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_irq_handler_entry)->funcs)) *_________p1 = (typeof(*((&__tracepoint_irq_handler_entry)->funcs))* )(*(volatile typeof(((&__tracepoint_irq_handler_entry)->funcs)) *)&(((&__tracepoint_irq_handler_entry)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_irq_handler_entry)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16191 "include/trace/events/irq.h"
16194 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, int irq, struct irqaction *action))(it_func))(__data, irq, action); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
16195 register_trace_irq_handler_entry
16196 (void (*probe)(void *__data, int irq, struct irqaction *action), void *data) { return tracepoint_probe_register("irq_handler_entry", (void *)probe, data); } static inline __attribute__((always_inline)) int
16197 unregister_trace_irq_handler_entry
16198 (void (*probe)(void *__data, int irq, struct irqaction *action), void *data) { return tracepoint_probe_unregister("irq_handler_entry", (void *)probe, data); } static inline __attribute__((always_inline)) void
16199 check_trace_callback_type_irq_handler_entry
16200 (void (*cb)(void *__data, int irq, struct irqaction *action)) { }
16202 extern struct tracepoint
16203 __tracepoint_irq_handler_exit
16204 ; static inline __attribute__((always_inline)) void
16205 trace_irq_handler_exit
16206 (int irq, struct irqaction *action, int ret) { if (__builtin_constant_p(((static_branch(&__tracepoint_irq_handler_exit.key)))) ? !!((static_branch(&__tracepoint_irq_handler_exit.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16207 "include/trace/events/irq.h"
16210 , }; ______r = !!((static_branch(&__tracepoint_irq_handler_exit.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16211 "include/trace/events/irq.h"
16214 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_irq_handler_exit)->funcs)) *_________p1 = (typeof(*((&__tracepoint_irq_handler_exit)->funcs))* )(*(volatile typeof(((&__tracepoint_irq_handler_exit)->funcs)) *)&(((&__tracepoint_irq_handler_exit)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_irq_handler_exit)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16215 "include/trace/events/irq.h"
16218 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, int irq, struct irqaction *action, int ret))(it_func))(__data, irq, action, ret); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
16219 register_trace_irq_handler_exit
16220 (void (*probe)(void *__data, int irq, struct irqaction *action, int ret), void *data) { return tracepoint_probe_register("irq_handler_exit", (void *)probe, data); } static inline __attribute__((always_inline)) int
16221 unregister_trace_irq_handler_exit
16222 (void (*probe)(void *__data, int irq, struct irqaction *action, int ret), void *data) { return tracepoint_probe_unregister("irq_handler_exit", (void *)probe, data); } static inline __attribute__((always_inline)) void
16223 check_trace_callback_type_irq_handler_exit
16224 (void (*cb)(void *__data, int irq, struct irqaction *action, int ret)) { }
16227 extern struct tracepoint
16228 __tracepoint_softirq_entry
16229 ; static inline __attribute__((always_inline)) void
16230 trace_softirq_entry
16231 (unsigned int vec_nr) { if (__builtin_constant_p(((static_branch(&__tracepoint_softirq_entry.key)))) ? !!((static_branch(&__tracepoint_softirq_entry.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16232 "include/trace/events/irq.h"
16235 , }; ______r = !!((static_branch(&__tracepoint_softirq_entry.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16236 "include/trace/events/irq.h"
16239 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_softirq_entry)->funcs)) *_________p1 = (typeof(*((&__tracepoint_softirq_entry)->funcs))* )(*(volatile typeof(((&__tracepoint_softirq_entry)->funcs)) *)&(((&__tracepoint_softirq_entry)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_softirq_entry)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16240 "include/trace/events/irq.h"
16243 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, unsigned int vec_nr))(it_func))(__data, vec_nr); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
16244 register_trace_softirq_entry
16245 (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_register("softirq_entry", (void *)probe, data); } static inline __attribute__((always_inline)) int
16246 unregister_trace_softirq_entry
16247 (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_unregister("softirq_entry", (void *)probe, data); } static inline __attribute__((always_inline)) void
16248 check_trace_callback_type_softirq_entry
16249 (void (*cb)(void *__data, unsigned int vec_nr)) { }
16251 extern struct tracepoint
16252 __tracepoint_softirq_exit
16253 ; static inline __attribute__((always_inline)) void
16255 (unsigned int vec_nr) { if (__builtin_constant_p(((static_branch(&__tracepoint_softirq_exit.key)))) ? !!((static_branch(&__tracepoint_softirq_exit.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16256 "include/trace/events/irq.h"
16259 , }; ______r = !!((static_branch(&__tracepoint_softirq_exit.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16260 "include/trace/events/irq.h"
16263 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_softirq_exit)->funcs)) *_________p1 = (typeof(*((&__tracepoint_softirq_exit)->funcs))* )(*(volatile typeof(((&__tracepoint_softirq_exit)->funcs)) *)&(((&__tracepoint_softirq_exit)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_softirq_exit)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16264 "include/trace/events/irq.h"
16267 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, unsigned int vec_nr))(it_func))(__data, vec_nr); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
16268 register_trace_softirq_exit
16269 (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_register("softirq_exit", (void *)probe, data); } static inline __attribute__((always_inline)) int
16270 unregister_trace_softirq_exit
16271 (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_unregister("softirq_exit", (void *)probe, data); } static inline __attribute__((always_inline)) void
16272 check_trace_callback_type_softirq_exit
16273 (void (*cb)(void *__data, unsigned int vec_nr)) { }
16275 extern struct tracepoint
16276 __tracepoint_softirq_raise
16277 ; static inline __attribute__((always_inline)) void
16278 trace_softirq_raise
16279 (unsigned int vec_nr) { if (__builtin_constant_p(((static_branch(&__tracepoint_softirq_raise.key)))) ? !!((static_branch(&__tracepoint_softirq_raise.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16280 "include/trace/events/irq.h"
16283 , }; ______r = !!((static_branch(&__tracepoint_softirq_raise.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16284 "include/trace/events/irq.h"
16287 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_softirq_raise)->funcs)) *_________p1 = (typeof(*((&__tracepoint_softirq_raise)->funcs))* )(*(volatile typeof(((&__tracepoint_softirq_raise)->funcs)) *)&(((&__tracepoint_softirq_raise)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_softirq_raise)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
16288 "include/trace/events/irq.h"
16291 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, unsigned int vec_nr))(it_func))(__data, vec_nr); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
16292 register_trace_softirq_raise
16293 (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_register("softirq_raise", (void *)probe, data); } static inline __attribute__((always_inline)) int
16294 unregister_trace_softirq_raise
16295 (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_unregister("softirq_raise", (void *)probe, data); } static inline __attribute__((always_inline)) void
16296 check_trace_callback_type_softirq_raise
16297 (void (*cb)(void *__data, unsigned int vec_nr)) { }
16300 IRQC_IS_HARDIRQ = 0,
16303 typedef irqreturn_t (*irq_handler_t)(int, void *);
16305 irq_handler_t handler;
16306 unsigned long flags;
16308 struct irqaction *next;
16310 irq_handler_t thread_fn;
16311 struct task_struct *thread;
16312 unsigned long thread_flags;
16313 unsigned long thread_mask;
16315 struct proc_dir_entry *dir;
16316 } __attribute__((__aligned__(1 << (6))));
16317 extern irqreturn_t no_action(int cpl, void *dev_id);
16318 extern int __attribute__((warn_unused_result))
16319 request_threaded_irq(unsigned int irq, irq_handler_t handler,
16320 irq_handler_t thread_fn,
16321 unsigned long flags, const char *name, void *dev);
16322 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result))
16323 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
16324 const char *name, void *dev)
16326 return request_threaded_irq(irq, handler, ((void *)0), flags, name, dev);
16328 extern int __attribute__((warn_unused_result))
16329 request_any_context_irq(unsigned int irq, irq_handler_t handler,
16330 unsigned long flags, const char *name, void *dev_id);
16331 extern void exit_irq_thread(void);
16332 extern void free_irq(unsigned int, void *);
16334 extern int __attribute__((warn_unused_result))
16335 devm_request_threaded_irq(struct device *dev, unsigned int irq,
16336 irq_handler_t handler, irq_handler_t thread_fn,
16337 unsigned long irqflags, const char *devname,
16339 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result))
16340 devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
16341 unsigned long irqflags, const char *devname, void *dev_id)
16343 return devm_request_threaded_irq(dev, irq, handler, ((void *)0), irqflags,
16346 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
16347 extern void disable_irq_nosync(unsigned int irq);
16348 extern void disable_irq(unsigned int irq);
16349 extern void enable_irq(unsigned int irq);
16350 extern void suspend_device_irqs(void);
16351 extern void resume_device_irqs(void);
16352 extern int check_wakeup_irqs(void);
16353 extern cpumask_var_t irq_default_affinity;
16354 extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
16355 extern int irq_can_set_affinity(unsigned int irq);
16356 extern int irq_select_affinity(unsigned int irq);
16357 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
16358 struct irq_affinity_notify {
16361 struct work_struct work;
16362 void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
16363 void (*release)(struct kref *ref);
16366 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
16367 static inline __attribute__((always_inline)) void irq_run_affinity_notifiers(void)
16369 flush_scheduled_work();
16371 static inline __attribute__((always_inline)) void disable_irq_nosync_lockdep(unsigned int irq)
16373 disable_irq_nosync(irq);
16374 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
16376 static inline __attribute__((always_inline)) void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
16378 disable_irq_nosync(irq);
16379 do { do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); *flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
16381 static inline __attribute__((always_inline)) void disable_irq_lockdep(unsigned int irq)
16384 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
16386 static inline __attribute__((always_inline)) void enable_irq_lockdep(unsigned int irq)
16388 do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);
16391 static inline __attribute__((always_inline)) void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
16393 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(*flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(*flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 333, }; ______r = !!((({ ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(*flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(*flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(*flags); } while (0); } } while (0);
16396 extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
16397 static inline __attribute__((always_inline)) int enable_irq_wake(unsigned int irq)
16399 return irq_set_irq_wake(irq, 1);
16401 static inline __attribute__((always_inline)) int disable_irq_wake(unsigned int irq)
16403 return irq_set_irq_wake(irq, 0);
16405 extern bool force_irqthreads;
16413 BLOCK_IOPOLL_SOFTIRQ,
16420 extern char *softirq_to_name[NR_SOFTIRQS];
16421 struct softirq_action
16423 void (*action)(struct softirq_action *);
16425 __attribute__((regparm(0))) void do_softirq(void);
16426 __attribute__((regparm(0))) void __do_softirq(void);
16427 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
16428 extern void softirq_init(void);
16429 static inline __attribute__((always_inline)) void __raise_softirq_irqoff(unsigned int nr)
16431 trace_softirq_raise(nr);
16432 do { typedef typeof(irq_stat.__softirq_pending) pto_T__; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 443, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pto_T__ pto_tmp__; pto_tmp__ = ((1UL << nr)); (void)pto_tmp__; } switch (sizeof(irq_stat.__softirq_pending)) { case 1: asm("or" "b %1,""%%""fs"":" "%P" "0" : "+m" (irq_stat.__softirq_pending) : "qi" ((pto_T__)((1UL << nr)))); break; case 2: asm("or" "w %1,""%%""fs"":" "%P" "0" : "+m" (irq_stat.__softirq_pending) : "ri" ((pto_T__)((1UL << nr)))); break; case 4: asm("or" "l %1,""%%""fs"":" "%P" "0" : "+m" (irq_stat.__softirq_pending) : "ri" ((pto_T__)((1UL << nr)))); break; case 8: asm("or" "q %1,""%%""fs"":" "%P" "0" : "+m" (irq_stat.__softirq_pending) : "re" ((pto_T__)((1UL << nr)))); break; default: __bad_percpu_size(); } } while (0);
16434 extern void raise_softirq_irqoff(unsigned int nr);
16435 extern void raise_softirq(unsigned int nr);
16436 extern __attribute__((section(".data..percpu" ""))) __typeof__(struct list_head [NR_SOFTIRQS]) softirq_work_list;
16437 extern __attribute__((section(".data..percpu" ""))) __typeof__(struct task_struct *) ksoftirqd;
16438 static inline __attribute__((always_inline)) struct task_struct *this_cpu_ksoftirqd(void)
16440 return ({ typeof((ksoftirqd)) pscr_ret__; do { const void *__vpp_verify = (typeof(&((ksoftirqd))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((ksoftirqd))) { case 1: pscr_ret__ = ({ typeof(((ksoftirqd))) pfo_ret__; switch (sizeof(((ksoftirqd)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((ksoftirqd))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 2: pscr_ret__ = ({ typeof(((ksoftirqd))) pfo_ret__; switch (sizeof(((ksoftirqd)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((ksoftirqd))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 4: pscr_ret__ = ({ typeof(((ksoftirqd))) pfo_ret__; switch (sizeof(((ksoftirqd)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((ksoftirqd))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 8: pscr_ret__ = ({ typeof((ksoftirqd)) ret__; do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); ret__ = *({ do { const void *__vpp_verify = (typeof((&((ksoftirqd)))))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((ksoftirqd)))) *)(&((ksoftirqd))))); (typeof((typeof(*(&((ksoftirqd)))) *)(&((ksoftirqd))))) (__ptr + (((__per_cpu_offset[debug_smp_processor_id()])))); }); }); do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 462, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 462, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 462, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 462, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); ret__; });break; default: __bad_size_call_parameter();break; } pscr_ret__; });
16442 extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
16443 extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
16444 int this_cpu, int softirq);
16445 struct tasklet_struct
16447 struct tasklet_struct *next;
16448 unsigned long state;
16450 void (*func)(unsigned long);
16451 unsigned long data;
16455 TASKLET_STATE_SCHED,
16458 static inline __attribute__((always_inline)) int tasklet_trylock(struct tasklet_struct *t)
16460 return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
16462 static inline __attribute__((always_inline)) void tasklet_unlock(struct tasklet_struct *t)
16464 __asm__ __volatile__("": : :"memory");
16465 clear_bit(TASKLET_STATE_RUN, &(t)->state);
16467 static inline __attribute__((always_inline)) void tasklet_unlock_wait(struct tasklet_struct *t)
16469 while ((__builtin_constant_p((TASKLET_STATE_RUN)) ? constant_test_bit((TASKLET_STATE_RUN), (&(t)->state)) : variable_test_bit((TASKLET_STATE_RUN), (&(t)->state)))) { __asm__ __volatile__("": : :"memory"); }
16471 extern void __tasklet_schedule(struct tasklet_struct *t);
16472 static inline __attribute__((always_inline)) void tasklet_schedule(struct tasklet_struct *t)
16474 if (__builtin_constant_p(((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)))) ? !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 544, }; ______r = !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))); ______f.miss_hit[______r]++; ______r; }))
16475 __tasklet_schedule(t);
16477 extern void __tasklet_hi_schedule(struct tasklet_struct *t);
16478 static inline __attribute__((always_inline)) void tasklet_hi_schedule(struct tasklet_struct *t)
16480 if (__builtin_constant_p(((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)))) ? !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 552, }; ______r = !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))); ______f.miss_hit[______r]++; ______r; }))
16481 __tasklet_hi_schedule(t);
16483 extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
16484 static inline __attribute__((always_inline)) void tasklet_hi_schedule_first(struct tasklet_struct *t)
16486 if (__builtin_constant_p(((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)))) ? !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 566, }; ______r = !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))); ______f.miss_hit[______r]++; ______r; }))
16487 __tasklet_hi_schedule_first(t);
16489 static inline __attribute__((always_inline)) void tasklet_disable_nosync(struct tasklet_struct *t)
16491 atomic_inc(&t->count);
16492 __asm__ __volatile__("": : :"memory");
16494 static inline __attribute__((always_inline)) void tasklet_disable(struct tasklet_struct *t)
16496 tasklet_disable_nosync(t);
16497 tasklet_unlock_wait(t);
16498 asm volatile ("661:\n\t" "lock; addl $0,0(%%esp)" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(0*32+26)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "mfence" "\n664:\n" ".previous" : : : "memory");
16500 static inline __attribute__((always_inline)) void tasklet_enable(struct tasklet_struct *t)
16502 __asm__ __volatile__("": : :"memory");
16503 atomic_dec(&t->count);
16505 static inline __attribute__((always_inline)) void tasklet_hi_enable(struct tasklet_struct *t)
16507 __asm__ __volatile__("": : :"memory");
16508 atomic_dec(&t->count);
16510 extern void tasklet_kill(struct tasklet_struct *t);
16511 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
16512 extern void tasklet_init(struct tasklet_struct *t,
16513 void (*func)(unsigned long), unsigned long data);
16514 struct tasklet_hrtimer {
16515 struct hrtimer timer;
16516 struct tasklet_struct tasklet;
16517 enum hrtimer_restart (*function)(struct hrtimer *);
16520 tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
16521 enum hrtimer_restart (*function)(struct hrtimer *),
16522 clockid_t which_clock, enum hrtimer_mode mode);
16523 static inline __attribute__((always_inline))
16524 int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
16525 const enum hrtimer_mode mode)
16527 return hrtimer_start(&ttimer->timer, time, mode);
16529 static inline __attribute__((always_inline))
16530 void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
16532 hrtimer_cancel(&ttimer->timer);
16533 tasklet_kill(&ttimer->tasklet);
16535 extern unsigned long probe_irq_on(void);
16536 extern int probe_irq_off(unsigned long);
16537 extern unsigned int probe_irq_mask(unsigned long);
16538 extern void init_irq_proc(void);
16540 int show_interrupts(struct seq_file *p, void *v);
16541 int arch_show_interrupts(struct seq_file *p, int prec);
16542 extern int early_irq_init(void);
16543 extern int arch_probe_nr_irqs(void);
16544 extern int arch_early_irq_init(void);
16545 struct cpu_usage_stat {
16548 cputime64_t system;
16549 cputime64_t softirq;
16552 cputime64_t iowait;
16555 cputime64_t guest_nice;
16557 struct kernel_stat {
16558 struct cpu_usage_stat cpustat;
16559 unsigned long irqs_sum;
16560 unsigned int softirqs[NR_SOFTIRQS];
16562 extern __attribute__((section(".data..percpu" ""))) __typeof__(struct kernel_stat) kstat;
16563 extern unsigned long long nr_context_switches(void);
16564 extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
16565 static inline __attribute__((always_inline)) void kstat_incr_softirqs_this_cpu(unsigned int irq)
16567 do { do { const void *__vpp_verify = (typeof(&(((kstat.softirqs[irq])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((kstat.softirqs[irq])))) { case 1: do { typedef typeof((((kstat.softirqs[irq])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((kstat.softirqs[irq]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((kstat.softirqs[irq])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((kstat.softirqs[irq]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((kstat.softirqs[irq])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((kstat.softirqs[irq]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((kstat.softirqs[irq]))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((kstat.softirqs[irq])))))); (typeof(*(&((((kstat.softirqs[irq])))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
16569 static inline __attribute__((always_inline)) unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
16571 return (*({ do { const void *__vpp_verify = (typeof((&(kstat))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(kstat))) *)(&(kstat)))); (typeof((typeof(*(&(kstat))) *)(&(kstat)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).softirqs[irq];
16573 extern unsigned int kstat_irqs(unsigned int irq);
16574 static inline __attribute__((always_inline)) unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
16576 return (*({ do { const void *__vpp_verify = (typeof((&(kstat))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(kstat))) *)(&(kstat)))); (typeof((typeof(*(&(kstat))) *)(&(kstat)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).irqs_sum;
16578 extern unsigned long long task_delta_exec(struct task_struct *);
16579 extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
16580 extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
16581 extern void account_steal_time(cputime_t);
16582 extern void account_idle_time(cputime_t);
16583 extern void account_process_tick(struct task_struct *, int user);
16584 extern void account_steal_ticks(unsigned long ticks);
16585 extern void account_idle_ticks(unsigned long ticks);
16586 struct task_struct;
16587 struct user_regset;
16588 typedef int user_regset_active_fn(struct task_struct *target,
16589 const struct user_regset *regset);
16590 typedef int user_regset_get_fn(struct task_struct *target,
16591 const struct user_regset *regset,
16592 unsigned int pos, unsigned int count,
16593 void *kbuf, void *ubuf);
16594 typedef int user_regset_set_fn(struct task_struct *target,
16595 const struct user_regset *regset,
16596 unsigned int pos, unsigned int count,
16597 const void *kbuf, const void *ubuf);
16598 typedef int user_regset_writeback_fn(struct task_struct *target,
16599 const struct user_regset *regset,
16601 struct user_regset {
16602 user_regset_get_fn *get;
16603 user_regset_set_fn *set;
16604 user_regset_active_fn *active;
16605 user_regset_writeback_fn *writeback;
16608 unsigned int align;
16610 unsigned int core_note_type;
16612 struct user_regset_view {
16614 const struct user_regset *regsets;
16620 const struct user_regset_view *task_user_regset_view(struct task_struct *tsk);
16621 static inline __attribute__((always_inline)) int user_regset_copyout(unsigned int *pos, unsigned int *count,
16623 void **ubuf, const void *data,
16624 const int start_pos, const int end_pos)
16626 if (__builtin_constant_p(((*count == 0))) ? !!((*count == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 224, }; ______r = !!((*count == 0)); ______f.miss_hit[______r]++; ______r; }))
16628 do { if (__builtin_constant_p((((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 226, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 226, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 226, }; ______r = !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 226, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/regset.h"), "i" (226), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
16629 if (__builtin_constant_p(((end_pos < 0 || *pos < end_pos))) ? !!((end_pos < 0 || *pos < end_pos)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 227, }; ______r = !!((end_pos < 0 || *pos < end_pos)); ______f.miss_hit[______r]++; ______r; })) {
16630 unsigned int copy = (end_pos < 0 ? *count
16631 : ({ typeof(*count) _min1 = (*count); typeof(end_pos - *pos) _min2 = (end_pos - *pos); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; }));
16632 data += *pos - start_pos;
16633 if (__builtin_constant_p(((*kbuf))) ? !!((*kbuf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 231, }; ______r = !!((*kbuf)); ______f.miss_hit[______r]++; ______r; })) {
16634 __builtin_memcpy(*kbuf, data, copy);
16636 } else if (__builtin_constant_p(((__copy_to_user(*ubuf, data, copy)))) ? !!((__copy_to_user(*ubuf, data, copy))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 234, }; ______r = !!((__copy_to_user(*ubuf, data, copy))); ______f.miss_hit[______r]++; ______r; }))
16645 static inline __attribute__((always_inline)) int user_regset_copyin(unsigned int *pos, unsigned int *count,
16647 const void **ubuf, void *data,
16648 const int start_pos, const int end_pos)
16650 if (__builtin_constant_p(((*count == 0))) ? !!((*count == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 249, }; ______r = !!((*count == 0)); ______f.miss_hit[______r]++; ______r; }))
16652 do { if (__builtin_constant_p((((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 251, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 251, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 251, }; ______r = !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 251, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/regset.h"), "i" (251), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
16653 if (__builtin_constant_p(((end_pos < 0 || *pos < end_pos))) ? !!((end_pos < 0 || *pos < end_pos)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 252, }; ______r = !!((end_pos < 0 || *pos < end_pos)); ______f.miss_hit[______r]++; ______r; })) {
16654 unsigned int copy = (end_pos < 0 ? *count
16655 : ({ typeof(*count) _min1 = (*count); typeof(end_pos - *pos) _min2 = (end_pos - *pos); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; }));
16656 data += *pos - start_pos;
16657 if (__builtin_constant_p(((*kbuf))) ? !!((*kbuf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 256, }; ______r = !!((*kbuf)); ______f.miss_hit[______r]++; ______r; })) {
16658 __builtin_memcpy(data, *kbuf, copy);
16660 } else if (__builtin_constant_p(((__copy_from_user(data, *ubuf, copy)))) ? !!((__copy_from_user(data, *ubuf, copy))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 259, }; ______r = !!((__copy_from_user(data, *ubuf, copy))); ______f.miss_hit[______r]++; ______r; }))
16669 static inline __attribute__((always_inline)) int user_regset_copyout_zero(unsigned int *pos,
16670 unsigned int *count,
16671 void **kbuf, void **ubuf,
16672 const int start_pos,
16675 if (__builtin_constant_p(((*count == 0))) ? !!((*count == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 279, }; ______r = !!((*count == 0)); ______f.miss_hit[______r]++; ______r; }))
16677 do { if (__builtin_constant_p((((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 281, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 281, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 281, }; ______r = !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 281, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/regset.h"), "i" (281), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
16678 if (__builtin_constant_p(((end_pos < 0 || *pos < end_pos))) ? !!((end_pos < 0 || *pos < end_pos)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 282, }; ______r = !!((end_pos < 0 || *pos < end_pos)); ______f.miss_hit[______r]++; ______r; })) {
16679 unsigned int copy = (end_pos < 0 ? *count
16680 : ({ typeof(*count) _min1 = (*count); typeof(end_pos - *pos) _min2 = (end_pos - *pos); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; }));
16681 if (__builtin_constant_p(((*kbuf))) ? !!((*kbuf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 285, }; ______r = !!((*kbuf)); ______f.miss_hit[______r]++; ______r; })) {
16682 __builtin_memset(*kbuf, 0, copy);
16684 } else if (__builtin_constant_p(((__clear_user(*ubuf, copy)))) ? !!((__clear_user(*ubuf, copy))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 288, }; ______r = !!((__clear_user(*ubuf, copy))); ______f.miss_hit[______r]++; ______r; }))
16693 static inline __attribute__((always_inline)) int user_regset_copyin_ignore(unsigned int *pos,
16694 unsigned int *count,
16697 const int start_pos,
16700 if (__builtin_constant_p(((*count == 0))) ? !!((*count == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 305, }; ______r = !!((*count == 0)); ______f.miss_hit[______r]++; ______r; }))
16702 do { if (__builtin_constant_p((((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 307, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 307, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 307, }; ______r = !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 307, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/regset.h"), "i" (307), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
16703 if (__builtin_constant_p(((end_pos < 0 || *pos < end_pos))) ? !!((end_pos < 0 || *pos < end_pos)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 308, }; ______r = !!((end_pos < 0 || *pos < end_pos)); ______f.miss_hit[______r]++; ______r; })) {
16704 unsigned int copy = (end_pos < 0 ? *count
16705 : ({ typeof(*count) _min1 = (*count); typeof(end_pos - *pos) _min2 = (end_pos - *pos); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; }));
16706 if (__builtin_constant_p(((*kbuf))) ? !!((*kbuf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 311, }; ______r = !!((*kbuf)); ______f.miss_hit[______r]++; ______r; }))
16715 static inline __attribute__((always_inline)) int copy_regset_to_user(struct task_struct *target,
16716 const struct user_regset_view *view,
16717 unsigned int setno,
16718 unsigned int offset, unsigned int size,
16721 const struct user_regset *regset = &view->regsets[setno];
16722 if (__builtin_constant_p(((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? !!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 338, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))))) ? !!((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? 
!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 338, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 338, }; ______r = !!((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? 
!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 338, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))); ______f.miss_hit[______r]++; ______r; }))
16724 return regset->get(target, regset, offset, size, ((void *)0), data);
16726 static inline __attribute__((always_inline)) int copy_regset_from_user(struct task_struct *target,
16727 const struct user_regset_view *view,
16728 unsigned int setno,
16729 unsigned int offset, unsigned int size,
16732 const struct user_regset *regset = &view->regsets[setno];
16733 if (__builtin_constant_p(((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? !!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 361, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))))) ? !!((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? 
!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 361, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 361, }; ______r = !!((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? 
!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 361, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))); ______f.miss_hit[______r]++; ______r; }))
16735 return regset->set(target, regset, offset, size, ((void *)0), data);
16737 extern unsigned int xstate_size;
16738 extern u64 pcntxt_mask;
16739 extern u64 xstate_fx_sw_bytes[6];
16740 extern void xsave_init(void);
16741 extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
16742 extern int init_fpu(struct task_struct *child);
16743 extern int check_for_xstate(struct i387_fxsave_struct *buf,
16745 struct _fpx_sw_bytes *sw);
16746 static inline __attribute__((always_inline)) int fpu_xrstor_checking(struct fpu *fpu)
16748 struct xsave_struct *fx = &fpu->state->xsave;
16750 asm volatile("1: .byte " "0x0f,0xae,0x2f\n\t"
16752 ".section .fixup,\"ax\"\n"
16753 "3: movl $-1,%[err]\n"
16756 " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n"
16758 : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
16762 static inline __attribute__((always_inline)) int xsave_user(struct xsave_struct *buf)
16765 err = __clear_user(&buf->xsave_hdr,
16766 sizeof(struct xsave_hdr_struct));
16767 if (__builtin_constant_p((((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = !!(((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
16769 __asm__ __volatile__("1: .byte " "0x0f,0xae,0x27\n"
16771 ".section .fixup,\"ax\"\n"
16772 "3: movl $-1,%[err]\n"
16775 ".section __ex_table,\"a\"\n"
16776 " " ".balign 4" " " "\n"
16777 " " ".long" " " "1b,3b\n"
16780 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
16782 if (__builtin_constant_p((((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 90, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __clear_user(buf, xstate_size)))) ? !!(((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 90, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __clear_user(buf, xstate_size))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 90, }; ______r = !!(((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 90, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __clear_user(buf, xstate_size))); ______f.miss_hit[______r]++; ______r; }))
16786 static inline __attribute__((always_inline)) int xrestore_user(struct xsave_struct *buf, u64 mask)
16789 struct xsave_struct *xstate = (( struct xsave_struct *)buf);
16791 u32 hmask = mask >> 32;
16792 __asm__ __volatile__("1: .byte " "0x0f,0xae,0x2f\n"
16794 ".section .fixup,\"ax\"\n"
16795 "3: movl $-1,%[err]\n"
16798 ".section __ex_table,\"a\"\n"
16799 " " ".balign 4" " " "\n"
16800 " " ".long" " " "1b,3b\n"
16803 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
16807 static inline __attribute__((always_inline)) void xrstor_state(struct xsave_struct *fx, u64 mask)
16810 u32 hmask = mask >> 32;
16811 asm volatile(".byte " "0x0f,0xae,0x2f\n\t"
16812 : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
16815 static inline __attribute__((always_inline)) void xsave_state(struct xsave_struct *fx, u64 mask)
16818 u32 hmask = mask >> 32;
16819 asm volatile(".byte " "0x0f,0xae,0x27\n\t"
16820 : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
16823 static inline __attribute__((always_inline)) void fpu_xsave(struct fpu *fpu)
16825 asm volatile ("661:\n\t" ".byte " "0x0f,0xae,0x27" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(7*32+ 4)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" ".byte " "0x0f,0xae,0x37" "\n664:\n" ".previous" : : "i" (0), [fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) : "memory")
16828 extern unsigned int sig_xstate_size;
16829 extern void fpu_init(void);
16830 extern void mxcsr_feature_mask_init(void);
16831 extern int init_fpu(struct task_struct *child);
16832 extern __attribute__((regparm(0))) void math_state_restore(void);
16833 extern void __math_state_restore(void);
16834 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
16835 extern user_regset_active_fn fpregs_active, xfpregs_active;
16836 extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
16838 extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
16840 extern struct _fpx_sw_bytes fx_sw_reserved;
16841 static inline __attribute__((always_inline)) void finit_soft_fpu(struct i387_soft_struct *soft) {}
16842 static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) bool use_xsaveopt(void)
16844 return ( __builtin_constant_p((__builtin_constant_p((7*32+ 4)) && ( ((((7*32+ 4))>>5)==0 && (1UL<<(((7*32+ 4))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((7*32+ 4))>>5)==1 && (1UL<<(((7*32+ 4))&31) & (0|0))) || ((((7*32+ 4))>>5)==2 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==3 && (1UL<<(((7*32+ 4))&31) & (0))) || ((((7*32+ 4))>>5)==4 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==5 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==6 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==7 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==8 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==9 && (1UL<<(((7*32+ 4))&31) & 0)) ) ? 1 : (__builtin_constant_p(((7*32+ 4))) ? constant_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))) ? (__builtin_constant_p((7*32+ 4)) && ( ((((7*32+ 4))>>5)==0 && (1UL<<(((7*32+ 4))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((7*32+ 4))>>5)==1 && (1UL<<(((7*32+ 4))&31) & (0|0))) || ((((7*32+ 4))>>5)==2 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==3 && (1UL<<(((7*32+ 4))&31) & (0))) || ((((7*32+ 4))>>5)==4 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==5 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==6 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==7 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==8 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==9 && (1UL<<(((7*32+ 4))&31) & 0)) ) ? 1 : (__builtin_constant_p(((7*32+ 4))) ? constant_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) : __builtin_constant_p((7*32+ 4)) ? 
__static_cpu_has((7*32+ 4)) : (__builtin_constant_p((7*32+ 4)) && ( ((((7*32+ 4))>>5)==0 && (1UL<<(((7*32+ 4))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((7*32+ 4))>>5)==1 && (1UL<<(((7*32+ 4))&31) & (0|0))) || ((((7*32+ 4))>>5)==2 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==3 && (1UL<<(((7*32+ 4))&31) & (0))) || ((((7*32+ 4))>>5)==4 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==5 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==6 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==7 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==8 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==9 && (1UL<<(((7*32+ 4))&31) & 0)) ) ? 1 : (__builtin_constant_p(((7*32+ 4))) ? constant_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) );
16846 static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) bool use_xsave(void)
16848 return ( __builtin_constant_p((__builtin_constant_p((4*32+26)) && ( ((((4*32+26))>>5)==0 && (1UL<<(((4*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+26))>>5)==1 && (1UL<<(((4*32+26))&31) & (0|0))) || ((((4*32+26))>>5)==2 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==3 && (1UL<<(((4*32+26))&31) & (0))) || ((((4*32+26))>>5)==4 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==5 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==6 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==7 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==8 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==9 && (1UL<<(((4*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+26))) ? constant_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))) ? (__builtin_constant_p((4*32+26)) && ( ((((4*32+26))>>5)==0 && (1UL<<(((4*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+26))>>5)==1 && (1UL<<(((4*32+26))&31) & (0|0))) || ((((4*32+26))>>5)==2 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==3 && (1UL<<(((4*32+26))&31) & (0))) || ((((4*32+26))>>5)==4 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==5 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==6 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==7 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==8 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==9 && (1UL<<(((4*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+26))) ? constant_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) : __builtin_constant_p((4*32+26)) ? 
__static_cpu_has((4*32+26)) : (__builtin_constant_p((4*32+26)) && ( ((((4*32+26))>>5)==0 && (1UL<<(((4*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+26))>>5)==1 && (1UL<<(((4*32+26))&31) & (0|0))) || ((((4*32+26))>>5)==2 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==3 && (1UL<<(((4*32+26))&31) & (0))) || ((((4*32+26))>>5)==4 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==5 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==6 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==7 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==8 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==9 && (1UL<<(((4*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+26))) ? constant_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) );
16850 static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) bool use_fxsr(void)
16852 return ( __builtin_constant_p((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))) ? (__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) : __builtin_constant_p((0*32+24)) ? 
__static_cpu_has((0*32+24)) : (__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) );
16854 extern void __sanitize_i387_state(struct task_struct *);
16855 static inline __attribute__((always_inline)) void sanitize_i387_state(struct task_struct *tsk)
16857 if (__builtin_constant_p(((!use_xsaveopt()))) ? !!((!use_xsaveopt())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 85, }; ______r = !!((!use_xsaveopt())); ______f.miss_hit[______r]++; ______r; }))
16859 __sanitize_i387_state(tsk);
16861 static inline __attribute__((always_inline)) int fxrstor_checking(struct i387_fxsave_struct *fx)
16863 asm volatile ("661:\n\t" "nop ; frstor %1" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(0*32+24)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "fxrstor %1" "\n664:\n" ".previous" : : "i" (0), "m" (*fx))
16867 static inline __attribute__((always_inline)) void fpu_fxsave(struct fpu *fpu)
16869 asm volatile("fxsave %[fx]"
16870 : [fx] "=m" (fpu->state->fxsave));
16872 static inline __attribute__((always_inline)) void fpu_save_init(struct fpu *fpu)
16874 if (__builtin_constant_p(((use_xsave()))) ? !!((use_xsave())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 229, }; ______r = !!((use_xsave())); ______f.miss_hit[______r]++; ______r; })) {
16876 if (__builtin_constant_p(((!(fpu->state->xsave.xsave_hdr.xstate_bv & 0x1)))) ? !!((!(fpu->state->xsave.xsave_hdr.xstate_bv & 0x1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 235, }; ______r = !!((!(fpu->state->xsave.xsave_hdr.xstate_bv & 0x1))); ______f.miss_hit[______r]++; ______r; }))
16878 } else if (__builtin_constant_p(((use_fxsr()))) ? !!((use_fxsr())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 237, }; ______r = !!((use_fxsr())); ______f.miss_hit[______r]++; ______r; })) {
16881 asm volatile("fnsave %[fx]; fwait"
16882 : [fx] "=m" (fpu->state->fsave));
16885 if (__builtin_constant_p((((__builtin_constant_p(fpu->state->fxsave.swd & (1 << 7)) ? !!(fpu->state->fxsave.swd & (1 << 7)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 245, }; ______r = __builtin_expect(!!(fpu->state->fxsave.swd & (1 << 7)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(fpu->state->fxsave.swd & (1 << 7)) ? !!(fpu->state->fxsave.swd & (1 << 7)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 245, }; ______r = __builtin_expect(!!(fpu->state->fxsave.swd & (1 << 7)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 245, }; ______r = !!(((__builtin_constant_p(fpu->state->fxsave.swd & (1 << 7)) ? !!(fpu->state->fxsave.swd & (1 << 7)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 245, }; ______r = __builtin_expect(!!(fpu->state->fxsave.swd & (1 << 7)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
16886 asm volatile("fnclex");
16887 asm volatile ("661:\n\t" ".byte " "0x90,0x8d,0xb4,0x26,0x00,0x00,0x00,0x00" "\n" ".byte " "0x89,0xf6" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(3*32+10)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "emms\n\t" "fildl %P[addr]" "\n664:\n" ".previous" : : "i" (0), [addr] "m" ((__per_cpu_offset[0])))
16890 static inline __attribute__((always_inline)) void __save_init_fpu(struct task_struct *tsk)
16892 fpu_save_init(&tsk->thread.fpu);
16893 ((struct thread_info *)(tsk)->stack)->status &= ~0x0001;
16895 static inline __attribute__((always_inline)) int fpu_fxrstor_checking(struct fpu *fpu)
16897 return fxrstor_checking(&fpu->state->fxsave);
16899 static inline __attribute__((always_inline)) int fpu_restore_checking(struct fpu *fpu)
16901 if (__builtin_constant_p(((use_xsave()))) ? !!((use_xsave())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 272, }; ______r = !!((use_xsave())); ______f.miss_hit[______r]++; ______r; }))
16902 return fpu_xrstor_checking(fpu);
16904 return fpu_fxrstor_checking(fpu);
16906 static inline __attribute__((always_inline)) int restore_fpu_checking(struct task_struct *tsk)
16908 return fpu_restore_checking(&tsk->thread.fpu);
16910 extern int save_i387_xstate(void *buf);
16911 extern int restore_i387_xstate(void *buf);
16912 static inline __attribute__((always_inline)) void __unlazy_fpu(struct task_struct *tsk)
16914 if (__builtin_constant_p(((((struct thread_info *)(tsk)->stack)->status & 0x0001))) ? !!((((struct thread_info *)(tsk)->stack)->status & 0x0001)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 291, }; ______r = !!((((struct thread_info *)(tsk)->stack)->status & 0x0001)); ______f.miss_hit[______r]++; ______r; })) {
16915 __save_init_fpu(tsk);
16916 write_cr0(read_cr0() | 0x00000008);
16918 tsk->fpu_counter = 0;
16920 static inline __attribute__((always_inline)) void __clear_fpu(struct task_struct *tsk)
16922 if (__builtin_constant_p(((((struct thread_info *)(tsk)->stack)->status & 0x0001))) ? !!((((struct thread_info *)(tsk)->stack)->status & 0x0001)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 300, }; ______r = !!((((struct thread_info *)(tsk)->stack)->status & 0x0001)); ______f.miss_hit[______r]++; ______r; })) {
16923 asm volatile("1: fwait\n"
16925 " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "2b" "\n" " .previous\n");
16926 ((struct thread_info *)(tsk)->stack)->status &= ~0x0001;
16927 write_cr0(read_cr0() | 0x00000008);
16930 static inline __attribute__((always_inline)) void kernel_fpu_begin(void)
16932 struct thread_info *me = current_thread_info();
16933 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
16934 if (__builtin_constant_p(((me->status & 0x0001))) ? !!((me->status & 0x0001)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 314, }; ______r = !!((me->status & 0x0001)); ______f.miss_hit[______r]++; ______r; }))
16935 __save_init_fpu(me->task);
16939 static inline __attribute__((always_inline)) void kernel_fpu_end(void)
16941 write_cr0(read_cr0() | 0x00000008);
16942 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 323, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 323, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 323, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 323, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
16944 static inline __attribute__((always_inline)) bool irq_fpu_usable(void)
16946 struct pt_regs *regs;
16947 return !(((current_thread_info()->preempt_count) & ((((1UL << (10))-1) << ((0 + 8) + 8)) | (((1UL << (8))-1) << (0 + 8)) | (((1UL << (1))-1) << (((0 + 8) + 8) + 10))))) || !(regs = get_irq_regs()) ||
16948 user_mode(regs) || (read_cr0() & 0x00000008);
16950 static inline __attribute__((always_inline)) int irq_ts_save(void)
16952 if (__builtin_constant_p(((!(((current_thread_info()->preempt_count) & ~0x10000000) != 0)))) ? !!((!(((current_thread_info()->preempt_count) & ~0x10000000) != 0))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 348, }; ______r = !!((!(((current_thread_info()->preempt_count) & ~0x10000000) != 0))); ______f.miss_hit[______r]++; ______r; }))
16954 if (__builtin_constant_p(((read_cr0() & 0x00000008))) ? !!((read_cr0() & 0x00000008)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 351, }; ______r = !!((read_cr0() & 0x00000008)); ______f.miss_hit[______r]++; ______r; })) {
16960 static inline __attribute__((always_inline)) void irq_ts_restore(int TS_state)
16962 if (__builtin_constant_p(((TS_state))) ? !!((TS_state)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 361, }; ______r = !!((TS_state)); ______f.miss_hit[______r]++; ______r; }))
16963 write_cr0(read_cr0() | 0x00000008);
16965 static inline __attribute__((always_inline)) void save_init_fpu(struct task_struct *tsk)
16967 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
16968 __save_init_fpu(tsk);
16969 write_cr0(read_cr0() | 0x00000008);
16970 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 373, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 373, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 373, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 373, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
16972 static inline __attribute__((always_inline)) void unlazy_fpu(struct task_struct *tsk)
16974 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
16976 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 380, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 380, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 380, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 380, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
16978 static inline __attribute__((always_inline)) void clear_fpu(struct task_struct *tsk)
16980 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
16982 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 387, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 387, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 387, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 387, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
16984 static inline __attribute__((always_inline)) unsigned short get_fpu_cwd(struct task_struct *tsk)
16986 if (__builtin_constant_p((((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? 
constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 395, }; ______r = !!(((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) {
16987 return tsk->thread.fpu.state->fxsave.cwd;
16989 return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
16992 static inline __attribute__((always_inline)) unsigned short get_fpu_swd(struct task_struct *tsk)
16994 if (__builtin_constant_p((((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? 
constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 404, }; ______r = !!(((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) {
16995 return tsk->thread.fpu.state->fxsave.swd;
16997 return (unsigned short)tsk->thread.fpu.state->fsave.swd;
17000 static inline __attribute__((always_inline)) unsigned short get_fpu_mxcsr(struct task_struct *tsk)
17002 if (__builtin_constant_p((((__builtin_constant_p((0*32+25)) && ( ((((0*32+25))>>5)==0 && (1UL<<(((0*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+25))>>5)==1 && (1UL<<(((0*32+25))&31) & (0|0))) || ((((0*32+25))>>5)==2 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==3 && (1UL<<(((0*32+25))&31) & (0))) || ((((0*32+25))>>5)==4 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==5 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==6 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==7 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==8 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==9 && (1UL<<(((0*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+25))) ? constant_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+25)) && ( ((((0*32+25))>>5)==0 && (1UL<<(((0*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+25))>>5)==1 && (1UL<<(((0*32+25))&31) & (0|0))) || ((((0*32+25))>>5)==2 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==3 && (1UL<<(((0*32+25))&31) & (0))) || ((((0*32+25))>>5)==4 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==5 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==6 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==7 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==8 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==9 && (1UL<<(((0*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+25))) ? 
constant_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 413, }; ______r = !!(((__builtin_constant_p((0*32+25)) && ( ((((0*32+25))>>5)==0 && (1UL<<(((0*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+25))>>5)==1 && (1UL<<(((0*32+25))&31) & (0|0))) || ((((0*32+25))>>5)==2 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==3 && (1UL<<(((0*32+25))&31) & (0))) || ((((0*32+25))>>5)==4 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==5 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==6 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==7 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==8 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==9 && (1UL<<(((0*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+25))) ? constant_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) {
17003 return tsk->thread.fpu.state->fxsave.mxcsr;
17008 static bool fpu_allocated(struct fpu *fpu)
17010 return fpu->state != ((void *)0);
17012 static inline __attribute__((always_inline)) int fpu_alloc(struct fpu *fpu)
17014 if (__builtin_constant_p(((fpu_allocated(fpu)))) ? !!((fpu_allocated(fpu))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 427, }; ______r = !!((fpu_allocated(fpu))); ______f.miss_hit[______r]++; ______r; }))
17016 fpu->state = kmem_cache_alloc(task_xstate_cachep, ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
17017 if (__builtin_constant_p(((!fpu->state))) ? !!((!fpu->state)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 430, }; ______r = !!((!fpu->state)); ______f.miss_hit[______r]++; ______r; }))
17019 ({ int __ret_warn_on = !!((unsigned long)fpu->state & 15); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", 432); (__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); });
/*
 * fpu_free - release a task's extended FPU state buffer.
 *
 * If a save area was allocated (fpu->state non-NULL), return it to the
 * task_xstate_cachep slab cache and clear the pointer so a repeated
 * call is harmless.
 *
 * NOTE(review): this is preprocessed source.  The if () below is the
 * expanded ftrace branch-profiling wrapper around the plain condition
 * `if (fpu->state)`; the ______f/______r machinery only records branch
 * hit/miss statistics and does not change the tested value.
 */
17022 static inline __attribute__((always_inline)) void fpu_free(struct fpu *fpu)
/* Expanded branch profiler; the condition being tested is fpu->state. */
17024 if (__builtin_constant_p(((fpu->state))) ? !!((fpu->state)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 438, }; ______r = !!((fpu->state)); ______f.miss_hit[______r]++; ______r; })) {
17025 kmem_cache_free(task_xstate_cachep, fpu->state);
/* Clear the pointer so the buffer cannot be freed twice. */
17026 fpu->state = ((void *)0);
17029 static inline __attribute__((always_inline)) void fpu_copy(struct fpu *dst, struct fpu *src)
17031 __builtin_memcpy(dst->state, src->state, xstate_size);
17033 extern void fpu_finit(struct fpu *fpu);
17034 void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
17036 void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
17040 struct bio_integrity_payload;
17042 struct block_device;
17043 typedef void (bio_end_io_t) (struct bio *, int);
17044 typedef void (bio_destructor_t) (struct bio *);
17046 struct page *bv_page;
17047 unsigned int bv_len;
17048 unsigned int bv_offset;
17051 sector_t bi_sector;
17052 struct bio *bi_next;
17053 struct block_device *bi_bdev;
17054 unsigned long bi_flags;
17055 unsigned long bi_rw;
17056 unsigned short bi_vcnt;
17057 unsigned short bi_idx;
17058 unsigned int bi_phys_segments;
17059 unsigned int bi_size;
17060 unsigned int bi_seg_front_size;
17061 unsigned int bi_seg_back_size;
17062 unsigned int bi_max_vecs;
17063 unsigned int bi_comp_cpu;
17065 struct bio_vec *bi_io_vec;
17066 bio_end_io_t *bi_end_io;
17068 bio_destructor_t *bi_destructor;
17069 struct bio_vec bi_inline_vecs[0];
17071 enum rq_flag_bits {
17073 __REQ_FAILFAST_DEV,
17074 __REQ_FAILFAST_TRANSPORT,
17075 __REQ_FAILFAST_DRIVER,
17102 struct fstrim_range {
17107 struct files_stat_struct {
17108 unsigned long nr_files;
17109 unsigned long nr_free_files;
17110 unsigned long max_files;
17112 struct inodes_stat_t {
/*
 * old_valid_dev - can @dev be represented in the legacy 16-bit encoding?
 *
 * The internal dev_t packs the major number in bits 20+ and the minor
 * in the low 20 bits; the legacy format only has 8 bits for each.
 * Returns 1 when both fit, 0 otherwise.
 */
static inline __attribute__((always_inline)) int old_valid_dev(dev_t dev)
{
	unsigned int major = (unsigned int)(dev >> 20);
	unsigned int minor = (unsigned int)(dev & ((1U << 20) - 1));

	return major < 256 && minor < 256;
}
17121 static inline __attribute__((always_inline)) u16 old_encode_dev(dev_t dev)
17123 return (((unsigned int) ((dev) >> 20)) << 8) | ((unsigned int) ((dev) & ((1U << 20) - 1)));
17125 static inline __attribute__((always_inline)) dev_t old_decode_dev(u16 val)
17127 return ((((val >> 8) & 255) << 20) | (val & 255));
17129 static inline __attribute__((always_inline)) int new_valid_dev(dev_t dev)
17133 static inline __attribute__((always_inline)) u32 new_encode_dev(dev_t dev)
17135 unsigned major = ((unsigned int) ((dev) >> 20));
17136 unsigned minor = ((unsigned int) ((dev) & ((1U << 20) - 1)));
17137 return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
17139 static inline __attribute__((always_inline)) dev_t new_decode_dev(u32 dev)
17141 unsigned major = (dev & 0xfff00) >> 8;
17142 unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
17143 return (((major) << 20) | (minor));
17145 static inline __attribute__((always_inline)) int huge_valid_dev(dev_t dev)
17149 static inline __attribute__((always_inline)) u64 huge_encode_dev(dev_t dev)
17151 return new_encode_dev(dev);
17153 static inline __attribute__((always_inline)) dev_t huge_decode_dev(u64 dev)
17155 return new_decode_dev(dev);
/*
 * sysv_valid_dev - can @dev be represented in the SysV 32-bit
 * encoding (14-bit major, 18-bit minor)?  Returns 1 if so, else 0.
 */
static inline __attribute__((always_inline)) int sysv_valid_dev(dev_t dev)
{
	unsigned int major = (unsigned int)(dev >> 20);
	unsigned int minor = (unsigned int)(dev & ((1U << 20) - 1));

	return major < (1 << 14) && minor < (1 << 18);
}
17161 static inline __attribute__((always_inline)) u32 sysv_encode_dev(dev_t dev)
17163 return ((unsigned int) ((dev) & ((1U << 20) - 1))) | (((unsigned int) ((dev) >> 20)) << 18);
17165 static inline __attribute__((always_inline)) unsigned sysv_major(u32 dev)
17167 return (dev >> 18) & 0x3fff;
17169 static inline __attribute__((always_inline)) unsigned sysv_minor(u32 dev)
17171 return dev & 0x3ffff;
17173 static inline __attribute__((always_inline)) void bit_spin_lock(int bitnum, unsigned long *addr)
17175 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
17176 while ((__builtin_constant_p(test_and_set_bit_lock(bitnum, addr)) ? !!(test_and_set_bit_lock(bitnum, addr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 25, }; ______r = __builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))) {
17177 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 26, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 26, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 26, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 26, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
17180 } while ((__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr))));
17181 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
17185 static inline __attribute__((always_inline)) int bit_spin_trylock(int bitnum, unsigned long *addr)
17187 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
17188 if (__builtin_constant_p((((__builtin_constant_p(test_and_set_bit_lock(bitnum, addr)) ? !!(test_and_set_bit_lock(bitnum, addr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 43, }; ______r = __builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_and_set_bit_lock(bitnum, addr)) ? !!(test_and_set_bit_lock(bitnum, addr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 43, }; ______r = __builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 43, }; ______r = !!(((__builtin_constant_p(test_and_set_bit_lock(bitnum, addr)) ? !!(test_and_set_bit_lock(bitnum, addr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 43, }; ______r = __builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
17189 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 44, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 44, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 44, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 44, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
17195 static inline __attribute__((always_inline)) void bit_spin_unlock(int bitnum, unsigned long *addr)
17197 do { if (__builtin_constant_p((((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 58, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 58, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 58, }; ______r = !!(((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? 
constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 58, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/bit_spinlock.h"), "i" (58), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
17198 clear_bit_unlock(bitnum, addr);
17199 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 63, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 63, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 63, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 63, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
17202 static inline __attribute__((always_inline)) void __bit_spin_unlock(int bitnum, unsigned long *addr)
17204 do { if (__builtin_constant_p((((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 75, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 75, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 75, }; ______r = !!(((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? 
constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 75, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/bit_spinlock.h"), "i" (75), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
17205 __clear_bit_unlock(bitnum, addr);
17206 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 80, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 80, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 80, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 80, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
/*
 * bit_spin_is_locked: non-atomically test whether bit `bitnum` of *addr
 * (a bit spinlock) is currently held. The ternary picks the constant- or
 * variable-index test_bit implementation.
 */
17209 static inline __attribute__((always_inline)) int bit_spin_is_locked(int bitnum, unsigned long *addr)
17211 return (__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)));
/*
 * Lock-bit hash-list head: bit 0 of `first` doubles as a bit spinlock
 * (see hlist_bl_lock/hlist_bl_unlock below), so node pointers must be
 * masked with ~1UL before dereference.
 */
17213 struct hlist_bl_head {
17214 struct hlist_bl_node *first;
/*
 * Node of a lock-bit hash list. `pprev` points at the previous node's
 * `next` field (or at the head's `first`), allowing O(1) unlink.
 */
17216 struct hlist_bl_node {
17217 struct hlist_bl_node *next, **pprev;
/* Initialize a node as unhashed: both link pointers NULL. */
17219 static inline __attribute__((always_inline)) void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
17221 h->next = ((void *)0);
17222 h->pprev = ((void *)0);
/*
 * hlist_bl_unhashed: true when the node is not on any list.
 * (Body elided in this dump; presumably tests h->pprev — cf. the NULL
 * pprev set by INIT_HLIST_BL_NODE above — TODO confirm against source.)
 */
17224 static inline __attribute__((always_inline)) int hlist_bl_unhashed(const struct hlist_bl_node *h)
/* First node of the list: mask off the low lock bit stored in h->first. */
17228 static inline __attribute__((always_inline)) struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
17230 return (struct hlist_bl_node *)
17231 ((unsigned long)h->first & ~1UL);
/*
 * Store a new first node with the lock bit (bit 0) written back set —
 * presumably only called while the bit lock is held (TODO confirm;
 * assertion lines are elided in this dump).
 */
17233 static inline __attribute__((always_inline)) void hlist_bl_set_first(struct hlist_bl_head *h,
17234 struct hlist_bl_node *n)
17238 h->first = (struct hlist_bl_node *)((unsigned long)n | 1UL);
/* Empty test: true when h->first holds no node pointer (lock bit ignored). */
17240 static inline __attribute__((always_inline)) int hlist_bl_empty(const struct hlist_bl_head *h)
17242 return !((unsigned long)h->first & ~1UL);
/*
 * Insert n at the head of the list. The expanded `if` below is the
 * branch-profiling wrapper around plain `if (first)`; the assignment of
 * n->next is on a line elided from this dump. Finishes by publishing n
 * through hlist_bl_set_first (lock bit preserved).
 */
17244 static inline __attribute__((always_inline)) void hlist_bl_add_head(struct hlist_bl_node *n,
17245 struct hlist_bl_head *h)
17247 struct hlist_bl_node *first = hlist_bl_first(h);
17249 if (__builtin_constant_p(((first))) ? !!((first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list_bl.h", .line = 82, }; ______r = !!((first)); ______f.miss_hit[______r]++; ______r; }))
17250 first->pprev = &n->next;
17251 n->pprev = &h->first;
17252 hlist_bl_set_first(h, n);
/*
 * Unlink n while preserving the head's lock bit: the low bit of the old
 * *pprev value is OR-ed back into the stored next pointer, so unlinking
 * the first node does not clobber the lock. The expanded `if` is the
 * branch-profiling wrapper around `if (next)`.
 */
17254 static inline __attribute__((always_inline)) void __hlist_bl_del(struct hlist_bl_node *n)
17256 struct hlist_bl_node *next = n->next;
17257 struct hlist_bl_node **pprev = n->pprev;
17259 *pprev = (struct hlist_bl_node *)
17260 ((unsigned long)next |
17261 ((unsigned long)*pprev & 1UL));
17262 if (__builtin_constant_p(((next))) ? !!((next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list_bl.h", .line = 99, }; ______r = !!((next)); ______f.miss_hit[______r]++; ______r; }))
17263 next->pprev = pprev;
/*
 * Delete n and poison its links (LIST_POISON1/LIST_POISON2-style values
 * 0x00100100 / 0x00200200) so a use-after-delete faults recognizably.
 * The __hlist_bl_del() call is on a line elided from this dump.
 */
17265 static inline __attribute__((always_inline)) void hlist_bl_del(struct hlist_bl_node *n)
17268 n->next = ((void *) 0x00100100 + (0x0UL));
17269 n->pprev = ((void *) 0x00200200 + (0x0UL));
/*
 * If n is hashed, unlink it (the __hlist_bl_del call is on an elided
 * line) and reinitialize it as unhashed. The expanded `if` is the
 * branch-profiling wrapper around `if (!hlist_bl_unhashed(n))`.
 */
17271 static inline __attribute__((always_inline)) void hlist_bl_del_init(struct hlist_bl_node *n)
17273 if (__builtin_constant_p(((!hlist_bl_unhashed(n)))) ? !!((!hlist_bl_unhashed(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list_bl.h", .line = 112, }; ______r = !!((!hlist_bl_unhashed(n))); ______f.miss_hit[______r]++; ______r; })) {
17275 INIT_HLIST_BL_NODE(n);
/* Lock the list: take a bit spinlock on bit 0 of the head pointer word. */
17278 static inline __attribute__((always_inline)) void hlist_bl_lock(struct hlist_bl_head *b)
17280 bit_spin_lock(0, (unsigned long *)b);
/* Unlock the list: release bit 0 via __bit_spin_unlock (pairs with hlist_bl_lock). */
17282 static inline __attribute__((always_inline)) void hlist_bl_unlock(struct hlist_bl_head *b)
17284 __bit_spin_unlock(0, (unsigned long *)b);
/*
 * RCU variant of hlist_bl_set_first: publishes n (with the lock bit set)
 * to h->first via the expanded rcu_assign_pointer() on the following
 * lines (barrier + store), so concurrent lockless readers see a fully
 * initialized node. Expanded body left untouched.
 */
17286 static inline __attribute__((always_inline)) void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
17287 struct hlist_bl_node *n)
17291 ({ if (__builtin_constant_p(((!__builtin_constant_p(((struct hlist_bl_node *)((unsigned long)n | 1UL))) || ((((struct hlist_bl_node *)((unsigned long)n | 1UL))) != ((void *)0))))) ? !!((!__builtin_constant_p(((struct hlist_bl_node *)((unsigned long)n | 1UL))) || ((((struct hlist_bl_node *)((unsigned long)n | 1UL))) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
17292 "include/linux/rculist_bl.h"
17295 , }; ______r = !!((!__builtin_constant_p(((struct hlist_bl_node *)((unsigned long)n | 1UL))) || ((((struct hlist_bl_node *)((unsigned long)n | 1UL))) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); ((h->first)) = (typeof(*((struct hlist_bl_node *)((unsigned long)n | 1UL))) *)(((struct hlist_bl_node *)((unsigned long)n | 1UL))); })
/*
 * RCU-safe read of the first node: the statement expression is the
 * expanded rcu_dereference() of h->first (volatile read), with the low
 * lock bit masked off before returning the node pointer.
 */
17298 static inline __attribute__((always_inline)) struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
17300 return (struct hlist_bl_node *)
17301 ((unsigned long)({ typeof(*(h->first)) *_________p1 = (typeof(*(h->first))* )(*(volatile typeof((h->first)) *)&((h->first))); do { } while (0); ; do { } while (0); ((typeof(*(h->first)) *)(_________p1)); }) & ~1UL);
/*
 * RCU-aware delete-and-init: if n is hashed (branch-profiled test), clear
 * only pprev; n->next is left intact so concurrent RCU readers already on
 * n can keep traversing. The unlink call itself is on an elided line.
 */
17303 static inline __attribute__((always_inline)) void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
17305 if (__builtin_constant_p(((!hlist_bl_unhashed(n)))) ? !!((!hlist_bl_unhashed(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist_bl.h", .line = 48, }; ______r = !!((!hlist_bl_unhashed(n))); ______f.miss_hit[______r]++; ______r; })) {
17307 n->pprev = ((void *)0);
/*
 * RCU delete: poison only pprev (0x00200200); n->next must stay valid so
 * readers traversing under RCU are not broken. Unlink call elided in dump.
 */
17310 static inline __attribute__((always_inline)) void hlist_bl_del_rcu(struct hlist_bl_node *n)
17313 n->pprev = ((void *) 0x00200200 + (0x0UL));
/*
 * RCU head-insert: link n before the current first node (n->next store is
 * on an elided line), fix up pprev pointers, then publish with the
 * barrier-carrying hlist_bl_set_first_rcu. Branch-profiled `if (first)`.
 */
17315 static inline __attribute__((always_inline)) void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
17316 struct hlist_bl_head *h)
17318 struct hlist_bl_node *first;
17319 first = hlist_bl_first(h);
17321 if (__builtin_constant_p(((first))) ? !!((first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist_bl.h", .line = 107, }; ______r = !!((first)); ______f.miss_hit[______r]++; ______r; }))
17322 first->pprev = &n->next;
17323 n->pprev = &h->first;
17324 hlist_bl_set_first_rcu(h, n);
17332 const unsigned char *name;
17334 struct dentry_stat_t {
17341 extern struct dentry_stat_t dentry_stat;
/*
 * Compare two dentry names: lengths first (early-return line elided from
 * this dump), then a bytewise loop whose header is elided; `ret` is
 * nonzero on the first mismatching byte. Both `if`s are branch-profiling
 * expansions.
 */
17342 static inline __attribute__((always_inline)) int dentry_cmp(const unsigned char *cs, size_t scount,
17343 const unsigned char *ct, size_t tcount)
17346 if (__builtin_constant_p(((scount != tcount))) ? !!((scount != tcount)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 58, }; ______r = !!((scount != tcount)); ______f.miss_hit[______r]++; ______r; }))
17349 ret = (*cs != *ct);
17350 if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 62, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; }))
/* Fold one character into a running name hash: (prev + (c<<4) + (c>>4)) * 11. */
17358 static inline __attribute__((always_inline)) unsigned long
17359 partial_name_hash(unsigned long c, unsigned long prevhash)
17361 return (prevhash + (c << 4) + (c >> 4)) * 11;
/* Finalize a name hash: truncate the accumulator to 32 bits. */
17363 static inline __attribute__((always_inline)) unsigned long end_name_hash(unsigned long hash)
17365 return (unsigned int) hash;
/*
 * Hash a whole name: feed each byte through partial_name_hash (the
 * `while (len--)` loop header is on an elided line of this dump), then
 * finalize with end_name_hash.
 */
17367 static inline __attribute__((always_inline)) unsigned int
17368 full_name_hash(const unsigned char *name, unsigned int len)
17370 unsigned long hash = 0;
17372 hash = partial_name_hash(*name++, hash);
17373 return end_name_hash(hash);
/*
 * Fields of struct dentry (the struct's opening line and some members are
 * elided from this dump): lookup state (d_flags, d_hash, d_name, d_parent,
 * d_inode), 36-byte inline short-name buffer d_iname, reference count
 * d_count, per-sb ops/superblock pointers, LRU + sibling/child/alias list
 * links, and the RCU head used for deferred freeing.
 */
17376 unsigned int d_flags;
17378 struct hlist_bl_node d_hash;
17379 struct dentry *d_parent;
17380 struct qstr d_name;
17381 struct inode *d_inode;
17382 unsigned char d_iname[36];
17383 unsigned int d_count;
17385 const struct dentry_operations *d_op;
17386 struct super_block *d_sb;
17387 unsigned long d_time;
17389 struct list_head d_lru;
17391 struct list_head d_child;
17392 struct rcu_head d_rcu;
17394 struct list_head d_subdirs;
17395 struct list_head d_alias;
/* Lockdep nesting classes for dentry->d_lock (normal vs. nested acquisition). */
17397 enum dentry_d_lock_class
17399 DENTRY_D_LOCK_NORMAL,
17400 DENTRY_D_LOCK_NESTED
/*
 * Per-filesystem dentry callbacks: revalidation, custom name hash/compare,
 * deletion policy, release/iput hooks, dynamic-name generation, and
 * automount/managed-transit handling. The trailing attribute aligns the
 * ops table to 64 bytes (1 << 6) — presumably one cache line.
 */
17402 struct dentry_operations {
17403 int (*d_revalidate)(struct dentry *, struct nameidata *);
17404 int (*d_hash)(const struct dentry *, const struct inode *,
17406 int (*d_compare)(const struct dentry *, const struct inode *,
17407 const struct dentry *, const struct inode *,
17408 unsigned int, const char *, const struct qstr *);
17409 int (*d_delete)(const struct dentry *);
17410 void (*d_release)(struct dentry *);
17411 void (*d_iput)(struct dentry *, struct inode *);
17412 char *(*d_dname)(struct dentry *, char *, int);
17413 struct vfsmount *(*d_automount)(struct path *);
17414 int (*d_manage)(struct dentry *, bool);
17415 } __attribute__((__aligned__((1 << (6)))));
17416 extern seqlock_t rename_lock;
/* True when the name is heap-allocated rather than stored in the inline d_iname buffer. */
17417 static inline __attribute__((always_inline)) int dname_external(struct dentry *dentry)
17419 return dentry->d_name.name != dentry->d_iname;
17421 extern void d_instantiate(struct dentry *, struct inode *);
17422 extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
17423 extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
17424 extern void __d_drop(struct dentry *dentry);
17425 extern void d_drop(struct dentry *dentry);
17426 extern void d_delete(struct dentry *);
17427 extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op);
17428 extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
17429 extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
17430 extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
17431 extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
17432 extern struct dentry * d_obtain_alias(struct inode *);
17433 extern void shrink_dcache_sb(struct super_block *);
17434 extern void shrink_dcache_parent(struct dentry *);
17435 extern void shrink_dcache_for_umount(struct super_block *);
17436 extern int d_invalidate(struct dentry *);
17437 extern struct dentry * d_alloc_root(struct inode *);
17438 extern void d_genocide(struct dentry *);
17439 extern struct dentry *d_find_alias(struct inode *);
17440 extern void d_prune_aliases(struct inode *);
17441 extern int have_submounts(struct dentry *);
17442 extern void d_rehash(struct dentry *);
/*
 * Bind inode to dentry and make it visible: instantiates here; the
 * d_rehash(entry) call is on a line elided from this dump.
 */
17443 static inline __attribute__((always_inline)) void d_add(struct dentry *entry, struct inode *inode)
17445 d_instantiate(entry, inode);
/*
 * Unique variant of d_add: d_instantiate_unique may hand back an existing
 * alias (non-NULL res); whichever dentry wins gets rehashed. The return
 * statement is on a line elided from this dump.
 */
17448 static inline __attribute__((always_inline)) struct dentry *d_add_unique(struct dentry *entry, struct inode *inode)
17450 struct dentry *res;
17451 res = d_instantiate_unique(entry, inode);
17452 d_rehash(res != ((void *)0) ? res : entry);
17455 extern void dentry_update_name_case(struct dentry *, struct qstr *);
17456 extern void d_move(struct dentry *, struct dentry *);
17457 extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
17458 extern struct dentry *d_lookup(struct dentry *, struct qstr *);
17459 extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
17460 extern struct dentry *__d_lookup(struct dentry *, struct qstr *);
17461 extern struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
17462 unsigned *seq, struct inode **inode);
/*
 * Upgrade an RCU-walk reference to a real refcount: caller must hold
 * d_lock (asserted); succeeds only if the d_seq seqcount has not moved
 * since `seq` was sampled (branch-profiled retry check; the increment and
 * return are on elided lines).
 */
17463 static inline __attribute__((always_inline)) int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
17466 assert_spin_locked(&dentry->d_lock);
17467 if (__builtin_constant_p(((!read_seqcount_retry(&dentry->d_seq, seq)))) ? !!((!read_seqcount_retry(&dentry->d_seq, seq))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 327, }; ______r = !!((!read_seqcount_retry(&dentry->d_seq, seq))); ______f.miss_hit[______r]++; ______r; })) {
17473 extern int d_validate(struct dentry *, struct dentry *);
17474 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
17475 extern char *__d_path(const struct path *path, struct path *root, char *, int);
17476 extern char *d_path(const struct path *, char *, int);
17477 extern char *d_path_with_unreachable(const struct path *, char *, int);
17478 extern char *dentry_path_raw(struct dentry *, char *, int);
17479 extern char *dentry_path(struct dentry *, char *, int);
/*
 * Take a dentry reference with d_lock already held; NULL-tolerant
 * (branch-profiled `if (dentry)`). The d_count increment and return are
 * on lines elided from this dump.
 */
17480 static inline __attribute__((always_inline)) struct dentry *dget_dlock(struct dentry *dentry)
17482 if (__builtin_constant_p(((dentry))) ? !!((dentry)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 361, }; ______r = !!((dentry)); ______f.miss_hit[______r]++; ______r; }))
/*
 * Take a dentry reference, NULL-tolerant: bumps the count via dget_dlock
 * under d_lock. (Return statement elided from this dump.)
 */
17486 static inline __attribute__((always_inline)) struct dentry *dget(struct dentry *dentry)
17488 if (__builtin_constant_p(((dentry))) ? !!((dentry)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 368, }; ______r = !!((dentry)); ______f.miss_hit[______r]++; ______r; })) {
17489 spin_lock(&dentry->d_lock);
17490 dget_dlock(dentry);
17491 spin_unlock(&dentry->d_lock);
17495 extern struct dentry *dget_parent(struct dentry *dentry);
/* True when the dentry is not on the dentry hash (delegates to hlist_bl_unhashed). */
17496 static inline __attribute__((always_inline)) int d_unhashed(struct dentry *dentry)
17498 return hlist_bl_unhashed(&dentry->d_hash);
/* Unhashed and not its own parent (i.e. not a root dentry) — it has been unlinked. */
17500 static inline __attribute__((always_inline)) int d_unlinked(struct dentry *dentry)
17502 return d_unhashed(dentry) && !((dentry) == (dentry)->d_parent);
/* Test the 0x0100 flag set by dont_mount() below — mounting on this dentry is forbidden. */
17504 static inline __attribute__((always_inline)) int cant_mount(struct dentry *dentry)
17506 return (dentry->d_flags & 0x0100);
/* Mark the dentry un-mountable: set flag 0x0100 under d_lock (read by cant_mount). */
17508 static inline __attribute__((always_inline)) void dont_mount(struct dentry *dentry)
17510 spin_lock(&dentry->d_lock);
17511 dentry->d_flags |= 0x0100;
17512 spin_unlock(&dentry->d_lock);
17514 extern void dput(struct dentry *);
/*
 * Any "managed" flag set (0x10000 | 0x20000 | 0x40000) — includes the
 * mountpoint bit tested by d_mountpoint() below plus two further
 * transit-management bits.
 */
17515 static inline __attribute__((always_inline)) bool d_managed(struct dentry *dentry)
17517 return dentry->d_flags & (0x10000|0x20000|0x40000);
/* Test the 0x10000 flag: something is mounted on this dentry. */
17519 static inline __attribute__((always_inline)) bool d_mountpoint(struct dentry *dentry)
17521 return dentry->d_flags & 0x10000;
17523 extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
17524 extern int sysctl_vfs_cache_pressure;
17528 struct vfsmount *mnt;
17529 struct dentry *dentry;
17531 extern void path_get(struct path *);
17532 extern void path_put(struct path *);
/* Two paths are equal iff both their vfsmount and dentry pointers match. */
17533 static inline __attribute__((always_inline)) int path_equal(const struct path *path1, const struct path *path2)
17535 return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
/* Bit 0 of a radix-tree slot value tags an indirect (internal-node) pointer. */
17537 static inline __attribute__((always_inline)) int radix_tree_is_indirect_ptr(void *ptr)
17539 return (int)((unsigned long)ptr & 1);
/*
 * Radix tree root: current height and the top node pointer. (One member
 * line — presumably the gfp_mask — is elided from this dump.)
 */
17541 struct radix_tree_root {
17542 unsigned int height;
17544 struct radix_tree_node *rnode;
/*
 * RCU-safe read of a slot's contents: the statement expression is the
 * expanded rcu_dereference() of *pslot (single volatile load).
 */
17546 static inline __attribute__((always_inline)) void *radix_tree_deref_slot(void **pslot)
17548 return ({ typeof(*(*pslot)) *_________p1 = (typeof(*(*pslot))* )(*(volatile typeof((*pslot)) *)&((*pslot))); do { } while (0); ; do { } while (0); ((typeof(*(*pslot)) *)(_________p1)); });
/*
 * Lock-protected slot read: plain dereference of *pslot. `treelock` is
 * unused in this expansion — the lockdep check it feeds is compiled out
 * (the empty do{}while(0) stubs).
 */
17550 static inline __attribute__((always_inline)) void *radix_tree_deref_slot_protected(void **pslot,
17551 spinlock_t *treelock)
17553 return ({ do { } while (0); ; ((typeof(*(*pslot)) *)((*pslot))); });
/*
 * True when a deref'd slot value carries the indirect-pointer tag (bit 0),
 * meaning the lookup raced with a tree change and must be retried. The
 * expansion is the ftrace branch-annotation of the bit test.
 */
17555 static inline __attribute__((always_inline)) int radix_tree_deref_retry(void *arg)
17557 return (__builtin_constant_p((unsigned long)arg & 1) ? !!((unsigned long)arg & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 173, }; ______r = __builtin_expect(!!((unsigned long)arg & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
/*
 * radix_tree_replace_slot: store `item` into *pslot. The expanded body on
 * the following lines is BUG_ON(radix_tree_is_indirect_ptr(item)) plus
 * the rcu_assign_pointer() publication; left untouched.
 */
17559 static inline __attribute__((always_inline)) void radix_tree_replace_slot(void **pslot, void *item)
17561 do { if (__builtin_constant_p((((__builtin_constant_p(radix_tree_is_indirect_ptr(item)) ? !!(radix_tree_is_indirect_ptr(item)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 186, }; ______r = __builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(radix_tree_is_indirect_ptr(item)) ? !!(radix_tree_is_indirect_ptr(item)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 186, }; ______r = __builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 186, }; ______r = !!(((__builtin_constant_p(radix_tree_is_indirect_ptr(item)) ? !!(radix_tree_is_indirect_ptr(item)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 186, }; ______r = __builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/radix-tree.h"), "i" (186), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
17562 ({ if (__builtin_constant_p(((!__builtin_constant_p((item)) || (((item)) != ((void *)0))))) ? !!((!__builtin_constant_p((item)) || (((item)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 187, }; ______r = !!((!__builtin_constant_p((item)) || (((item)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); ((*pslot)) = (typeof(*(item)) *)((item)); });
17564 int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
17565 void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
17566 void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
17567 void *radix_tree_delete(struct radix_tree_root *, unsigned long);
17569 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
17570 unsigned long first_index, unsigned int max_items);
17572 radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
17573 unsigned long first_index, unsigned int max_items);
17574 unsigned long radix_tree_next_hole(struct radix_tree_root *root,
17575 unsigned long index, unsigned long max_scan);
17576 unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
17577 unsigned long index, unsigned long max_scan);
17578 int radix_tree_preload(gfp_t gfp_mask);
17579 void radix_tree_init(void);
17580 void *radix_tree_tag_set(struct radix_tree_root *root,
17581 unsigned long index, unsigned int tag);
17582 void *radix_tree_tag_clear(struct radix_tree_root *root,
17583 unsigned long index, unsigned int tag);
17584 int radix_tree_tag_get(struct radix_tree_root *root,
17585 unsigned long index, unsigned int tag);
17587 radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
17588 unsigned long first_index, unsigned int max_items,
17591 radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
17592 unsigned long first_index, unsigned int max_items,
17594 unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
17595 unsigned long *first_indexp, unsigned long last_index,
17596 unsigned long nr_to_tag,
17597 unsigned int fromtag, unsigned int totag);
17598 int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
/*
 * radix_tree_preload_end: ends a radix_tree_preload() section. The
 * expanded line that follows is the preempt_enable() macro
 * (sub_preempt_count + reschedule check); left untouched.
 */
17599 static inline __attribute__((always_inline)) void radix_tree_preload_end(void)
17601 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 228, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 228, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 228, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 228, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
17605 unsigned int count;
17606 struct list_head wait_list;
/*
 * Initialize a counting semaphore to `val`: the compound literal fills in
 * the debug spinlock (magic 0xdead4ead, no owner) and an empty wait list,
 * then the lock's lockdep map is registered under a per-callsite static key.
 */
17608 static inline __attribute__((always_inline)) void sema_init(struct semaphore *sem, int val)
17610 static struct lock_class_key __key;
17611 *sem = (struct semaphore) { .lock = (spinlock_t ) { { .rlock = { .raw_lock = { 0 }, .magic = 0xdead4ead, .owner_cpu = -1, .owner = ((void *)-1L), .dep_map = { .name = "(*sem).lock" } } } }, .count = val, .wait_list = { &((*sem).wait_list), &((*sem).wait_list) }, };
17612 lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
17614 extern void down(struct semaphore *sem);
17615 extern int __attribute__((warn_unused_result)) down_interruptible(struct semaphore *sem);
17616 extern int __attribute__((warn_unused_result)) down_killable(struct semaphore *sem);
17617 extern int __attribute__((warn_unused_result)) down_trylock(struct semaphore *sem);
17618 extern int __attribute__((warn_unused_result)) down_timeout(struct semaphore *sem, long jiffies);
17619 extern void up(struct semaphore *sem);
17620 struct fiemap_extent {
17624 __u64 fe_reserved64[2];
17626 __u32 fe_reserved[3];
17632 __u32 fm_mapped_extents;
17633 __u32 fm_extent_count;
17635 struct fiemap_extent fm_extents[0];
17637 struct export_operations;
17638 struct hd_geometry;
17643 struct pipe_inode_info;
17644 struct poll_table_struct;
17646 struct vm_area_struct;
17649 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init(void);
17650 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init_early(void);
17651 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) files_init(unsigned long);
17652 extern struct files_stat_struct files_stat;
17653 extern unsigned long get_max_files(void);
17654 extern int sysctl_nr_open;
17655 extern struct inodes_stat_t inodes_stat;
17656 extern int leases_enable, lease_break_time;
17657 struct buffer_head;
17658 typedef int (get_block_t)(struct inode *inode, sector_t iblock,
17659 struct buffer_head *bh_result, int create);
17660 typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
17661 ssize_t bytes, void *private, int ret,
17664 unsigned int ia_valid;
17669 struct timespec ia_atime;
17670 struct timespec ia_mtime;
17671 struct timespec ia_ctime;
17672 struct file *ia_file;
17683 __u64 dqb_bhardlimit;
17684 __u64 dqb_bsoftlimit;
17685 __u64 dqb_curspace;
17686 __u64 dqb_ihardlimit;
17687 __u64 dqb_isoftlimit;
17688 __u64 dqb_curinodes;
17701 QUOTA_NL_C_WARNING,
17707 QUOTA_NL_A_EXCESS_ID,
17708 QUOTA_NL_A_WARNING,
17709 QUOTA_NL_A_DEV_MAJOR,
17710 QUOTA_NL_A_DEV_MINOR,
17711 QUOTA_NL_A_CAUSED_ID,
17714 typedef struct fs_disk_quota {
17719 __u64 d_blk_hardlimit;
17720 __u64 d_blk_softlimit;
17721 __u64 d_ino_hardlimit;
17722 __u64 d_ino_softlimit;
17730 __u64 d_rtb_hardlimit;
17731 __u64 d_rtb_softlimit;
17736 char d_padding4[8];
17738 typedef struct fs_qfilestat {
17741 __u32 qfs_nextents;
17743 typedef struct fs_quota_stat {
17747 fs_qfilestat_t qs_uquota;
17748 fs_qfilestat_t qs_gquota;
17749 __u32 qs_incoredqs;
17750 __s32 qs_btimelimit;
17751 __s32 qs_itimelimit;
17752 __s32 qs_rtbtimelimit;
17753 __u16 qs_bwarnlimit;
17754 __u16 qs_iwarnlimit;
/*
 * Quota-tree format callbacks: convert a dquot between in-memory and
 * on-disk representations, and test whether a disk entry matches a dquot's id.
 */
17757 struct qtree_fmt_operations {
17758 void (*mem2disk_dqblk)(void *disk, struct dquot *dquot);
17759 void (*disk2mem_dqblk)(struct dquot *dquot, void *disk);
17760 int (*is_id)(void *disk, struct dquot *dquot);
/*
 * In-memory state of one quota tree: owning superblock, block counts and
 * free-block/entry hints, block geometry (size bits, entry size, usable
 * bytes per block), tree depth, and the format ops above.
 */
17762 struct qtree_mem_dqinfo {
17763 struct super_block *dqi_sb;
17765 unsigned int dqi_blocks;
17766 unsigned int dqi_free_blk;
17767 unsigned int dqi_free_entry;
17768 unsigned int dqi_blocksize_bits;
17769 unsigned int dqi_entry_size;
17770 unsigned int dqi_usable_bs;
17771 unsigned int dqi_qtree_depth;
17772 struct qtree_fmt_operations *dqi_ops;
17774 int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
17775 int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
17776 int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
17777 int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
17778 int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk);
/*
 * Tree depth needed to address 2^32 quota ids: epb = references per block
 * (usable bytes / 4-byte pointer); grows `entries` by epb per level until
 * it covers the 32-bit id space. (Loop body and return are on lines
 * elided from this dump.)
 */
17779 static inline __attribute__((always_inline)) int qtree_depth(struct qtree_mem_dqinfo *info)
17781 unsigned int epb = info->dqi_usable_bs >> 2;
17782 unsigned long long entries = epb;
17784 for (i = 1; entries < (1ULL << 32); i++)
17788 typedef __kernel_uid32_t qid_t;
17789 typedef long long qsize_t;
17790 extern spinlock_t dq_data_lock;
17792 qsize_t dqb_bhardlimit;
17793 qsize_t dqb_bsoftlimit;
17794 qsize_t dqb_curspace;
17795 qsize_t dqb_rsvspace;
17796 qsize_t dqb_ihardlimit;
17797 qsize_t dqb_isoftlimit;
17798 qsize_t dqb_curinodes;
17802 struct quota_format_type;
/*
 * Per-superblock, per-quota-type in-memory info: format, dirty-dquot
 * list, flags (bit 16 = dirty, see info_dirty below), block/inode grace
 * periods, and the maximum representable limits.
 */
17803 struct mem_dqinfo {
17804 struct quota_format_type *dqi_format;
17806 struct list_head dqi_dirty_list;
17807 unsigned long dqi_flags;
17808 unsigned int dqi_bgrace;
17809 unsigned int dqi_igrace;
17810 qsize_t dqi_maxblimit;
17811 qsize_t dqi_maxilimit;
17814 struct super_block;
17815 extern void mark_info_dirty(struct super_block *sb, int type);
/* Test the info-dirty flag: bit 16 of info->dqi_flags. */
17816 static inline __attribute__((always_inline)) int info_dirty(struct mem_dqinfo *info)
17818 return (__builtin_constant_p((16)) ? constant_test_bit((16), (&info->dqi_flags)) : variable_test_bit((16), (&info->dqi_flags)));
17832 int stat[_DQST_DQSTAT_LAST];
17833 struct percpu_counter counter[_DQST_DQSTAT_LAST];
17835 extern struct dqstats *dqstats_pcpu;
17836 extern struct dqstats dqstats;
/* Increment one global quota statistic (per-CPU counter indexed by `type`). */
17837 static inline __attribute__((always_inline)) void dqstats_inc(unsigned int type)
17839 percpu_counter_inc(&dqstats.counter[type]);
/* Decrement one global quota statistic (counterpart of dqstats_inc above). */
17841 static inline __attribute__((always_inline)) void dqstats_dec(unsigned int type)
17843 percpu_counter_dec(&dqstats.counter[type]);
17846 struct hlist_node dq_hash;
17847 struct list_head dq_inuse;
17848 struct list_head dq_free;
17849 struct list_head dq_dirty;
17850 struct mutex dq_lock;
17852 wait_queue_head_t dq_wait_unused;
17853 struct super_block *dq_sb;
17854 unsigned int dq_id;
17856 unsigned long dq_flags;
17858 struct mem_dqblk dq_dqb;
/*
 * Operations a quota file format supplies: detect its file format,
 * read/write/free the per-type file info, and read/commit/release a
 * single dquot block.
 */
17860 struct quota_format_ops {
17861 int (*check_quota_file)(struct super_block *sb, int type);
17862 int (*read_file_info)(struct super_block *sb, int type);
17863 int (*write_file_info)(struct super_block *sb, int type);
17864 int (*free_file_info)(struct super_block *sb, int type);
17865 int (*read_dqblk)(struct dquot *dquot);
17866 int (*commit_dqblk)(struct dquot *dquot);
17867 int (*release_dqblk)(struct dquot *dquot);
17869 struct dquot_operations {
17870 int (*write_dquot) (struct dquot *);
17871 struct dquot *(*alloc_dquot)(struct super_block *, int);
17872 void (*destroy_dquot)(struct dquot *);
17873 int (*acquire_dquot) (struct dquot *);
17874 int (*release_dquot) (struct dquot *);
17875 int (*mark_dirty) (struct dquot *);
17876 int (*write_info) (struct super_block *, int);
17877 qsize_t *(*get_reserved_space) (struct inode *);
17880 struct quotactl_ops {
17881 int (*quota_on)(struct super_block *, int, int, struct path *);
17882 int (*quota_on_meta)(struct super_block *, int, int);
17883 int (*quota_off)(struct super_block *, int);
17884 int (*quota_sync)(struct super_block *, int, int);
17885 int (*get_info)(struct super_block *, int, struct if_dqinfo *);
17886 int (*set_info)(struct super_block *, int, struct if_dqinfo *);
17887 int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
17888 int (*set_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
17889 int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
17890 int (*set_xstate)(struct super_block *, unsigned int, int);
17892 struct quota_format_type {
17894 const struct quota_format_ops *qf_ops;
17895 struct module *qf_owner;
17896 struct quota_format_type *qf_next;
17899 _DQUOT_USAGE_ENABLED = 0,
17900 _DQUOT_LIMITS_ENABLED,
17904 static inline __attribute__((always_inline)) unsigned int dquot_state_flag(unsigned int flags, int type)
17906 return flags << _DQUOT_STATE_FLAGS * type;
17908 static inline __attribute__((always_inline)) unsigned int dquot_generic_flag(unsigned int flags, int type)
17910 return (flags >> _DQUOT_STATE_FLAGS * type) & ((1 << _DQUOT_USAGE_ENABLED) | (1 << _DQUOT_LIMITS_ENABLED) | (1 << _DQUOT_SUSPENDED));
17912 static inline __attribute__((always_inline)) void quota_send_warning(short type, unsigned int id, dev_t dev,
17913 const char warntype)
17917 struct quota_info {
17918 unsigned int flags;
17919 struct mutex dqio_mutex;
17920 struct mutex dqonoff_mutex;
17921 struct rw_semaphore dqptr_sem;
17922 struct inode *files[2];
17923 struct mem_dqinfo info[2];
17924 const struct quota_format_ops *ops[2];
17926 int register_quota_format(struct quota_format_type *fmt);
17927 void unregister_quota_format(struct quota_format_type *fmt);
17928 struct quota_module_name {
17932 enum positive_aop_returns {
17933 AOP_WRITEPAGE_ACTIVATE = 0x80000,
17934 AOP_TRUNCATED_PAGE = 0x80001,
17937 struct address_space;
17938 struct writeback_control;
17940 const struct iovec *iov;
17941 unsigned long nr_segs;
17945 size_t iov_iter_copy_from_user_atomic(struct page *page,
17946 struct iov_iter *i, unsigned long offset, size_t bytes);
17947 size_t iov_iter_copy_from_user(struct page *page,
17948 struct iov_iter *i, unsigned long offset, size_t bytes);
17949 void iov_iter_advance(struct iov_iter *i, size_t bytes);
17950 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
17951 size_t iov_iter_single_seg_count(struct iov_iter *i);
17952 static inline __attribute__((always_inline)) void iov_iter_init(struct iov_iter *i,
17953 const struct iovec *iov, unsigned long nr_segs,
17954 size_t count, size_t written)
17957 i->nr_segs = nr_segs;
17959 i->count = count + written;
17960 iov_iter_advance(i, written);
17962 static inline __attribute__((always_inline)) size_t iov_iter_count(struct iov_iter *i)
17974 } read_descriptor_t;
17975 typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
17976 unsigned long, unsigned long);
17977 struct address_space_operations {
17978 int (*writepage)(struct page *page, struct writeback_control *wbc);
17979 int (*readpage)(struct file *, struct page *);
17980 int (*writepages)(struct address_space *, struct writeback_control *);
17981 int (*set_page_dirty)(struct page *page);
17982 int (*readpages)(struct file *filp, struct address_space *mapping,
17983 struct list_head *pages, unsigned nr_pages);
17984 int (*write_begin)(struct file *, struct address_space *mapping,
17985 loff_t pos, unsigned len, unsigned flags,
17986 struct page **pagep, void **fsdata);
17987 int (*write_end)(struct file *, struct address_space *mapping,
17988 loff_t pos, unsigned len, unsigned copied,
17989 struct page *page, void *fsdata);
17990 sector_t (*bmap)(struct address_space *, sector_t);
17991 void (*invalidatepage) (struct page *, unsigned long);
17992 int (*releasepage) (struct page *, gfp_t);
17993 void (*freepage)(struct page *);
17994 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
17995 loff_t offset, unsigned long nr_segs);
17996 int (*get_xip_mem)(struct address_space *, unsigned long, int,
17997 void **, unsigned long *);
17998 int (*migratepage) (struct address_space *,
17999 struct page *, struct page *);
18000 int (*launder_page) (struct page *);
18001 int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
18003 int (*error_remove_page)(struct address_space *, struct page *);
18005 extern const struct address_space_operations empty_aops;
18006 int pagecache_write_begin(struct file *, struct address_space *mapping,
18007 loff_t pos, unsigned len, unsigned flags,
18008 struct page **pagep, void **fsdata);
18009 int pagecache_write_end(struct file *, struct address_space *mapping,
18010 loff_t pos, unsigned len, unsigned copied,
18011 struct page *page, void *fsdata);
18012 struct backing_dev_info;
/*
 * struct address_space — per-inode page-cache and memory-mapping state.
 * (Preprocessed header dump; the leading numbers on each line are the
 * original source line numbers embedded by the dump, not code.)
 */
18013 struct address_space {
18014 struct inode *host; /* NOTE(review): presumably the owning inode this mapping backs — confirm */
18015 struct radix_tree_root page_tree; /* radix tree holding the cached pages */
18016 spinlock_t tree_lock; /* assumes this protects page_tree — TODO confirm */
18017 unsigned int i_mmap_writable; /* nonzero when writably mapped; tested by mapping_writably_mapped() */
18018 struct prio_tree_root i_mmap; /* tree of mappings; emptiness tested by mapping_mapped() */
18019 struct list_head i_mmap_nonlinear; /* nonlinear mappings; also tested by mapping_mapped() */
18020 struct mutex i_mmap_mutex; /* NOTE(review): likely serializes i_mmap/i_mmap_nonlinear — verify */
18021 unsigned long nrpages; /* number of pages in page_tree — presumably; confirm against callers */
18022 unsigned long writeback_index; /* NOTE(review): looks like a writeback resume cursor — confirm */
18023 const struct address_space_operations *a_ops; /* method table (struct address_space_operations above) */
18024 unsigned long flags;
18025 struct backing_dev_info *backing_dev_info;
18026 spinlock_t private_lock; /* assumes this guards private_list — TODO confirm */
18027 struct list_head private_list;
18028 struct address_space *assoc_mapping;
18029 } __attribute__((aligned(sizeof(long)))); /* long-aligned so the low pointer bits stay clear — assumption, verify */
18030 struct block_device {
18033 struct inode * bd_inode;
18034 struct super_block * bd_super;
18035 struct mutex bd_mutex;
18036 struct list_head bd_inodes;
18037 void * bd_claiming;
18040 bool bd_write_holder;
18041 struct list_head bd_holder_disks;
18042 struct block_device * bd_contains;
18043 unsigned bd_block_size;
18044 struct hd_struct * bd_part;
18045 unsigned bd_part_count;
18046 int bd_invalidated;
18047 struct gendisk * bd_disk;
18048 struct list_head bd_list;
18049 unsigned long bd_private;
18050 int bd_fsfreeze_count;
18051 struct mutex bd_fsfreeze_mutex;
18053 int mapping_tagged(struct address_space *mapping, int tag);
18054 static inline __attribute__((always_inline)) int mapping_mapped(struct address_space *mapping)
18056 return !prio_tree_empty(&mapping->i_mmap) ||
18057 !list_empty(&mapping->i_mmap_nonlinear);
18059 static inline __attribute__((always_inline)) int mapping_writably_mapped(struct address_space *mapping)
18061 return mapping->i_mmap_writable != 0;
18068 const struct inode_operations *i_op;
18069 struct super_block *i_sb;
18071 unsigned int i_flags;
18072 unsigned long i_state;
18074 struct mutex i_mutex;
18075 unsigned long dirtied_when;
18076 struct hlist_node i_hash;
18077 struct list_head i_wb_list;
18078 struct list_head i_lru;
18079 struct list_head i_sb_list;
18081 struct list_head i_dentry;
18082 struct rcu_head i_rcu;
18084 unsigned long i_ino;
18086 unsigned int i_nlink;
18088 unsigned int i_blkbits;
18091 seqcount_t i_size_seqcount;
18092 struct timespec i_atime;
18093 struct timespec i_mtime;
18094 struct timespec i_ctime;
18096 unsigned short i_bytes;
18097 struct rw_semaphore i_alloc_sem;
18098 const struct file_operations *i_fop;
18099 struct file_lock *i_flock;
18100 struct address_space *i_mapping;
18101 struct address_space i_data;
18102 struct dquot *i_dquot[2];
18103 struct list_head i_devices;
18105 struct pipe_inode_info *i_pipe;
18106 struct block_device *i_bdev;
18107 struct cdev *i_cdev;
18109 __u32 i_generation;
18110 __u32 i_fsnotify_mask;
18111 struct hlist_head i_fsnotify_marks;
18112 atomic_t i_writecount;
18113 struct posix_acl *i_acl;
18114 struct posix_acl *i_default_acl;
18117 static inline __attribute__((always_inline)) int inode_unhashed(struct inode *inode)
18119 return hlist_unhashed(&inode->i_hash);
18121 enum inode_i_mutex_lock_class
18129 static inline __attribute__((always_inline)) loff_t i_size_read(const struct inode *inode)
18134 seq = read_seqcount_begin(&inode->i_size_seqcount);
18135 i_size = inode->i_size;
18136 } while (read_seqcount_retry(&inode->i_size_seqcount, seq));
18139 static inline __attribute__((always_inline)) void i_size_write(struct inode *inode, loff_t i_size)
18141 write_seqcount_begin(&inode->i_size_seqcount);
18142 inode->i_size = i_size;
18143 write_seqcount_end(&inode->i_size_seqcount);
18145 static inline __attribute__((always_inline)) unsigned iminor(const struct inode *inode)
18147 return ((unsigned int) ((inode->i_rdev) & ((1U << 20) - 1)));
18149 static inline __attribute__((always_inline)) unsigned imajor(const struct inode *inode)
18151 return ((unsigned int) ((inode->i_rdev) >> 20));
18153 extern struct block_device *I_BDEV(struct inode *inode);
18154 struct fown_struct {
18157 enum pid_type pid_type;
18161 struct file_ra_state {
18162 unsigned long start;
18164 unsigned int async_size;
18165 unsigned int ra_pages;
18166 unsigned int mmap_miss;
18169 static inline __attribute__((always_inline)) int ra_has_index(struct file_ra_state *ra, unsigned long index)
18171 return (index >= ra->start &&
18172 index < ra->start + ra->size);
18176 struct list_head fu_list;
18177 struct rcu_head fu_rcuhead;
18179 struct path f_path;
18180 const struct file_operations *f_op;
18183 atomic_long_t f_count;
18184 unsigned int f_flags;
18187 struct fown_struct f_owner;
18188 const struct cred *f_cred;
18189 struct file_ra_state f_ra;
18192 void *private_data;
18193 struct list_head f_ep_links;
18194 struct address_space *f_mapping;
18196 struct file_handle {
18197 __u32 handle_bytes;
18199 unsigned char f_handle[0];
/*
 * No-op write-tracking hooks: each stub ignores its argument and does
 * nothing, so callers can invoke them unconditionally without #ifdefs.
 * NOTE(review): presumably the stubs used when write-count debugging is
 * compiled out — confirm against the unpreprocessed header.
 */
18201 static inline __attribute__((always_inline)) void file_take_write(struct file *filp) {}
18202 static inline __attribute__((always_inline)) void file_release_write(struct file *filp) {}
18203 static inline __attribute__((always_inline)) void file_reset_write(struct file *filp) {}
18204 static inline __attribute__((always_inline)) void file_check_state(struct file *filp) {}
18205 static inline __attribute__((always_inline)) int file_check_writeable(struct file *filp)
18209 typedef struct files_struct *fl_owner_t;
18210 struct file_lock_operations {
18211 void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
18212 void (*fl_release_private)(struct file_lock *);
18214 struct lock_manager_operations {
18215 int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
18216 void (*fl_notify)(struct file_lock *);
18217 int (*fl_grant)(struct file_lock *, struct file_lock *, int);
18218 void (*fl_release_private)(struct file_lock *);
18219 void (*fl_break)(struct file_lock *);
18220 int (*fl_change)(struct file_lock **, int);
18222 struct lock_manager {
18223 struct list_head list;
18225 void locks_start_grace(struct lock_manager *);
18226 void locks_end_grace(struct lock_manager *);
18227 int locks_in_grace(void);
18234 NFSERR_EAGAIN = 11,
18239 NFSERR_NOTDIR = 20,
18246 NFSERR_OPNOTSUPP = 45,
18247 NFSERR_NAMETOOLONG = 63,
18248 NFSERR_NOTEMPTY = 66,
18251 NFSERR_REMOTE = 71,
18252 NFSERR_WFLUSH = 99,
18253 NFSERR_BADHANDLE = 10001,
18254 NFSERR_NOT_SYNC = 10002,
18255 NFSERR_BAD_COOKIE = 10003,
18256 NFSERR_NOTSUPP = 10004,
18257 NFSERR_TOOSMALL = 10005,
18258 NFSERR_SERVERFAULT = 10006,
18259 NFSERR_BADTYPE = 10007,
18260 NFSERR_JUKEBOX = 10008,
18261 NFSERR_SAME = 10009,
18262 NFSERR_DENIED = 10010,
18263 NFSERR_EXPIRED = 10011,
18264 NFSERR_LOCKED = 10012,
18265 NFSERR_GRACE = 10013,
18266 NFSERR_FHEXPIRED = 10014,
18267 NFSERR_SHARE_DENIED = 10015,
18268 NFSERR_WRONGSEC = 10016,
18269 NFSERR_CLID_INUSE = 10017,
18270 NFSERR_RESOURCE = 10018,
18271 NFSERR_MOVED = 10019,
18272 NFSERR_NOFILEHANDLE = 10020,
18273 NFSERR_MINOR_VERS_MISMATCH = 10021,
18274 NFSERR_STALE_CLIENTID = 10022,
18275 NFSERR_STALE_STATEID = 10023,
18276 NFSERR_OLD_STATEID = 10024,
18277 NFSERR_BAD_STATEID = 10025,
18278 NFSERR_BAD_SEQID = 10026,
18279 NFSERR_NOT_SAME = 10027,
18280 NFSERR_LOCK_RANGE = 10028,
18281 NFSERR_SYMLINK = 10029,
18282 NFSERR_RESTOREFH = 10030,
18283 NFSERR_LEASE_MOVED = 10031,
18284 NFSERR_ATTRNOTSUPP = 10032,
18285 NFSERR_NO_GRACE = 10033,
18286 NFSERR_RECLAIM_BAD = 10034,
18287 NFSERR_RECLAIM_CONFLICT = 10035,
18288 NFSERR_BAD_XDR = 10036,
18289 NFSERR_LOCKS_HELD = 10037,
18290 NFSERR_OPENMODE = 10038,
18291 NFSERR_BADOWNER = 10039,
18292 NFSERR_BADCHAR = 10040,
18293 NFSERR_BADNAME = 10041,
18294 NFSERR_BAD_RANGE = 10042,
18295 NFSERR_LOCK_NOTSUPP = 10043,
18296 NFSERR_OP_ILLEGAL = 10044,
18297 NFSERR_DEADLOCK = 10045,
18298 NFSERR_FILE_OPEN = 10046,
18299 NFSERR_ADMIN_REVOKED = 10047,
18300 NFSERR_CB_PATH_DOWN = 10048,
18313 typedef u32 rpc_authflavor_t;
18314 enum rpc_auth_flavors {
18317 RPC_AUTH_SHORT = 2,
18321 RPC_AUTH_MAXFLAVOR = 8,
18322 RPC_AUTH_GSS_KRB5 = 390003,
18323 RPC_AUTH_GSS_KRB5I = 390004,
18324 RPC_AUTH_GSS_KRB5P = 390005,
18325 RPC_AUTH_GSS_LKEY = 390006,
18326 RPC_AUTH_GSS_LKEYI = 390007,
18327 RPC_AUTH_GSS_LKEYP = 390008,
18328 RPC_AUTH_GSS_SPKM = 390009,
18329 RPC_AUTH_GSS_SPKMI = 390010,
18330 RPC_AUTH_GSS_SPKMP = 390011,
18332 enum rpc_msg_type {
18336 enum rpc_reply_stat {
18337 RPC_MSG_ACCEPTED = 0,
18340 enum rpc_accept_stat {
18342 RPC_PROG_UNAVAIL = 1,
18343 RPC_PROG_MISMATCH = 2,
18344 RPC_PROC_UNAVAIL = 3,
18345 RPC_GARBAGE_ARGS = 4,
18346 RPC_SYSTEM_ERR = 5,
18347 RPC_DROP_REPLY = 60000,
18349 enum rpc_reject_stat {
18353 enum rpc_auth_stat {
18355 RPC_AUTH_BADCRED = 1,
18356 RPC_AUTH_REJECTEDCRED = 2,
18357 RPC_AUTH_BADVERF = 3,
18358 RPC_AUTH_REJECTEDVERF = 4,
18359 RPC_AUTH_TOOWEAK = 5,
18360 RPCSEC_GSS_CREDPROBLEM = 13,
18361 RPCSEC_GSS_CTXPROBLEM = 14
18363 typedef __be32 rpc_fraghdr;
18364 extern __be32 in_aton(const char *str);
18365 extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
18366 extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
18368 unsigned short size;
18369 unsigned char data[128];
18371 static inline __attribute__((always_inline)) int nfs_compare_fh(const struct nfs_fh *a, const struct nfs_fh *b)
18373 return a->size != b->size || __builtin_memcmp(a->data, b->data, a->size) != 0;
18375 static inline __attribute__((always_inline)) void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source)
18377 target->size = source->size;
18378 __builtin_memcpy(target->data, source->data, source->size);
18380 enum nfs3_stable_how {
18385 struct nlm_lockowner;
18386 struct nfs_lock_info {
18388 struct nlm_lockowner *owner;
18389 struct list_head list;
18391 struct nfs4_lock_state;
18392 struct nfs4_lock_info {
18393 struct nfs4_lock_state *owner;
18396 struct file_lock *fl_next;
18397 struct list_head fl_link;
18398 struct list_head fl_block;
18399 fl_owner_t fl_owner;
18400 unsigned char fl_flags;
18401 unsigned char fl_type;
18402 unsigned int fl_pid;
18403 struct pid *fl_nspid;
18404 wait_queue_head_t fl_wait;
18405 struct file *fl_file;
18408 struct fasync_struct * fl_fasync;
18409 unsigned long fl_break_time;
18410 const struct file_lock_operations *fl_ops;
18411 const struct lock_manager_operations *fl_lmops;
18413 struct nfs_lock_info nfs_fl;
18414 struct nfs4_lock_info nfs4_fl;
18416 struct list_head link;
18421 struct f_owner_ex {
18423 __kernel_pid_t pid;
18428 __kernel_off_t l_start;
18429 __kernel_off_t l_len;
18430 __kernel_pid_t l_pid;
18435 __kernel_loff_t l_start;
18436 __kernel_loff_t l_len;
18437 __kernel_pid_t l_pid;
18439 extern void send_sigio(struct fown_struct *fown, int fd, int band);
18440 extern int fcntl_getlk(struct file *, struct flock *);
18441 extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
18443 extern int fcntl_getlk64(struct file *, struct flock64 *);
18444 extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
18446 extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
18447 extern int fcntl_getlease(struct file *filp);
18448 void locks_free_lock(struct file_lock *fl);
18449 extern void locks_init_lock(struct file_lock *);
18450 extern struct file_lock * locks_alloc_lock(void);
18451 extern void locks_copy_lock(struct file_lock *, struct file_lock *);
18452 extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
18453 extern void locks_remove_posix(struct file *, fl_owner_t);
18454 extern void locks_remove_flock(struct file *);
18455 extern void locks_release_private(struct file_lock *);
18456 extern void posix_test_lock(struct file *, struct file_lock *);
18457 extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
18458 extern int posix_lock_file_wait(struct file *, struct file_lock *);
18459 extern int posix_unblock_lock(struct file *, struct file_lock *);
18460 extern int vfs_test_lock(struct file *, struct file_lock *);
18461 extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
18462 extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
18463 extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
18464 extern int __break_lease(struct inode *inode, unsigned int flags);
18465 extern void lease_get_mtime(struct inode *, struct timespec *time);
18466 extern int generic_setlease(struct file *, long, struct file_lock **);
18467 extern int vfs_setlease(struct file *, long, struct file_lock **);
18468 extern int lease_modify(struct file_lock **, int);
18469 extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
18470 extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
18471 extern void lock_flocks(void);
18472 extern void unlock_flocks(void);
18473 struct fasync_struct {
18474 spinlock_t fa_lock;
18477 struct fasync_struct *fa_next;
18478 struct file *fa_file;
18479 struct rcu_head fa_rcu;
18481 extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
18482 extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
18483 extern int fasync_remove_entry(struct file *, struct fasync_struct **);
18484 extern struct fasync_struct *fasync_alloc(void);
18485 extern void fasync_free(struct fasync_struct *);
18486 extern void kill_fasync(struct fasync_struct **, int, int);
18487 extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
18488 extern int f_setown(struct file *filp, unsigned long arg, int force);
18489 extern void f_delown(struct file *filp);
18490 extern pid_t f_getown(struct file *filp);
18491 extern int send_sigurg(struct fown_struct *fown);
18492 extern struct list_head super_blocks;
18493 extern spinlock_t sb_lock;
18494 struct super_block {
18495 struct list_head s_list;
18497 unsigned char s_dirt;
18498 unsigned char s_blocksize_bits;
18499 unsigned long s_blocksize;
18501 struct file_system_type *s_type;
18502 const struct super_operations *s_op;
18503 const struct dquot_operations *dq_op;
18504 const struct quotactl_ops *s_qcop;
18505 const struct export_operations *s_export_op;
18506 unsigned long s_flags;
18507 unsigned long s_magic;
18508 struct dentry *s_root;
18509 struct rw_semaphore s_umount;
18510 struct mutex s_lock;
18514 const struct xattr_handler **s_xattr;
18515 struct list_head s_inodes;
18516 struct hlist_bl_head s_anon;
18517 struct list_head *s_files;
18518 struct list_head s_dentry_lru;
18519 int s_nr_dentry_unused;
18520 struct block_device *s_bdev;
18521 struct backing_dev_info *s_bdi;
18522 struct mtd_info *s_mtd;
18523 struct list_head s_instances;
18524 struct quota_info s_dquot;
18526 wait_queue_head_t s_wait_unfrozen;
18532 struct mutex s_vfs_rename_mutex;
18535 const struct dentry_operations *s_d_op;
18536 int cleancache_poolid;
18538 extern struct timespec current_fs_time(struct super_block *sb);
18541 SB_FREEZE_WRITE = 1,
18542 SB_FREEZE_TRANS = 2,
18544 extern struct user_namespace init_user_ns;
18545 extern bool inode_owner_or_capable(const struct inode *inode);
18546 extern void lock_super(struct super_block *);
18547 extern void unlock_super(struct super_block *);
18548 extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
18549 extern int vfs_mkdir(struct inode *, struct dentry *, int);
18550 extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
18551 extern int vfs_symlink(struct inode *, struct dentry *, const char *);
18552 extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
18553 extern int vfs_rmdir(struct inode *, struct dentry *);
18554 extern int vfs_unlink(struct inode *, struct dentry *);
18555 extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
18556 extern void dentry_unhash(struct dentry *dentry);
18557 extern int file_permission(struct file *, int);
18558 extern void inode_init_owner(struct inode *inode, const struct inode *dir,
18560 struct fiemap_extent_info {
18561 unsigned int fi_flags;
18562 unsigned int fi_extents_mapped;
18563 unsigned int fi_extents_max;
18564 struct fiemap_extent *fi_extents_start;
18566 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
18567 u64 phys, u64 len, u32 flags);
18568 int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
18569 typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
18570 struct block_device_operations;
18571 struct file_operations {
18572 struct module *owner;
18573 loff_t (*llseek) (struct file *, loff_t, int);
18574 ssize_t (*read) (struct file *, char *, size_t, loff_t *);
18575 ssize_t (*write) (struct file *, const char *, size_t, loff_t *);
18576 ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
18577 ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
18578 int (*readdir) (struct file *, void *, filldir_t);
18579 unsigned int (*poll) (struct file *, struct poll_table_struct *);
18580 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
18581 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
18582 int (*mmap) (struct file *, struct vm_area_struct *);
18583 int (*open) (struct inode *, struct file *);
18584 int (*flush) (struct file *, fl_owner_t id);
18585 int (*release) (struct inode *, struct file *);
18586 int (*fsync) (struct file *, int datasync);
18587 int (*aio_fsync) (struct kiocb *, int datasync);
18588 int (*fasync) (int, struct file *, int);
18589 int (*lock) (struct file *, int, struct file_lock *);
18590 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
18591 unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
18592 int (*check_flags)(int);
18593 int (*flock) (struct file *, int, struct file_lock *);
18594 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
18595 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
18596 int (*setlease)(struct file *, long, struct file_lock **);
18597 long (*fallocate)(struct file *file, int mode, loff_t offset,
18600 struct inode_operations {
18601 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
18602 void * (*follow_link) (struct dentry *, struct nameidata *);
18603 int (*permission) (struct inode *, int, unsigned int);
18604 int (*check_acl)(struct inode *, int, unsigned int);
18605 int (*readlink) (struct dentry *, char *,int);
18606 void (*put_link) (struct dentry *, struct nameidata *, void *);
18607 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
18608 int (*link) (struct dentry *,struct inode *,struct dentry *);
18609 int (*unlink) (struct inode *,struct dentry *);
18610 int (*symlink) (struct inode *,struct dentry *,const char *);
18611 int (*mkdir) (struct inode *,struct dentry *,int);
18612 int (*rmdir) (struct inode *,struct dentry *);
18613 int (*mknod) (struct inode *,struct dentry *,int,dev_t);
18614 int (*rename) (struct inode *, struct dentry *,
18615 struct inode *, struct dentry *);
18616 void (*truncate) (struct inode *);
18617 int (*setattr) (struct dentry *, struct iattr *);
18618 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
18619 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
18620 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
18621 ssize_t (*listxattr) (struct dentry *, char *, size_t);
18622 int (*removexattr) (struct dentry *, const char *);
18623 void (*truncate_range)(struct inode *, loff_t, loff_t);
18624 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
18626 } __attribute__((__aligned__((1 << (6)))));
18628 ssize_t rw_copy_check_uvector(int type, const struct iovec * uvector,
18629 unsigned long nr_segs, unsigned long fast_segs,
18630 struct iovec *fast_pointer,
18631 struct iovec **ret_pointer);
18632 extern ssize_t vfs_read(struct file *, char *, size_t, loff_t *);
18633 extern ssize_t vfs_write(struct file *, const char *, size_t, loff_t *);
18634 extern ssize_t vfs_readv(struct file *, const struct iovec *,
18635 unsigned long, loff_t *);
18636 extern ssize_t vfs_writev(struct file *, const struct iovec *,
18637 unsigned long, loff_t *);
18638 struct super_operations {
18639 struct inode *(*alloc_inode)(struct super_block *sb);
18640 void (*destroy_inode)(struct inode *);
18641 void (*dirty_inode) (struct inode *, int flags);
18642 int (*write_inode) (struct inode *, struct writeback_control *wbc);
18643 int (*drop_inode) (struct inode *);
18644 void (*evict_inode) (struct inode *);
18645 void (*put_super) (struct super_block *);
18646 void (*write_super) (struct super_block *);
18647 int (*sync_fs)(struct super_block *sb, int wait);
18648 int (*freeze_fs) (struct super_block *);
18649 int (*unfreeze_fs) (struct super_block *);
18650 int (*statfs) (struct dentry *, struct kstatfs *);
18651 int (*remount_fs) (struct super_block *, int *, char *);
18652 void (*umount_begin) (struct super_block *);
18653 int (*show_options)(struct seq_file *, struct vfsmount *);
18654 int (*show_devname)(struct seq_file *, struct vfsmount *);
18655 int (*show_path)(struct seq_file *, struct vfsmount *);
18656 int (*show_stats)(struct seq_file *, struct vfsmount *);
18657 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
18658 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
18659 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
18661 extern void __mark_inode_dirty(struct inode *, int);
18662 static inline __attribute__((always_inline)) void mark_inode_dirty(struct inode *inode)
18664 __mark_inode_dirty(inode, ((1 << 0) | (1 << 1) | (1 << 2)));
18666 static inline __attribute__((always_inline)) void mark_inode_dirty_sync(struct inode *inode)
18668 __mark_inode_dirty(inode, (1 << 0));
18670 static inline __attribute__((always_inline)) void inc_nlink(struct inode *inode)
18674 static inline __attribute__((always_inline)) void inode_inc_link_count(struct inode *inode)
18677 mark_inode_dirty(inode);
18679 static inline __attribute__((always_inline)) void drop_nlink(struct inode *inode)
18683 static inline __attribute__((always_inline)) void clear_nlink(struct inode *inode)
18685 inode->i_nlink = 0;
18687 static inline __attribute__((always_inline)) void inode_dec_link_count(struct inode *inode)
18690 mark_inode_dirty(inode);
18692 static inline __attribute__((always_inline)) void inode_inc_iversion(struct inode *inode)
18694 spin_lock(&inode->i_lock);
18695 inode->i_version++;
18696 spin_unlock(&inode->i_lock);
18698 extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
18699 static inline __attribute__((always_inline)) void file_accessed(struct file *file)
18701 if (__builtin_constant_p(((!(file->f_flags & 01000000)))) ? !!((!(file->f_flags & 01000000))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 1795, }; ______r = !!((!(file->f_flags & 01000000))); ______f.miss_hit[______r]++; ______r; }))
18702 touch_atime(file->f_path.mnt, file->f_path.dentry);
18704 int sync_inode(struct inode *inode, struct writeback_control *wbc);
18705 int sync_inode_metadata(struct inode *inode, int wait);
18706 struct file_system_type {
18709 struct dentry *(*mount) (struct file_system_type *, int,
18710 const char *, void *);
18711 void (*kill_sb) (struct super_block *);
18712 struct module *owner;
18713 struct file_system_type * next;
18714 struct list_head fs_supers;
18715 struct lock_class_key s_lock_key;
18716 struct lock_class_key s_umount_key;
18717 struct lock_class_key s_vfs_rename_key;
18718 struct lock_class_key i_lock_key;
18719 struct lock_class_key i_mutex_key;
18720 struct lock_class_key i_mutex_dir_key;
18721 struct lock_class_key i_alloc_sem_key;
18723 extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
18724 void *data, int (*fill_super)(struct super_block *, void *, int));
18725 extern struct dentry *mount_bdev(struct file_system_type *fs_type,
18726 int flags, const char *dev_name, void *data,
18727 int (*fill_super)(struct super_block *, void *, int));
18728 extern struct dentry *mount_single(struct file_system_type *fs_type,
18729 int flags, void *data,
18730 int (*fill_super)(struct super_block *, void *, int));
18731 extern struct dentry *mount_nodev(struct file_system_type *fs_type,
18732 int flags, void *data,
18733 int (*fill_super)(struct super_block *, void *, int));
18734 void generic_shutdown_super(struct super_block *sb);
18735 void kill_block_super(struct super_block *sb);
18736 void kill_anon_super(struct super_block *sb);
18737 void kill_litter_super(struct super_block *sb);
18738 void deactivate_super(struct super_block *sb);
18739 void deactivate_locked_super(struct super_block *sb);
18740 int set_anon_super(struct super_block *s, void *data);
18741 struct super_block *sget(struct file_system_type *type,
18742 int (*test)(struct super_block *,void *),
18743 int (*set)(struct super_block *,void *),
18745 extern struct dentry *mount_pseudo(struct file_system_type *, char *,
18746 const struct super_operations *ops,
18747 const struct dentry_operations *dops,
18749 static inline __attribute__((always_inline)) void sb_mark_dirty(struct super_block *sb)
18753 static inline __attribute__((always_inline)) void sb_mark_clean(struct super_block *sb)
18757 static inline __attribute__((always_inline)) int sb_is_dirty(struct super_block *sb)
/* Filesystem-type registration and kernel-internal mounting. */
18761 extern int register_filesystem(struct file_system_type *);
18762 extern int unregister_filesystem(struct file_system_type *);
18763 extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
/* Mount-tree queries and traversal used by umount and audit paths. */
18764 extern int may_umount_tree(struct vfsmount *);
18765 extern int may_umount(struct vfsmount *);
18766 extern long do_mount(char *, char *, char *, unsigned long, void *);
18767 extern struct vfsmount *collect_mounts(struct path *);
18768 extern void drop_collected_mounts(struct vfsmount *);
18769 extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
18770 struct vfsmount *);
/* statfs family: by path, by user-supplied path string, by fd, by dentry. */
18771 extern int vfs_statfs(struct path *, struct kstatfs *);
18772 extern int user_statfs(const char *, struct kstatfs *);
18773 extern int fd_statfs(int, struct kstatfs *);
18774 extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
/* Filesystem freeze/thaw and umask. */
18775 extern int freeze_super(struct super_block *super);
18776 extern int thaw_super(struct super_block *super);
18777 extern int current_umask(void);
18778 extern struct kobject *fs_kobj;
/* Read/write area verification and mandatory file locking checks. */
18779 extern int rw_verify_area(int, struct file *, loff_t *, size_t);
18780 extern int locks_mandatory_locked(struct inode *);
18781 extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
18782 static inline __attribute__((always_inline)) int __mandatory_lock(struct inode *ino)
18784 return (ino->i_mode & (0002000 | 00010)) == 0002000;
18786 static inline __attribute__((always_inline)) int mandatory_lock(struct inode *ino)
18788 return ((ino)->i_sb->s_flags & (64)) && __mandatory_lock(ino);
18790 static inline __attribute__((always_inline)) int locks_verify_locked(struct inode *inode)
18792 if (__builtin_constant_p(((mandatory_lock(inode)))) ? !!((mandatory_lock(inode))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 1923, }; ______r = !!((mandatory_lock(inode))); ______f.miss_hit[______r]++; ______r; }))
18793 return locks_mandatory_locked(inode);
18796 static inline __attribute__((always_inline)) int locks_verify_truncate(struct inode *inode,
18800 if (__builtin_constant_p(((inode->i_flock && mandatory_lock(inode)))) ? !!((inode->i_flock && mandatory_lock(inode))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 1932, }; ______r = !!((inode->i_flock && mandatory_lock(inode))); ______f.miss_hit[______r]++; ______r; }))
18801 return locks_mandatory_area(
18803 size < inode->i_size ? size : inode->i_size,
18804 (size < inode->i_size ? inode->i_size - size
18805 : size - inode->i_size)
18809 static inline __attribute__((always_inline)) int break_lease(struct inode *inode, unsigned int mode)
18811 if (__builtin_constant_p(((inode->i_flock))) ? !!((inode->i_flock)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 1944, }; ______r = !!((inode->i_flock)); ______f.miss_hit[______r]++; ______r; }))
18812 return __break_lease(inode, mode);
18815 extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
18816 struct file *filp);
18817 extern int do_fallocate(struct file *file, int mode, loff_t offset,
18819 extern long do_sys_open(int dfd, const char *filename, int flags,
18821 extern struct file *filp_open(const char *, int, int);
18822 extern struct file *file_open_root(struct dentry *, struct vfsmount *,
18823 const char *, int);
18824 extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
18825 const struct cred *);
18826 extern int filp_close(struct file *, fl_owner_t id);
18827 extern char * getname(const char *);
18828 extern int ioctl_preallocate(struct file *filp, void *argp);
18829 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init_early(void);
18830 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init(unsigned long);
18831 extern struct kmem_cache *names_cachep;
18832 extern void putname(const char *name);
/* Block-device driver registration and struct block_device refcounting. */
18833 extern int register_blkdev(unsigned int, const char *);
18834 extern void unregister_blkdev(unsigned int, const char *);
18835 extern struct block_device *bdget(dev_t);
18836 extern struct block_device *bdgrab(struct block_device *bdev);
18837 extern void bd_set_size(struct block_device *, loff_t size);
18838 extern void bd_forget(struct inode *inode);
18839 extern void bdput(struct block_device *);
/* Block-device cache invalidation, sync, and freeze/thaw. */
18840 extern void invalidate_bdev(struct block_device *);
18841 extern int sync_blockdev(struct block_device *bdev);
18842 extern struct super_block *freeze_bdev(struct block_device *);
18843 extern void emergency_thaw_all(void);
18844 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
18845 extern int fsync_bdev(struct block_device *);
18846 extern int sync_filesystem(struct super_block *);
/* Default file_operations tables for block, char, bad-socket and fifo nodes. */
18847 extern const struct file_operations def_blk_fops;
18848 extern const struct file_operations def_chr_fops;
18849 extern const struct file_operations bad_sock_fops;
18850 extern const struct file_operations def_fifo_fops;
/* Block-device ioctl entry points and open-by-holder interface. */
18851 extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
18852 extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
18853 extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
18854 extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
18855 extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
18857 extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
18859 extern int blkdev_put(struct block_device *bdev, fmode_t mode);
18860 extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
18861 extern void bd_unlink_disk_holder(struct block_device *bdev,
18862 struct gendisk *disk);
18863 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
18864 extern int register_chrdev_region(dev_t, unsigned, const char *);
18865 extern int __register_chrdev(unsigned int major, unsigned int baseminor,
18866 unsigned int count, const char *name,
18867 const struct file_operations *fops);
18868 extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
18869 unsigned int count, const char *name);
18870 extern void unregister_chrdev_region(dev_t, unsigned);
18871 extern void chrdev_show(struct seq_file *,off_t);
18872 static inline __attribute__((always_inline)) int register_chrdev(unsigned int major, const char *name,
18873 const struct file_operations *fops)
18875 return __register_chrdev(major, 0, 256, name, fops);
18877 static inline __attribute__((always_inline)) void unregister_chrdev(unsigned int major, const char *name)
18879 __unregister_chrdev(major, 0, 256, name);
18881 extern const char *__bdevname(dev_t, char *buffer);
18882 extern const char *bdevname(struct block_device *bdev, char *buffer);
18883 extern struct block_device *lookup_bdev(const char *);
18884 extern void blkdev_show(struct seq_file *,off_t);
18885 extern void init_special_inode(struct inode *, umode_t, dev_t);
18886 extern void make_bad_inode(struct inode *);
18887 extern int is_bad_inode(struct inode *);
18888 extern const struct file_operations read_pipefifo_fops;
18889 extern const struct file_operations write_pipefifo_fops;
18890 extern const struct file_operations rdwr_pipefifo_fops;
18891 extern int fs_may_remount_ro(struct super_block *);
18892 extern void check_disk_size_change(struct gendisk *disk,
18893 struct block_device *bdev);
18894 extern int revalidate_disk(struct gendisk *);
18895 extern int check_disk_change(struct block_device *);
18896 extern int __invalidate_device(struct block_device *, bool);
18897 extern int invalidate_partition(struct gendisk *, int);
18898 unsigned long invalidate_mapping_pages(struct address_space *mapping,
18899 unsigned long start, unsigned long end);
18900 static inline __attribute__((always_inline)) void invalidate_remote_inode(struct inode *inode)
18902 if (__builtin_constant_p((((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) || (((inode->i_mode) & 00170000) == 0120000)))) ? !!(((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) || (((inode->i_mode) & 00170000) == 0120000))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
18903 "include/linux/fs.h"
18906 , }; ______r = !!(((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) || (((inode->i_mode) & 00170000) == 0120000))); ______f.miss_hit[______r]++; ______r; }))
18907 invalidate_mapping_pages(inode->i_mapping, 0, -1);
18909 extern int invalidate_inode_pages2(struct address_space *mapping);
18910 extern int invalidate_inode_pages2_range(struct address_space *mapping,
18911 unsigned long start, unsigned long end);
/* Per-inode and per-mapping writeback entry points. */
18912 extern int write_inode_now(struct inode *, int);
18913 extern int filemap_fdatawrite(struct address_space *);
18914 extern int filemap_flush(struct address_space *);
18915 extern int filemap_fdatawait(struct address_space *);
18916 extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
18918 extern int filemap_write_and_wait(struct address_space *mapping);
18919 extern int filemap_write_and_wait_range(struct address_space *mapping,
18920 loff_t lstart, loff_t lend);
18921 extern int __filemap_fdatawrite_range(struct address_space *mapping,
18922 loff_t start, loff_t end, int sync_mode);
18923 extern int filemap_fdatawrite_range(struct address_space *mapping,
18924 loff_t start, loff_t end);
18925 extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
18927 extern int vfs_fsync(struct file *file, int datasync);
18928 extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
18929 extern void sync_supers(void);
18930 extern void emergency_sync(void);
18931 extern void emergency_remount(void);
18932 extern sector_t bmap(struct inode *, sector_t);
18933 extern int notify_change(struct dentry *, struct iattr *);
18934 extern int inode_permission(struct inode *, int);
18935 extern int generic_permission(struct inode *, int, unsigned int,
18936 int (*check_acl)(struct inode *, int, unsigned int));
18937 static inline __attribute__((always_inline)) bool execute_ok(struct inode *inode)
18939 return (inode->i_mode & (00100|00010|00001)) || (((inode->i_mode) & 00170000) == 0040000);
18941 extern int get_write_access(struct inode *);
18942 extern int deny_write_access(struct file *);
18943 static inline __attribute__((always_inline)) void put_write_access(struct inode * inode)
18945 atomic_dec(&inode->i_writecount);
18947 static inline __attribute__((always_inline)) void allow_write_access(struct file *file)
18949 if (__builtin_constant_p(((file))) ? !!((file)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 2207, }; ______r = !!((file)); ______f.miss_hit[______r]++; ______r; }))
18950 atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
18952 static inline __attribute__((always_inline)) void i_readcount_dec(struct inode *inode)
18956 static inline __attribute__((always_inline)) void i_readcount_inc(struct inode *inode)
18960 extern int do_pipe_flags(int *, int);
18961 extern struct file *create_read_pipe(struct file *f, int flags);
18962 extern struct file *create_write_pipe(int flags);
18963 extern void free_write_pipe(struct file *);
18964 extern int kernel_read(struct file *, loff_t, char *, unsigned long);
18965 extern struct file * open_exec(const char *);
18966 extern int is_subdir(struct dentry *, struct dentry *);
18967 extern int path_is_under(struct path *, struct path *);
18968 extern ino_t find_inode_number(struct dentry *, struct qstr *);
18969 extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
18970 extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
/* Inode allocation/initialisation and reference-count lifecycle. */
18971 extern int inode_init_always(struct super_block *, struct inode *);
18972 extern void inode_init_once(struct inode *);
18973 extern void address_space_init_once(struct address_space *mapping);
/* ihold/iput/igrab manage the inode reference count. */
18974 extern void ihold(struct inode * inode);
18975 extern void iput(struct inode *);
18976 extern struct inode * igrab(struct inode *);
18977 extern ino_t iunique(struct super_block *, ino_t);
18978 extern int inode_needs_sync(struct inode *inode);
/* Default ->drop_inode policies selectable by filesystems. */
18979 extern int generic_delete_inode(struct inode *inode);
18980 extern int generic_drop_inode(struct inode *inode);
18981 extern struct inode *ilookup5_nowait(struct super_block *sb,
18982 unsigned long hashval, int (*test)(struct inode *, void *),
18984 extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
18985 int (*test)(struct inode *, void *), void *data);
18986 extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
18987 extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
18988 extern struct inode * iget_locked(struct super_block *, unsigned long);
18989 extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
18990 extern int insert_inode_locked(struct inode *);
18991 extern void unlock_new_inode(struct inode *);
18992 extern unsigned int get_next_ino(void);
18993 extern void __iget(struct inode * inode);
18994 extern void iget_failed(struct inode *);
18995 extern void end_writeback(struct inode *);
18996 extern void __destroy_inode(struct inode *);
18997 extern struct inode *new_inode(struct super_block *);
18998 extern void free_inode_nonrcu(struct inode *inode);
18999 extern int should_remove_suid(struct dentry *);
19000 extern int file_remove_suid(struct file *);
19001 extern void __insert_inode_hash(struct inode *, unsigned long hashval);
19002 extern void remove_inode_hash(struct inode *);
19003 static inline __attribute__((always_inline)) void insert_inode_hash(struct inode *inode)
19005 __insert_inode_hash(inode, inode->i_ino);
19007 extern void inode_sb_list_add(struct inode *inode);
19008 extern void submit_bio(int, struct bio *);
19009 extern int bdev_read_only(struct block_device *);
19010 extern int set_blocksize(struct block_device *, int);
19011 extern int sb_set_blocksize(struct super_block *, int);
19012 extern int sb_min_blocksize(struct super_block *, int);
19013 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
19014 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
19015 extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
19016 int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
19017 extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
19018 extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long,
19020 extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
19021 extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
19022 unsigned long *, loff_t, loff_t *, size_t, size_t);
19023 extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
19024 unsigned long, loff_t, loff_t *, size_t, ssize_t);
19025 extern ssize_t do_sync_read(struct file *filp, char *buf, size_t len, loff_t *ppos);
19026 extern ssize_t do_sync_write(struct file *filp, const char *buf, size_t len, loff_t *ppos);
19027 extern int generic_segment_checks(const struct iovec *iov,
19028 unsigned long *nr_segs, size_t *count, int access_flags);
19029 extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
19030 unsigned long nr_segs, loff_t pos);
19031 extern int blkdev_fsync(struct file *filp, int datasync);
19032 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
19033 struct pipe_inode_info *, size_t, unsigned int);
19034 extern ssize_t default_file_splice_read(struct file *, loff_t *,
19035 struct pipe_inode_info *, size_t, unsigned int);
19036 extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
19037 struct file *, loff_t *, size_t, unsigned int);
19038 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
19039 struct file *out, loff_t *, size_t len, unsigned int flags);
19040 extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
19041 size_t len, unsigned int flags);
19043 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
19044 extern loff_t noop_llseek(struct file *file, loff_t offset, int origin);
19045 extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
19046 extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
19047 extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset,
19049 extern int generic_file_open(struct inode * inode, struct file * filp);
19050 extern int nonseekable_open(struct inode * inode, struct file * filp);
19051 static inline __attribute__((always_inline)) int xip_truncate_page(struct address_space *mapping, loff_t from)
19055 typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
19056 loff_t file_offset);
19058 DIO_LOCKING = 0x01,
19059 DIO_SKIP_HOLES = 0x02,
19061 void dio_end_io(struct bio *bio, int error);
19062 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
19063 struct block_device *bdev, const struct iovec *iov, loff_t offset,
19064 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
19065 dio_submit_t submit_io, int flags);
19066 static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
19067 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
19068 loff_t offset, unsigned long nr_segs, get_block_t get_block,
19069 dio_iodone_t end_io)
19071 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
19072 nr_segs, get_block, end_io, ((void *)0),
19073 DIO_LOCKING | DIO_SKIP_HOLES);
19075 extern const struct file_operations generic_ro_fops;
19076 extern int vfs_readlink(struct dentry *, char *, int, const char *);
19077 extern int vfs_follow_link(struct nameidata *, const char *);
19078 extern int page_readlink(struct dentry *, char *, int);
19079 extern void *page_follow_link_light(struct dentry *, struct nameidata *);
19080 extern void page_put_link(struct dentry *, struct nameidata *, void *);
19081 extern int __page_symlink(struct inode *inode, const char *symname, int len,
19083 extern int page_symlink(struct inode *inode, const char *symname, int len);
19084 extern const struct inode_operations page_symlink_inode_operations;
19085 extern int generic_readlink(struct dentry *, char *, int);
19086 extern void generic_fillattr(struct inode *, struct kstat *);
19087 extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
/* Inode block/byte accounting (locked __ variant plus locking wrappers). */
19088 void __inode_add_bytes(struct inode *inode, loff_t bytes);
19089 void inode_add_bytes(struct inode *inode, loff_t bytes);
19090 void inode_sub_bytes(struct inode *inode, loff_t bytes);
19091 loff_t inode_get_bytes(struct inode *inode);
19092 void inode_set_bytes(struct inode *inode, loff_t bytes);
/* readdir and the stat/lstat/fstat/fstatat syscall backends. */
19093 extern int vfs_readdir(struct file *, filldir_t, void *);
19094 extern int vfs_stat(const char *, struct kstat *);
19095 extern int vfs_lstat(const char *, struct kstat *);
19096 extern int vfs_fstat(unsigned int, struct kstat *);
19097 extern int vfs_fstatat(int , const char *, struct kstat *, int);
19098 extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
19099 unsigned long arg);
19100 extern int __generic_block_fiemap(struct inode *inode,
19101 struct fiemap_extent_info *fieinfo,
19102 loff_t start, loff_t len,
19103 get_block_t *get_block);
19104 extern int generic_block_fiemap(struct inode *inode,
19105 struct fiemap_extent_info *fieinfo, u64 start,
19106 u64 len, get_block_t *get_block);
19107 extern void get_filesystem(struct file_system_type *fs);
19108 extern void put_filesystem(struct file_system_type *fs);
19109 extern struct file_system_type *get_fs_type(const char *name);
19110 extern struct super_block *get_super(struct block_device *);
19111 extern struct super_block *get_active_super(struct block_device *bdev);
19112 extern struct super_block *user_get_super(dev_t);
19113 extern void drop_super(struct super_block *sb);
19114 extern void iterate_supers(void (*)(struct super_block *, void *), void *);
19115 extern int dcache_dir_open(struct inode *, struct file *);
19116 extern int dcache_dir_close(struct inode *, struct file *);
19117 extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
19118 extern int dcache_readdir(struct file *, void *, filldir_t);
19119 extern int simple_setattr(struct dentry *, struct iattr *);
19120 extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
19121 extern int simple_statfs(struct dentry *, struct kstatfs *);
19122 extern int simple_link(struct dentry *, struct inode *, struct dentry *);
19123 extern int simple_unlink(struct inode *, struct dentry *);
19124 extern int simple_rmdir(struct inode *, struct dentry *);
19125 extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
19126 extern int noop_fsync(struct file *, int);
19127 extern int simple_empty(struct dentry *);
19128 extern int simple_readpage(struct file *file, struct page *page);
19129 extern int simple_write_begin(struct file *file, struct address_space *mapping,
19130 loff_t pos, unsigned len, unsigned flags,
19131 struct page **pagep, void **fsdata);
19132 extern int simple_write_end(struct file *file, struct address_space *mapping,
19133 loff_t pos, unsigned len, unsigned copied,
19134 struct page *page, void *fsdata);
19135 extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
19136 extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *);
19137 extern const struct file_operations simple_dir_operations;
19138 extern const struct inode_operations simple_dir_inode_operations;
19139 struct tree_descr { char *name; const struct file_operations *ops; int mode; };
19140 struct dentry *d_alloc_name(struct dentry *, const char *);
19141 extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
19142 extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
19143 extern void simple_release_fs(struct vfsmount **mount, int *count);
19144 extern ssize_t simple_read_from_buffer(void *to, size_t count,
19145 loff_t *ppos, const void *from, size_t available);
19146 extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
19147 const void *from, size_t count);
19148 extern int generic_file_fsync(struct file *, int);
19149 extern int generic_check_addressable(unsigned, u64);
19150 extern int buffer_migrate_page(struct address_space *,
19151 struct page *, struct page *);
19152 extern int inode_change_ok(const struct inode *, struct iattr *);
19153 extern int inode_newsize_ok(const struct inode *, loff_t offset);
19154 extern void setattr_copy(struct inode *inode, const struct iattr *attr);
19155 extern void file_update_time(struct file *file);
19156 extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt);
19157 extern void save_mount_options(struct super_block *sb, char *options);
19158 extern void replace_mount_options(struct super_block *sb, char *options);
19159 static inline __attribute__((always_inline)) ino_t parent_ino(struct dentry *dentry)
19162 spin_lock(&dentry->d_lock);
19163 res = dentry->d_parent->d_inode->i_ino;
19164 spin_unlock(&dentry->d_lock);
19167 struct simple_transaction_argresp {
19171 char *simple_transaction_get(struct file *file, const char *buf,
19173 ssize_t simple_transaction_read(struct file *file, char *buf,
19174 size_t size, loff_t *pos);
19175 int simple_transaction_release(struct inode *inode, struct file *file);
19176 void simple_transaction_set(struct file *file, size_t n);
19177 static inline __attribute__((always_inline)) void __attribute__((format(printf, 1, 2)))
19178 __simple_attr_check_format(const char *fmt, ...)
19181 int simple_attr_open(struct inode *inode, struct file *file,
19182 int (*get)(void *, u64 *), int (*set)(void *, u64),
19184 int simple_attr_release(struct inode *inode, struct file *file);
19185 ssize_t simple_attr_read(struct file *file, char *buf,
19186 size_t len, loff_t *ppos);
19187 ssize_t simple_attr_write(struct file *file, const char *buf,
19188 size_t len, loff_t *ppos);
19190 int proc_nr_files(struct ctl_table *table, int write,
19191 void *buffer, size_t *lenp, loff_t *ppos);
19192 int proc_nr_dentry(struct ctl_table *table, int write,
19193 void *buffer, size_t *lenp, loff_t *ppos);
19194 int proc_nr_inodes(struct ctl_table *table, int write,
19195 void *buffer, size_t *lenp, loff_t *ppos);
19196 int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) get_filesystem_list(char *buf);
19197 static inline __attribute__((always_inline)) int is_sxid(mode_t mode)
19199 return (mode & 0004000) || ((mode & 0002000) && (mode & 00010));
19201 static inline __attribute__((always_inline)) void inode_has_no_xattr(struct inode *inode)
19203 if (__builtin_constant_p(((!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & (1<<28))))) ? !!((!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & (1<<28)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 2597, }; ______r = !!((!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & (1<<28)))); ______f.miss_hit[______r]++; ______r; }))
19204 inode->i_flags |= 4096;
19210 int add_range(struct range *range, int az, int nr_range,
19211 u64 start, u64 end);
19212 int add_range_with_merge(struct range *range, int az, int nr_range,
19213 u64 start, u64 end);
19214 void subtract_range(struct range *range, int az, u64 start, u64 end);
19215 int clean_sort_range(struct range *range, int az);
19216 void sort_range(struct range *range, int nr_range);
19217 static inline __attribute__((always_inline)) resource_size_t cap_resource(u64 val)
19219 if (__builtin_constant_p(((val > ((resource_size_t)~0)))) ? !!((val > ((resource_size_t)~0))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/range.h", .line = 25, }; ______r = !!((val > ((resource_size_t)~0))); ______f.miss_hit[______r]++; ______r; }))
19220 return ((resource_size_t)~0);
19225 struct file_ra_state;
19226 struct user_struct;
19227 struct writeback_control;
19228 extern unsigned long max_mapnr;
19229 extern unsigned long num_physpages;
19230 extern unsigned long totalram_pages;
19231 extern void * high_memory;
19232 extern int page_cluster;
19233 extern int sysctl_legacy_va_layout;
19234 extern unsigned long empty_zero_page[((1UL) << 12) / sizeof(unsigned long)];
19235 extern spinlock_t pgd_lock;
19236 extern struct list_head pgd_list;
19237 extern struct mm_struct *pgd_page_get_mm(struct page *page);
19238 static inline __attribute__((always_inline)) int pte_dirty(pte_t pte)
19240 return pte_flags(pte) & (((pteval_t)(1)) << 6);
19242 static inline __attribute__((always_inline)) int pte_young(pte_t pte)
19244 return pte_flags(pte) & (((pteval_t)(1)) << 5);
19246 static inline __attribute__((always_inline)) int pmd_young(pmd_t pmd)
19248 return pmd_flags(pmd) & (((pteval_t)(1)) << 5);
19250 static inline __attribute__((always_inline)) int pte_write(pte_t pte)
19252 return pte_flags(pte) & (((pteval_t)(1)) << 1);
19254 static inline __attribute__((always_inline)) int pte_file(pte_t pte)
19256 return pte_flags(pte) & (((pteval_t)(1)) << 6);
19258 static inline __attribute__((always_inline)) int pte_huge(pte_t pte)
19260 return pte_flags(pte) & (((pteval_t)(1)) << 7);
19262 static inline __attribute__((always_inline)) int pte_global(pte_t pte)
19264 return pte_flags(pte) & (((pteval_t)(1)) << 8);
19266 static inline __attribute__((always_inline)) int pte_exec(pte_t pte)
19268 return !(pte_flags(pte) & (((pteval_t)(1)) << 63));
19270 static inline __attribute__((always_inline)) int pte_special(pte_t pte)
19272 return pte_flags(pte) & (((pteval_t)(1)) << 9);
19274 static inline __attribute__((always_inline)) unsigned long pte_pfn(pte_t pte)
19276 return (pte_val(pte) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1))))) >> 12;
19278 static inline __attribute__((always_inline)) unsigned long pmd_pfn(pmd_t pmd)
19280 return (pmd_val(pmd) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1))))) >> 12;
19282 static inline __attribute__((always_inline)) int pmd_large(pmd_t pte)
19284 return (pmd_flags(pte) & ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0))) ==
19285 ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0));
19287 static inline __attribute__((always_inline)) int pmd_trans_splitting(pmd_t pmd)
19289 return pmd_val(pmd) & (((pteval_t)(1)) << 9);
19291 static inline __attribute__((always_inline)) int pmd_trans_huge(pmd_t pmd)
19293 return pmd_val(pmd) & (((pteval_t)(1)) << 7);
19295 static inline __attribute__((always_inline)) int has_transparent_hugepage(void)
19297 return (__builtin_constant_p((0*32+ 3)) && ( ((((0*32+ 3))>>5)==0 && (1UL<<(((0*32+ 3))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+ 3))>>5)==1 && (1UL<<(((0*32+ 3))&31) & (0|0))) || ((((0*32+ 3))>>5)==2 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==3 && (1UL<<(((0*32+ 3))&31) & (0))) || ((((0*32+ 3))>>5)==4 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==5 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==6 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==7 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==8 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==9 && (1UL<<(((0*32+ 3))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+ 3))) ? constant_test_bit(((0*32+ 3)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+ 3)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))));
19299 static inline __attribute__((always_inline)) pte_t pte_set_flags(pte_t pte, pteval_t set)
19301 pteval_t v = native_pte_val(pte);
19302 return native_make_pte(v | set);
19304 static inline __attribute__((always_inline)) pte_t pte_clear_flags(pte_t pte, pteval_t clear)
19306 pteval_t v = native_pte_val(pte);
19307 return native_make_pte(v & ~clear);
19309 static inline __attribute__((always_inline)) pte_t pte_mkclean(pte_t pte)
19311 return pte_clear_flags(pte, (((pteval_t)(1)) << 6));
19313 static inline __attribute__((always_inline)) pte_t pte_mkold(pte_t pte)
19315 return pte_clear_flags(pte, (((pteval_t)(1)) << 5));
19317 static inline __attribute__((always_inline)) pte_t pte_wrprotect(pte_t pte)
19319 return pte_clear_flags(pte, (((pteval_t)(1)) << 1));
19321 static inline __attribute__((always_inline)) pte_t pte_mkexec(pte_t pte)
19323 return pte_clear_flags(pte, (((pteval_t)(1)) << 63));
19325 static inline __attribute__((always_inline)) pte_t pte_mkdirty(pte_t pte)
19327 return pte_set_flags(pte, (((pteval_t)(1)) << 6));
19329 static inline __attribute__((always_inline)) pte_t pte_mkyoung(pte_t pte)
19331 return pte_set_flags(pte, (((pteval_t)(1)) << 5));
19333 static inline __attribute__((always_inline)) pte_t pte_mkwrite(pte_t pte)
19335 return pte_set_flags(pte, (((pteval_t)(1)) << 1));
19337 static inline __attribute__((always_inline)) pte_t pte_mkhuge(pte_t pte)
19339 return pte_set_flags(pte, (((pteval_t)(1)) << 7));
19341 static inline __attribute__((always_inline)) pte_t pte_clrhuge(pte_t pte)
19343 return pte_clear_flags(pte, (((pteval_t)(1)) << 7));
19345 static inline __attribute__((always_inline)) pte_t pte_mkglobal(pte_t pte)
19347 return pte_set_flags(pte, (((pteval_t)(1)) << 8));
19349 static inline __attribute__((always_inline)) pte_t pte_clrglobal(pte_t pte)
19351 return pte_clear_flags(pte, (((pteval_t)(1)) << 8));
19353 static inline __attribute__((always_inline)) pte_t pte_mkspecial(pte_t pte)
19355 return pte_set_flags(pte, (((pteval_t)(1)) << 9));
/*
 * pmd-level flag helpers, parallel to the pte_* set above.  The flag
 * constants reuse the pteval_t-cast masks (same bit layout at the pmd
 * level on x86).  Preprocessed output: brace lines elided by extraction.
 */
19357 static inline __attribute__((always_inline)) pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
19359 pmdval_t v = native_pmd_val(pmd);
19360 return __pmd(v | set);
19362 static inline __attribute__((always_inline)) pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
19364 pmdval_t v = native_pmd_val(pmd);
19365 return __pmd(v & ~clear);
19367 static inline __attribute__((always_inline)) pmd_t pmd_mkold(pmd_t pmd)
/* clear accessed bit (5) */
19369 return pmd_clear_flags(pmd, (((pteval_t)(1)) << 5));
19371 static inline __attribute__((always_inline)) pmd_t pmd_wrprotect(pmd_t pmd)
/* clear write bit (1) */
19373 return pmd_clear_flags(pmd, (((pteval_t)(1)) << 1));
19375 static inline __attribute__((always_inline)) pmd_t pmd_mkdirty(pmd_t pmd)
19377 return pmd_set_flags(pmd, (((pteval_t)(1)) << 6));
19379 static inline __attribute__((always_inline)) pmd_t pmd_mkhuge(pmd_t pmd)
/* set PSE bit (7): transparent-huge / large pmd */
19381 return pmd_set_flags(pmd, (((pteval_t)(1)) << 7));
19383 static inline __attribute__((always_inline)) pmd_t pmd_mkyoung(pmd_t pmd)
19385 return pmd_set_flags(pmd, (((pteval_t)(1)) << 5));
19387 static inline __attribute__((always_inline)) pmd_t pmd_mkwrite(pmd_t pmd)
19389 return pmd_set_flags(pmd, (((pteval_t)(1)) << 1));
19391 static inline __attribute__((always_inline)) pmd_t pmd_mknotpresent(pmd_t pmd)
/* clear present bit (0) */
19393 return pmd_clear_flags(pmd, (((pteval_t)(1)) << 0));
19395 static inline __attribute__((always_inline)) pgprotval_t massage_pgprot(pgprot_t pgprot)
19397 pgprotval_t protval = ((pgprot).pgprot);
19398 if (__builtin_constant_p(((protval & (((pteval_t)(1)) << 0)))) ? !!((protval & (((pteval_t)(1)) << 0))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/pgtable.h", .line = 301, }; ______r = !!((protval & (((pteval_t)(1)) << 0))); ______f.miss_hit[______r]++; ______r; }))
19399 protval &= __supported_pte_mask;
/*
 * pfn_pte()/pfn_pmd(): build an entry from a page frame number plus a
 * protection value; the pfn is shifted left by 12 (PAGE_SHIFT) and OR-ed
 * with the sanitized protection bits from massage_pgprot().
 */
19402 static inline __attribute__((always_inline)) pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
19404 return __pte(((phys_addr_t)page_nr << 12) |
19405 massage_pgprot(pgprot));
19407 static inline __attribute__((always_inline)) pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
19409 return __pmd(((phys_addr_t)page_nr << 12) |
19410 massage_pgprot(pgprot));
/*
 * pte_modify()/pmd_modify(): swap the protection of an entry while
 * preserving a "chg" mask: the 44-bit physical-frame field plus bits
 * 4 (PCD), 3 (PWT), 9 (SPECIAL), 5 (ACCESSED) and 6 (DIRTY); the pmd
 * variant additionally preserves bit 7 (PSE).  Everything else is taken
 * from newprot.  (Return statements were elided by extraction.)
 */
19412 static inline __attribute__((always_inline)) pte_t pte_modify(pte_t pte, pgprot_t newprot)
19414 pteval_t val = pte_val(pte);
19415 val &= (((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6));
19416 val |= massage_pgprot(newprot) & ~(((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6));
19419 static inline __attribute__((always_inline)) pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
19421 pmdval_t val = pmd_val(pmd);
19422 val &= ((((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6)) | (((pteval_t)(1)) << 7));
19423 val |= massage_pgprot(newprot) & ~((((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6)) | (((pteval_t)(1)) << 7));
/* pgprot_modify(): keep the preservable bits of oldprot, add newprot. */
19426 static inline __attribute__((always_inline)) pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
19428 pgprotval_t preservebits = ((oldprot).pgprot) & (((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6));
19429 pgprotval_t addbits = ((newprot).pgprot);
19430 return ((pgprot_t) { (preservebits | addbits) } );
19432 static inline __attribute__((always_inline)) int is_new_memtype_allowed(u64 paddr, unsigned long size,
19433 unsigned long flags,
19434 unsigned long new_flags)
19436 if (__builtin_constant_p(((x86_platform.is_untracked_pat_range(paddr, paddr + size)))) ? !!((x86_platform.is_untracked_pat_range(paddr, paddr + size))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/pgtable.h", .line = 363, }; ______r = !!((x86_platform.is_untracked_pat_range(paddr, paddr + size))); ______f.miss_hit[______r]++; ______r; }))
19438 if (__builtin_constant_p((((flags == ((((pteval_t)(1)) << 4)) && new_flags == (0)) || (flags == ((((pteval_t)(1)) << 3)) && new_flags == (0))))) ? !!(((flags == ((((pteval_t)(1)) << 4)) && new_flags == (0)) || (flags == ((((pteval_t)(1)) << 3)) && new_flags == (0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
19439 "/data/exp/linux-3.0.4/arch/x86/include/asm/pgtable.h"
19442 , }; ______r = !!(((flags == ((((pteval_t)(1)) << 4)) && new_flags == (0)) || (flags == ((((pteval_t)(1)) << 3)) && new_flags == (0)))); ______f.miss_hit[______r]++; ______r; }))
/* Early page-table population helpers and the kernel page directories
 * (1024 pgd slots: 32-bit x86 layout). */
19448 pmd_t *populate_extra_pmd(unsigned long vaddr);
19449 pte_t *populate_extra_pte(unsigned long vaddr);
19451 struct vm_area_struct;
19452 extern pgd_t swapper_pg_dir[1024];
19453 extern pgd_t initial_page_table[1024];
/* No page-table caches on x86: both hooks are deliberate no-ops. */
19454 static inline __attribute__((always_inline)) void pgtable_cache_init(void) { }
19455 static inline __attribute__((always_inline)) void check_pgt_cache(void) { }
19456 void paging_init(void);
19457 extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
/*
 * PAE setters (3-level paging: 64-bit entries on 32-bit x86).  The
 * non-atomic native_set_pte() stores the high word, issues a compiler
 * barrier, then stores the low word -- presumably so the half containing
 * the present bit is written last; TODO confirm word layout.
 * Brace lines were elided by extraction throughout.
 */
19458 static inline __attribute__((always_inline)) void native_set_pte(pte_t *ptep, pte_t pte)
19460 ptep->pte_high = pte.pte_high;
19461 __asm__ __volatile__("": : :"memory");
19462 ptep->pte_low = pte.pte_low;
/* Atomic variants: set_64bit() writes both halves in one 64-bit store. */
19464 static inline __attribute__((always_inline)) void native_set_pte_atomic(pte_t *ptep, pte_t pte)
19466 set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
19468 static inline __attribute__((always_inline)) void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
19470 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
19472 static inline __attribute__((always_inline)) void native_set_pud(pud_t *pudp, pud_t pud)
19474 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
/* Clear helpers: several interior lines (the low-word stores) were
 * elided by extraction; only the barrier and high-word store remain. */
19476 static inline __attribute__((always_inline)) void native_pte_clear(struct mm_struct *mm, unsigned long addr,
19480 __asm__ __volatile__("": : :"memory");
19481 ptep->pte_high = 0;
19483 static inline __attribute__((always_inline)) void native_pmd_clear(pmd_t *pmd)
19485 u32 *tmp = (u32 *)pmd;
19487 __asm__ __volatile__("": : :"memory");
19490 static inline __attribute__((always_inline)) void pud_clear(pud_t *pudp)
19492 set_pud(pudp, ((pud_t) { __pgd(0) } ));
19494 static inline __attribute__((always_inline)) pte_t native_ptep_get_and_clear(pte_t *ptep)
19497 res.pte_low = ({ __typeof(*((&ptep->pte_low))) __x = ((0)); switch (sizeof(*&ptep->pte_low)) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&ptep->pte_low)); asm volatile("xchgb %0,%1" : "=q" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&ptep->pte_low)); asm volatile("xchgw %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&ptep->pte_low)); asm volatile("xchgl %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } default: __xchg_wrong_size(); } __x; });
19498 res.pte_high = ptep->pte_high;
19499 ptep->pte_high = 0;
19509 static inline __attribute__((always_inline)) pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
19511 union split_pmd res, *orig = (union split_pmd *)pmdp;
19512 res.pmd_low = ({ __typeof(*((&orig->pmd_low))) __x = ((0)); switch (sizeof(*&orig->pmd_low)) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&orig->pmd_low)); asm volatile("xchgb %0,%1" : "=q" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&orig->pmd_low)); asm volatile("xchgw %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&orig->pmd_low)); asm volatile("xchgl %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } default: __xchg_wrong_size(); } __x; });
19513 res.pmd_high = orig->pmd_high;
19514 orig->pmd_high = 0;
/*
 * Entry predicates and table-walk helpers.  Preprocessed output; some
 * function bodies were elided by extraction.
 */
19517 static inline __attribute__((always_inline)) int pte_none(pte_t pte)
19521 static inline __attribute__((always_inline)) int pte_same(pte_t a, pte_t b)
19523 return a.pte == b.pte;
/* "present" if either bit 0 (PRESENT) or bit 8 is set -- bit 8 is
 * presumably the PROTNONE/GLOBAL marker; TODO confirm. */
19525 static inline __attribute__((always_inline)) int pte_present(pte_t a)
19527 return pte_flags(a) & ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 8));
/* Mask is compile-time zero in this configuration, so pte_hidden()
 * always evaluates to 0 here. */
19529 static inline __attribute__((always_inline)) int pte_hidden(pte_t pte)
19531 return pte_flags(pte) & (((pteval_t)(0)));
19533 static inline __attribute__((always_inline)) int pmd_present(pmd_t pmd)
19535 return pmd_flags(pmd) & (((pteval_t)(1)) << 0);
19537 static inline __attribute__((always_inline)) int pmd_none(pmd_t pmd)
19539 return (unsigned long)native_pmd_val(pmd) == 0;
/* Virtual address of the page table this pmd points to: mask to the
 * 44-bit frame field, then add PAGE_OFFSET (0xC0000000). */
19541 static inline __attribute__((always_inline)) unsigned long pmd_page_vaddr(pmd_t pmd)
19543 return (unsigned long)((void *)((unsigned long)(pmd_val(pmd) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))))+((unsigned long)(0xC0000000UL))));
/* PAE indexing: 512 pmds (address bits 29..21) and 512 ptes (20..12). */
19545 static inline __attribute__((always_inline)) unsigned long pmd_index(unsigned long address)
19547 return (address >> 21) & (512 - 1);
19549 static inline __attribute__((always_inline)) unsigned long pte_index(unsigned long address)
19551 return (address >> 12) & (512 - 1);
19553 static inline __attribute__((always_inline)) pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
19555 return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
/* A sane table-pointing pmd must have exactly PRESENT|RW|ACCESSED|DIRTY
 * set, ignoring bit 2 (USER). */
19557 static inline __attribute__((always_inline)) int pmd_bad(pmd_t pmd)
19559 return (pmd_flags(pmd) & ~(((pteval_t)(1)) << 2)) != ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 1) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6));
/* pages -> megabytes: shift by (20 - PAGE_SHIFT). */
19561 static inline __attribute__((always_inline)) unsigned long pages_to_mb(unsigned long npg)
19563 return npg >> (20 - 12);
19565 static inline __attribute__((always_inline)) int pud_none(pud_t pud)
19567 return native_pud_val(pud) == 0;
19569 static inline __attribute__((always_inline)) int pud_present(pud_t pud)
19571 return pud_flags(pud) & (((pteval_t)(1)) << 0);
19573 static inline __attribute__((always_inline)) unsigned long pud_page_vaddr(pud_t pud)
19575 return (unsigned long)((void *)((unsigned long)((unsigned long)(pgd_val((pud).pgd)) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))))+((unsigned long)(0xC0000000UL))));
19577 static inline __attribute__((always_inline)) pmd_t *pmd_offset(pud_t *pud, unsigned long address)
19579 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
/* Large pud: both PSE (7) and PRESENT (0) set. */
19581 static inline __attribute__((always_inline)) int pud_large(pud_t pud)
19583 return ((pgd_val((pud).pgd)) & ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0))) ==
19584 ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0));
19586 static inline __attribute__((always_inline)) int pud_bad(pud_t pud)
19588 return (pud_flags(pud) & ~(((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 1) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6)) | (((pteval_t)(1)) << 2))) != 0;
19590 extern int direct_gbpages;
/* "Local" get-and-clear variants (no cross-CPU flush): declaration and
 * return lines were elided by extraction. */
19591 static inline __attribute__((always_inline)) pte_t native_local_ptep_get_and_clear(pte_t *ptep)
19594 native_pte_clear(((void *)0), 0, ptep);
19597 static inline __attribute__((always_inline)) pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
19600 native_pmd_clear(pmdp);
/* Non-paravirt set_pte_at/set_pmd_at: mm and addr are unused, the store
 * is simply the native setter. */
19603 static inline __attribute__((always_inline)) void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
19604 pte_t *ptep , pte_t pte)
19606 native_set_pte(ptep, pte);
19608 static inline __attribute__((always_inline)) void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
19609 pmd_t *pmdp , pmd_t pmd)
19611 native_set_pmd(pmdp, pmd);
19613 struct vm_area_struct;
/* Out-of-line accessed/dirty maintenance helpers. */
19614 extern int ptep_set_access_flags(struct vm_area_struct *vma,
19615 unsigned long address, pte_t *ptep,
19616 pte_t entry, int dirty);
19617 extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
19618 unsigned long addr, pte_t *ptep);
19619 extern int ptep_clear_flush_young(struct vm_area_struct *vma,
19620 unsigned long address, pte_t *ptep);
/* Atomically snapshot-and-clear the pte, then run the update hook.
 * (Braces and return were elided by extraction.) */
19621 static inline __attribute__((always_inline)) pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
19624 pte_t pte = native_ptep_get_and_clear(ptep);
19625 pte_update(mm, addr, ptep);
19628 static inline __attribute__((always_inline)) pte_t ptep_get_and_clear_full(struct mm_struct *mm,
19629 unsigned long addr, pte_t *ptep,
19633 if (__builtin_constant_p(((full))) ? !!((full)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/pgtable.h", .line = 686, }; ______r = !!((full)); ______f.miss_hit[______r]++; ______r; })) {
19634 pte = native_local_ptep_get_and_clear(ptep);
19636 pte = ptep_get_and_clear(mm, addr, ptep);
/* Write-protect in place: atomically clear bit 1 (RW) in the low word
 * of the entry, then run the pte/pmd update hook. */
19640 static inline __attribute__((always_inline)) void ptep_set_wrprotect(struct mm_struct *mm,
19641 unsigned long addr, pte_t *ptep)
19643 clear_bit(1, (unsigned long *)&ptep->pte);
19644 pte_update(mm, addr, ptep);
/* pmd-level accessed/dirty/splitting maintenance (out of line). */
19646 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
19647 unsigned long address, pmd_t *pmdp,
19648 pmd_t entry, int dirty);
19649 extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
19650 unsigned long addr, pmd_t *pmdp);
19651 extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
19652 unsigned long address, pmd_t *pmdp);
19653 extern void pmdp_splitting_flush(struct vm_area_struct *vma,
19654 unsigned long addr, pmd_t *pmdp);
19655 static inline __attribute__((always_inline)) int pmd_write(pmd_t pmd)
/* writable iff RW bit (1) is set */
19657 return pmd_flags(pmd) & (((pteval_t)(1)) << 1);
19659 static inline __attribute__((always_inline)) pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
19662 pmd_t pmd = native_pmdp_get_and_clear(pmdp);
19663 pmd_update(mm, addr, pmdp);
19666 static inline __attribute__((always_inline)) void pmdp_set_wrprotect(struct mm_struct *mm,
19667 unsigned long addr, pmd_t *pmdp)
19669 clear_bit(1, (unsigned long *)pmdp);
19670 pmd_update(mm, addr, pmdp);
/* Bulk-copy a range of pgd slots (kernel-mapping cloning). */
19672 static inline __attribute__((always_inline)) void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
19674 __builtin_memcpy(dst, src, count * sizeof(pgd_t));
/* Full-teardown clear of a not-present pte: plain pte_clear suffices. */
19676 static inline __attribute__((always_inline)) void pte_clear_not_present_full(struct mm_struct *mm,
19677 unsigned long address,
19681 pte_clear(mm, address, ptep);
19683 extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
19684 unsigned long address,
19686 extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
19687 unsigned long address,
19689 static inline __attribute__((always_inline)) int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
19691 return pmd_val(pmd_a) == pmd_val(pmd_b);
/* Diagnostics for corrupt entries (defined out of line). */
19693 void pgd_clear_bad(pgd_t *);
19694 void pud_clear_bad(pud_t *);
19695 void pmd_clear_bad(pmd_t *);
19696 static inline __attribute__((always_inline)) int pgd_none_or_clear_bad(pgd_t *pgd)
19698 if (__builtin_constant_p(((pgd_none(*pgd)))) ? !!((pgd_none(*pgd))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 257, }; ______r = !!((pgd_none(*pgd))); ______f.miss_hit[______r]++; ______r; }))
19700 if (__builtin_constant_p((((__builtin_constant_p(pgd_bad(*pgd)) ? !!(pgd_bad(*pgd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 259, }; ______r = __builtin_expect(!!(pgd_bad(*pgd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(pgd_bad(*pgd)) ? !!(pgd_bad(*pgd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 259, }; ______r = __builtin_expect(!!(pgd_bad(*pgd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 259, }; ______r = !!(((__builtin_constant_p(pgd_bad(*pgd)) ? !!(pgd_bad(*pgd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 259, }; ______r = __builtin_expect(!!(pgd_bad(*pgd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
19701 pgd_clear_bad(pgd);
19706 static inline __attribute__((always_inline)) int pud_none_or_clear_bad(pud_t *pud)
19708 if (__builtin_constant_p(((pud_none(*pud)))) ? !!((pud_none(*pud))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 268, }; ______r = !!((pud_none(*pud))); ______f.miss_hit[______r]++; ______r; }))
19710 if (__builtin_constant_p((((__builtin_constant_p(pud_bad(*pud)) ? !!(pud_bad(*pud)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 270, }; ______r = __builtin_expect(!!(pud_bad(*pud)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(pud_bad(*pud)) ? !!(pud_bad(*pud)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 270, }; ______r = __builtin_expect(!!(pud_bad(*pud)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 270, }; ______r = !!(((__builtin_constant_p(pud_bad(*pud)) ? !!(pud_bad(*pud)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 270, }; ______r = __builtin_expect(!!(pud_bad(*pud)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
19711 pud_clear_bad(pud);
19716 static inline __attribute__((always_inline)) int pmd_none_or_clear_bad(pmd_t *pmd)
19718 if (__builtin_constant_p(((pmd_none(*pmd)))) ? !!((pmd_none(*pmd))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 279, }; ______r = !!((pmd_none(*pmd))); ______f.miss_hit[______r]++; ______r; }))
19720 if (__builtin_constant_p((((__builtin_constant_p(pmd_bad(*pmd)) ? !!(pmd_bad(*pmd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 281, }; ______r = __builtin_expect(!!(pmd_bad(*pmd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(pmd_bad(*pmd)) ? !!(pmd_bad(*pmd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 281, }; ______r = __builtin_expect(!!(pmd_bad(*pmd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 281, }; ______r = !!(((__builtin_constant_p(pmd_bad(*pmd)) ? !!(pmd_bad(*pmd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 281, }; ______r = __builtin_expect(!!(pmd_bad(*pmd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
19721 pmd_clear_bad(pmd);
/* Default transactional prot-modify pair: start = atomic get-and-clear,
 * commit = plain set_pte_at. */
19726 static inline __attribute__((always_inline)) pte_t __ptep_modify_prot_start(struct mm_struct *mm,
19727 unsigned long addr,
19730 return ptep_get_and_clear(mm, addr, ptep);
19732 static inline __attribute__((always_inline)) void __ptep_modify_prot_commit(struct mm_struct *mm,
19733 unsigned long addr,
19734 pte_t *ptep, pte_t pte)
19736 set_pte_at(mm, addr, ptep, pte);
/* PAT page-frame-range tracking interface. */
19738 extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
19739 unsigned long pfn, unsigned long size);
19740 extern int track_pfn_vma_copy(struct vm_area_struct *vma);
19741 extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
19742 unsigned long size);
19743 extern struct kmem_cache *vm_area_cachep;
19744 extern pgprot_t protection_map[16];
/* 0x40000000 is presumably VM_PFN_AT_MMAP and 0x00000400 VM_PFNMAP --
 * TODO confirm against the vm_flags definitions in linux/mm.h. */
19745 static inline __attribute__((always_inline)) int is_linear_pfn_mapping(struct vm_area_struct *vma)
19747 return !!(vma->vm_flags & 0x40000000);
19749 static inline __attribute__((always_inline)) int is_pfn_mapping(struct vm_area_struct *vma)
19751 return !!(vma->vm_flags & 0x00000400);
/* Tail of struct vm_fault (opening lines elided by extraction). */
19754 unsigned int flags;
19755 unsigned long pgoff;
19756 void *virtual_address;
/* Callbacks a driver supplies for its VMAs. */
19759 struct vm_operations_struct {
19760 void (*open)(struct vm_area_struct * area);
19761 void (*close)(struct vm_area_struct * area);
19762 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
19763 int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
19764 int (*access)(struct vm_area_struct *vma, unsigned long addr,
19765 void *buf, int len, int write);
/* Fragment of enum pageflags: aliased flag names -- several subsystems
 * reuse the same underlying page-flag bit. */
19795 PG_checked = PG_owner_priv_1,
19796 PG_fscache = PG_private_2,
19797 PG_pinned = PG_owner_priv_1,
19798 PG_savepinned = PG_dirty,
19799 PG_slob_free = PG_private,
19800 PG_slub_frozen = PG_active,
19803 static inline __attribute__((always_inline)) int PageLocked(struct page *page) { return (__builtin_constant_p((PG_locked)) ? constant_test_bit((PG_locked), (&page->flags)) : variable_test_bit((PG_locked), (&page->flags))); }
19804 static inline __attribute__((always_inline)) int PageError(struct page *page) { return (__builtin_constant_p((PG_error)) ? constant_test_bit((PG_error), (&page->flags)) : variable_test_bit((PG_error), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageError(struct page *page) { set_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) void ClearPageError(struct page *page) { clear_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageError(struct page *page) { return test_and_clear_bit(PG_error, &page->flags); }
19805 static inline __attribute__((always_inline)) int PageReferenced(struct page *page) { return (__builtin_constant_p((PG_referenced)) ? constant_test_bit((PG_referenced), (&page->flags)) : variable_test_bit((PG_referenced), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageReferenced(struct page *page) { set_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReferenced(struct page *page) { clear_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReferenced(struct page *page) { return test_and_clear_bit(PG_referenced, &page->flags); }
19806 static inline __attribute__((always_inline)) int PageDirty(struct page *page) { return (__builtin_constant_p((PG_dirty)) ? constant_test_bit((PG_dirty), (&page->flags)) : variable_test_bit((PG_dirty), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageDirty(struct page *page) { set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void ClearPageDirty(struct page *page) { clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageDirty(struct page *page) { return test_and_set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageDirty(struct page *page) { return test_and_clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageDirty(struct page *page) { __clear_bit(PG_dirty, &page->flags); }
19807 static inline __attribute__((always_inline)) int PageLRU(struct page *page) { return (__builtin_constant_p((PG_lru)) ? constant_test_bit((PG_lru), (&page->flags)) : variable_test_bit((PG_lru), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageLRU(struct page *page) { set_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void ClearPageLRU(struct page *page) { clear_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageLRU(struct page *page) { __clear_bit(PG_lru, &page->flags); }
19808 static inline __attribute__((always_inline)) int PageActive(struct page *page) { return (__builtin_constant_p((PG_active)) ? constant_test_bit((PG_active), (&page->flags)) : variable_test_bit((PG_active), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageActive(struct page *page) { set_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void ClearPageActive(struct page *page) { clear_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageActive(struct page *page) { __clear_bit(PG_active, &page->flags); }
19809 static inline __attribute__((always_inline)) int TestClearPageActive(struct page *page) { return test_and_clear_bit(PG_active, &page->flags); }
19810 static inline __attribute__((always_inline)) int PageSlab(struct page *page) { return (__builtin_constant_p((PG_slab)) ? constant_test_bit((PG_slab), (&page->flags)) : variable_test_bit((PG_slab), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageSlab(struct page *page) { __set_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlab(struct page *page) { __clear_bit(PG_slab, &page->flags); }
19811 static inline __attribute__((always_inline)) int PageChecked(struct page *page) { return (__builtin_constant_p((PG_checked)) ? constant_test_bit((PG_checked), (&page->flags)) : variable_test_bit((PG_checked), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageChecked(struct page *page) { set_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageChecked(struct page *page) { clear_bit(PG_checked, &page->flags); }
19812 static inline __attribute__((always_inline)) int PagePinned(struct page *page) { return (__builtin_constant_p((PG_pinned)) ? constant_test_bit((PG_pinned), (&page->flags)) : variable_test_bit((PG_pinned), (&page->flags))); } static inline __attribute__((always_inline)) void SetPagePinned(struct page *page) { set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePinned(struct page *page) { clear_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestSetPagePinned(struct page *page) { return test_and_set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestClearPagePinned(struct page *page) { return test_and_clear_bit(PG_pinned, &page->flags); }
19813 static inline __attribute__((always_inline)) int PageSavePinned(struct page *page) { return (__builtin_constant_p((PG_savepinned)) ? constant_test_bit((PG_savepinned), (&page->flags)) : variable_test_bit((PG_savepinned), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageSavePinned(struct page *page) { set_bit(PG_savepinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSavePinned(struct page *page) { clear_bit(PG_savepinned, &page->flags); };
19814 static inline __attribute__((always_inline)) int PageReserved(struct page *page) { return (__builtin_constant_p((PG_reserved)) ? constant_test_bit((PG_reserved), (&page->flags)) : variable_test_bit((PG_reserved), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageReserved(struct page *page) { set_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReserved(struct page *page) { clear_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageReserved(struct page *page) { __clear_bit(PG_reserved, &page->flags); }
/*
 * Generated code: cpp expansion of the Linux kernel's page-flag accessor
 * macros (TESTPAGEFLAG/SETPAGEFLAG/... from include/linux/page-flags.h).
 * Each helper tests, sets or clears one PG_* bit in page->flags.  The
 * __builtin_constant_p() split lets the compiler use constant_test_bit()
 * when the bit index is a compile-time constant.  set_bit/clear_bit are the
 * atomic forms; the __set_bit/__clear_bit variants are non-atomic and safe
 * only when the caller holds exclusive access to the page.
 * NOTE(review): this is preprocessor output in a numbered listing -- do not
 * hand-edit; change the original headers and regenerate instead.
 */
/* PG_swapbacked: page is backed by swap (anonymous or shmem memory). */
19815 static inline __attribute__((always_inline)) int PageSwapBacked(struct page *page) { return (__builtin_constant_p((PG_swapbacked)) ? constant_test_bit((PG_swapbacked), (&page->flags)) : variable_test_bit((PG_swapbacked), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageSwapBacked(struct page *page) { set_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSwapBacked(struct page *page) { clear_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSwapBacked(struct page *page) { __clear_bit(PG_swapbacked, &page->flags); }
/* PG_slob_free: SLOB allocator free-page bookkeeping (non-atomic ops only). */
19816 static inline __attribute__((always_inline)) int PageSlobFree(struct page *page) { return (__builtin_constant_p((PG_slob_free)) ? constant_test_bit((PG_slob_free), (&page->flags)) : variable_test_bit((PG_slob_free), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageSlobFree(struct page *page) { __set_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlobFree(struct page *page) { __clear_bit(PG_slob_free, &page->flags); }
/* PG_slub_frozen: SLUB per-cpu "frozen" slab state (non-atomic ops only). */
19817 static inline __attribute__((always_inline)) int PageSlubFrozen(struct page *page) { return (__builtin_constant_p((PG_slub_frozen)) ? constant_test_bit((PG_slub_frozen), (&page->flags)) : variable_test_bit((PG_slub_frozen), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageSlubFrozen(struct page *page) { __set_bit(PG_slub_frozen, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlubFrozen(struct page *page) { __clear_bit(PG_slub_frozen, &page->flags); }
/* PG_private: page->private carries fs-specific data (e.g. buffer heads). */
19818 static inline __attribute__((always_inline)) int PagePrivate(struct page *page) { return (__builtin_constant_p((PG_private)) ? constant_test_bit((PG_private), (&page->flags)) : variable_test_bit((PG_private), (&page->flags))); } static inline __attribute__((always_inline)) void SetPagePrivate(struct page *page) { set_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate(struct page *page) { clear_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void __SetPagePrivate(struct page *page) { __set_bit(PG_private, &page->flags); }
19819 static inline __attribute__((always_inline)) void __ClearPagePrivate(struct page *page) { __clear_bit(PG_private, &page->flags); }
/* PG_private_2: second fs-private bit (fscache); includes test-and-set/clear. */
19820 static inline __attribute__((always_inline)) int PagePrivate2(struct page *page) { return (__builtin_constant_p((PG_private_2)) ? constant_test_bit((PG_private_2), (&page->flags)) : variable_test_bit((PG_private_2), (&page->flags))); } static inline __attribute__((always_inline)) void SetPagePrivate2(struct page *page) { set_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate2(struct page *page) { clear_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) int TestSetPagePrivate2(struct page *page) { return test_and_set_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) int TestClearPagePrivate2(struct page *page) { return test_and_clear_bit(PG_private_2, &page->flags); }
/* PG_owner_priv_1: owner-defined bit (e.g. PG_checked / PG_swapcache users). */
19821 static inline __attribute__((always_inline)) int PageOwnerPriv1(struct page *page) { return (__builtin_constant_p((PG_owner_priv_1)) ? constant_test_bit((PG_owner_priv_1), (&page->flags)) : variable_test_bit((PG_owner_priv_1), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageOwnerPriv1(struct page *page) { set_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) void ClearPageOwnerPriv1(struct page *page) { clear_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageOwnerPriv1(struct page *page) { return test_and_clear_bit(PG_owner_priv_1, &page->flags); }
/* PG_writeback: only atomic test-and-set/clear variants are generated here. */
19822 static inline __attribute__((always_inline)) int PageWriteback(struct page *page) { return (__builtin_constant_p((PG_writeback)) ? constant_test_bit((PG_writeback), (&page->flags)) : variable_test_bit((PG_writeback), (&page->flags))); } static inline __attribute__((always_inline)) int TestSetPageWriteback(struct page *page) { return test_and_set_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageWriteback(struct page *page) { return test_and_clear_bit(PG_writeback, &page->flags); }
/* PG_mappedtodisk: page has disk blocks allocated. */
19823 static inline __attribute__((always_inline)) int PageMappedToDisk(struct page *page) { return (__builtin_constant_p((PG_mappedtodisk)) ? constant_test_bit((PG_mappedtodisk), (&page->flags)) : variable_test_bit((PG_mappedtodisk), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageMappedToDisk(struct page *page) { set_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) void ClearPageMappedToDisk(struct page *page) { clear_bit(PG_mappedtodisk, &page->flags); }
/* PG_reclaim: page is scheduled for reclaim after writeback completes. */
19824 static inline __attribute__((always_inline)) int PageReclaim(struct page *page) { return (__builtin_constant_p((PG_reclaim)) ? constant_test_bit((PG_reclaim), (&page->flags)) : variable_test_bit((PG_reclaim), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageReclaim(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReclaim(struct page *page) { clear_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReclaim(struct page *page) { return test_and_clear_bit(PG_reclaim, &page->flags); }
/*
 * PageReadahead deliberately reuses PG_reclaim: in the kernel headers
 * PG_readahead is an alias for PG_reclaim (the two uses are mutually
 * exclusive on a page), so this is NOT a copy-paste bug.
 */
19825 static inline __attribute__((always_inline)) int PageReadahead(struct page *page) { return (__builtin_constant_p((PG_reclaim)) ? constant_test_bit((PG_reclaim), (&page->flags)) : variable_test_bit((PG_reclaim), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageReadahead(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReadahead(struct page *page) { clear_bit(PG_reclaim, &page->flags); }
/* PG_swapcache: page lives in the swap cache. */
19826 static inline __attribute__((always_inline)) int PageSwapCache(struct page *page) { return (__builtin_constant_p((PG_swapcache)) ? constant_test_bit((PG_swapcache), (&page->flags)) : variable_test_bit((PG_swapcache), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageSwapCache(struct page *page) { set_bit(PG_swapcache, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSwapCache(struct page *page) { clear_bit(PG_swapcache, &page->flags); }
/* PG_unevictable: page is on the unevictable LRU list. */
19827 static inline __attribute__((always_inline)) int PageUnevictable(struct page *page) { return (__builtin_constant_p((PG_unevictable)) ? constant_test_bit((PG_unevictable), (&page->flags)) : variable_test_bit((PG_unevictable), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageUnevictable(struct page *page) { set_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void ClearPageUnevictable(struct page *page) { clear_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageUnevictable(struct page *page) { __clear_bit(PG_unevictable, &page->flags); }
19828 static inline __attribute__((always_inline)) int TestClearPageUnevictable(struct page *page) { return test_and_clear_bit(PG_unevictable, &page->flags); }
/* PG_mlocked: page is mapped by at least one VM_LOCKED vma. */
19829 static inline __attribute__((always_inline)) int PageMlocked(struct page *page) { return (__builtin_constant_p((PG_mlocked)) ? constant_test_bit((PG_mlocked), (&page->flags)) : variable_test_bit((PG_mlocked), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageMlocked(struct page *page) { set_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageMlocked(struct page *page) { clear_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageMlocked(struct page *page) { __clear_bit(PG_mlocked, &page->flags); }
19830 static inline __attribute__((always_inline)) int TestSetPageMlocked(struct page *page) { return test_and_set_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageMlocked(struct page *page) { return test_and_clear_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) int __TestClearPageMlocked(struct page *page) { return __test_and_clear_bit(PG_mlocked, &page->flags); }
/* PG_uncached: page is mapped uncached (arch-specific, e.g. ia64/x86 PAT). */
19831 static inline __attribute__((always_inline)) int PageUncached(struct page *page) { return (__builtin_constant_p((PG_uncached)) ? constant_test_bit((PG_uncached), (&page->flags)) : variable_test_bit((PG_uncached), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageUncached(struct page *page) { set_bit(PG_uncached, &page->flags); } static inline __attribute__((always_inline)) void ClearPageUncached(struct page *page) { clear_bit(PG_uncached, &page->flags); }
/* CONFIG_MEMORY_FAILURE is off in this build: PageHWPoison is a constant-0 stub. */
19832 static inline __attribute__((always_inline)) int PageHWPoison(struct page *page) { return 0; }
/* Exports the page flags in a stable user-visible format (fs/proc/page.c). */
19833 u64 stable_page_flags(struct page *page);
/*
 * PG_uptodate accessors.  The bare "memory" asm statements are the expansion
 * of smp_rmb()/smp_wmb() compiler barriers on this (UP x86) configuration:
 * a page must only be marked uptodate after its contents are visible, and a
 * reader must observe the flag before trusting the data.
 * NOTE(review): several original source lines (e.g. 19835, 19839-19840,
 * 19842, 19845, 19847, 19850, 19856, 19858) were elided when this listing
 * was captured, so braces/returns of these functions are not visible here --
 * consult the original include/linux/page-flags.h before relying on exact
 * bodies.
 */
19834 static inline __attribute__((always_inline)) int PageUptodate(struct page *page)
19836 int ret = (__builtin_constant_p((PG_uptodate)) ? constant_test_bit((PG_uptodate), (&(page)->flags)) : variable_test_bit((PG_uptodate), (&(page)->flags)));
/* Branch-profiler instrumentation (CONFIG_PROFILE_ALL_BRANCHES): records hit/miss counts in the _ftrace_branch section. */
19837 if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 295, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; }))
19838 __asm__ __volatile__("": : :"memory");
/* Non-atomic set: barrier before __set_bit so data writes are ordered first. */
19841 static inline __attribute__((always_inline)) void __SetPageUptodate(struct page *page)
19843 __asm__ __volatile__("": : :"memory");
19844 __set_bit(PG_uptodate, &(page)->flags);
/* Atomic set, same write-barrier-then-set ordering. */
19846 static inline __attribute__((always_inline)) void SetPageUptodate(struct page *page)
19848 __asm__ __volatile__("": : :"memory");
19849 set_bit(PG_uptodate, &(page)->flags);
19851 static inline __attribute__((always_inline)) void ClearPageUptodate(struct page *page) { clear_bit(PG_uptodate, &page->flags); }
/* Truncate helper: drop dirty state and adjust accounting (mm/truncate.c). */
19852 extern void cancel_dirty_page(struct page *page, unsigned int account_size);
/* Writeback test-and-modify primitives implemented in mm/page-writeback.c. */
19853 int test_clear_page_writeback(struct page *page);
19854 int test_set_page_writeback(struct page *page);
/* Wrapper that sets PG_writeback, ignoring the previous value. */
19855 static inline __attribute__((always_inline)) void set_page_writeback(struct page *page)
19857 test_set_page_writeback(page);
19859 static inline __attribute__((always_inline)) int PageHead(struct page *page) { return (__builtin_constant_p((PG_head)) ? constant_test_bit((PG_head), (&page->flags)) : variable_test_bit((PG_head), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageHead(struct page *page) { __set_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageHead(struct page *page) { __clear_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void ClearPageHead(struct page *page) { clear_bit(PG_head, &page->flags); }
19860 static inline __attribute__((always_inline)) int PageTail(struct page *page) { return (__builtin_constant_p((PG_tail)) ? constant_test_bit((PG_tail), (&page->flags)) : variable_test_bit((PG_tail), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageTail(struct page *page) { __set_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageTail(struct page *page) { __clear_bit(PG_tail, &page->flags); }
19861 static inline __attribute__((always_inline)) int PageCompound(struct page *page)
19863 return page->flags & ((1L << PG_head) | (1L << PG_tail));
19865 static inline __attribute__((always_inline)) void ClearPageCompound(struct page *page)
19867 do { if (__builtin_constant_p((((__builtin_constant_p(!PageHead(page)) ? !!(!PageHead(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 356, }; ______r = __builtin_expect(!!(!PageHead(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!PageHead(page)) ? !!(!PageHead(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 356, }; ______r = __builtin_expect(!!(!PageHead(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 356, }; ______r = !!(((__builtin_constant_p(!PageHead(page)) ? !!(!PageHead(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 356, }; ______r = __builtin_expect(!!(!PageHead(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (356), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
19868 ClearPageHead(page);
19870 static inline __attribute__((always_inline)) int PageTransHuge(struct page *page)
19872 do { (void)(PageTail(page)); } while (0);
19873 return PageHead(page);
19875 static inline __attribute__((always_inline)) int PageTransCompound(struct page *page)
19877 return PageCompound(page);
/*
 * page_has_private(): true if either fs-private bit (PG_private or
 * PG_private_2) is set; !! normalizes the mask test to 0/1.
 * NOTE(review): the opening/closing braces (original lines 19880/19882)
 * were elided from this listing.
 */
19879 static inline __attribute__((always_inline)) int page_has_private(struct page *page)
19881 return !!(page->flags & (1 << PG_private | 1 << PG_private_2));
/*
 * Transparent-hugepage entry points (mm/huge_memory.c).  These operate on
 * huge pmd mappings: fault, fork-time copy, COW, follow, zap, mincore and
 * protection change.  NOTE(review): some continuation lines carrying trailing
 * parameters (original lines 19892, 19896, 19900) are missing from this
 * capture, so a few prototypes appear truncated here.
 */
19883 extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
19884 struct vm_area_struct *vma,
19885 unsigned long address, pmd_t *pmd,
19886 unsigned int flags);
/* Copies a huge pmd from parent to child at fork(). */
19887 extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
19888 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
19889 struct vm_area_struct *vma);
/* Copy-on-write fault on a huge pmd. */
19890 extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
19891 unsigned long address, pmd_t *pmd,
19893 extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
/* Resolve the struct page behind a huge pmd for get_user_pages(). */
19894 extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
19895 unsigned long addr,
19897 unsigned int flags);
/* Tear down a huge pmd during unmap. */
19898 extern int zap_huge_pmd(struct mmu_gather *tlb,
19899 struct vm_area_struct *vma,
/* mincore()/mprotect() support for huge pmd ranges. */
19901 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
19902 unsigned long addr, unsigned long end,
19903 unsigned char *vec);
19904 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
19905 unsigned long addr, pgprot_t newprot);
/*
 * Bit positions for the transparent_hugepage_flags sysfs-controlled bitmap
 * (always / madvise-only enable, and the matching defrag policies).
 * NOTE(review): the closing "};" lines of both enums (original lines 19912
 * and 19917) were elided from this capture.
 */
19906 enum transparent_hugepage_flag {
19907 TRANSPARENT_HUGEPAGE_FLAG,
19908 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
19909 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
19910 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
19911 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
/* Modes for page_check_address_pmd(): whether a splitting pmd matches. */
19913 enum page_check_address_pmd_flag {
19914 PAGE_CHECK_ADDRESS_PMD_FLAG,
19915 PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
19916 PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
/* Find the pmd mapping `page` at `address` in `mm`, filtered by `flag`. */
19918 extern pmd_t *page_check_address_pmd(struct page *page,
19919 struct mm_struct *mm,
19920 unsigned long address,
19921 enum page_check_address_pmd_flag flag);
/* Global THP policy bitmap, indexed by transparent_hugepage_flag above. */
19922 extern unsigned long transparent_hugepage_flags;
/* Fork-time pte-range copy (mm/memory.c). */
19923 extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
19924 pmd_t *dst_pmd, pmd_t *src_pmd,
19925 struct vm_area_struct *vma,
19926 unsigned long addr, unsigned long end);
/* Generic per-pte fault dispatcher (mm/memory.c). */
19927 extern int handle_pte_fault(struct mm_struct *mm,
19928 struct vm_area_struct *vma, unsigned long address,
19929 pte_t *pte, pmd_t *pmd, unsigned int flags);
/* Split one compound huge page / one huge pmd back into normal pages. */
19930 extern int split_huge_page(struct page *page);
19931 extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
/* MADV_HUGEPAGE / MADV_NOHUGEPAGE handling. */
19932 extern int hugepage_madvise(struct vm_area_struct *vma,
19933 unsigned long *vm_flags, int advice);
/* Split huge pages that straddle a vma boundary being adjusted.
 * NOTE(review): trailing parameter lines (original 19936-19937) are elided. */
19934 extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
19935 unsigned long start,
19938 static inline __attribute__((always_inline)) void vma_adjust_trans_huge(struct vm_area_struct *vma,
19939 unsigned long start,
19943 if (__builtin_constant_p(((!vma->anon_vma || vma->vm_ops))) ? !!((!vma->anon_vma || vma->vm_ops)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 116, }; ______r = !!((!vma->anon_vma || vma->vm_ops)); ______f.miss_hit[______r]++; ______r; }))
19945 __vma_adjust_trans_huge(vma, start, end, adjust_next);
19947 static inline __attribute__((always_inline)) int hpage_nr_pages(struct page *page)
19949 if (__builtin_constant_p((((__builtin_constant_p(PageTransHuge(page)) ? !!(PageTransHuge(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 122, }; ______r = __builtin_expect(!!(PageTransHuge(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageTransHuge(page)) ? !!(PageTransHuge(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 122, }; ______r = __builtin_expect(!!(PageTransHuge(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 122, }; ______r = !!(((__builtin_constant_p(PageTransHuge(page)) ? !!(PageTransHuge(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 122, }; ______r = __builtin_expect(!!(PageTransHuge(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
19950 return (1<<(21 -12));
19953 static inline __attribute__((always_inline)) struct page *compound_trans_head(struct page *page)
19955 if (__builtin_constant_p(((PageTail(page)))) ? !!((PageTail(page))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 128, }; ______r = !!((PageTail(page))); ______f.miss_hit[______r]++; ______r; })) {
19957 head = page->first_page;
19958 __asm__ __volatile__("": : :"memory");
19959 if (__builtin_constant_p(((PageTail(page)))) ? !!((PageTail(page))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 138, }; ______r = !!((PageTail(page))); ______f.miss_hit[______r]++; ______r; }))
/*
 * Page refcount primitives (include/linux/mm.h).  The "do { (void)(...); }
 * while (0)" statements are VM_BUG_ON() expansions compiled out in this
 * configuration (condition evaluated for side effects only, no trap).
 * NOTE(review): opening/closing brace lines were elided from this capture.
 */
/* Drop one reference; returns true when the count reaches zero.
 * Caller must already hold a reference (count must not be 0 on entry). */
19964 static inline __attribute__((always_inline)) int put_page_testzero(struct page *page)
19966 do { (void)(atomic_read(&page->_count) == 0); } while (0);
19967 return atomic_dec_and_test(&page->_count);
/* Take a reference only if the count is non-zero (speculative get). */
19969 static inline __attribute__((always_inline)) int get_page_unless_zero(struct page *page)
19971 return atomic_add_unless((&page->_count), 1, 0);
19973 extern int page_is_ram(unsigned long pfn);
/* vmalloc address -> struct page / pfn translation (mm/vmalloc.c). */
19974 struct page *vmalloc_to_page(const void *addr);
19975 unsigned long vmalloc_to_pfn(const void *addr);
/* True if x falls inside [VMALLOC_START, VMALLOC_END); the constants here
 * are the expanded x86-32 VMALLOC_START/VMALLOC_END (high_memory + 8MiB
 * holes, fixmap/pkmap carved off the top). */
19976 static inline __attribute__((always_inline)) int is_vmalloc_addr(const void *x)
19978 unsigned long addr = (unsigned long)x;
19979 return addr >= ((unsigned long)high_memory + (8 * 1024 * 1024)) && addr < ((((((unsigned long)__FIXADDR_TOP) - (__end_of_fixed_addresses << 12)) - ((1UL) << 12) * (512 + 1)) & (~((1UL << 21) - 1))) - 2 * ((1UL) << 12));
19981 extern int is_vmalloc_or_module_addr(const void *x);
19982 static inline __attribute__((always_inline)) void compound_lock(struct page *page)
19984 bit_spin_lock(PG_compound_lock, &page->flags);
19986 static inline __attribute__((always_inline)) void compound_unlock(struct page *page)
19988 bit_spin_unlock(PG_compound_lock, &page->flags);
19990 static inline __attribute__((always_inline)) unsigned long compound_lock_irqsave(struct page *page)
19992 unsigned long flags = flags;
19993 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
19994 compound_lock(page);
19997 static inline __attribute__((always_inline)) void compound_unlock_irqrestore(struct page *page,
19998 unsigned long flags)
20000 compound_unlock(page);
20001 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 347, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
20003 static inline __attribute__((always_inline)) struct page *compound_head(struct page *page)
20005 if (__builtin_constant_p((((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 353, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 353, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 353, }; ______r = !!(((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 353, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
20006 return page->first_page;
20009 static inline __attribute__((always_inline)) int page_count(struct page *page)
20011 return atomic_read(&compound_head(page)->_count);
20013 static inline __attribute__((always_inline)) void get_page(struct page *page)
20015 do { (void)(atomic_read(&page->_count) < !PageTail(page)); } while (0);
20016 atomic_inc(&page->_count);
20017 if (__builtin_constant_p((((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 379, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 379, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 379, }; ______r = !!(((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 379, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
20018 do { (void)(atomic_read(&page->first_page->_count) <= 0); } while (0);
20019 atomic_inc(&page->first_page->_count);
20022 static inline __attribute__((always_inline)) struct page *virt_to_head_page(const void *x)
20024 struct page *page = (mem_map + (((((unsigned long)(x)) - ((unsigned long)(0xC0000000UL))) >> 12) - (0UL)));
20025 return compound_head(page);
/*
 * Compound-page and buddy-allocator helpers (include/linux/mm.h).  The
 * "do { (void)(...); } while (0)" lines are compiled-out VM_BUG_ON()s, and
 * the large ({ ... ______f.miss_hit[...]++; ... }) expressions are
 * CONFIG_PROFILE_ALL_BRANCHES instrumentation around plain if() conditions.
 * NOTE(review): brace/return lines were elided from this capture (e.g.
 * original 20062 "return 0;" in compound_order, 20074-20075 in
 * compound_trans_order), so some bodies are incomplete here.
 */
/* Reset a freshly allocated page's refcount to 1. */
20027 static inline __attribute__((always_inline)) void init_page_count(struct page *page)
20029 atomic_set(&page->_count, 1);
/* A free buddy page is tagged by _mapcount == PAGE_BUDDY_MAPCOUNT_VALUE (-128). */
20031 static inline __attribute__((always_inline)) int PageBuddy(struct page *page)
20033 return atomic_read(&page->_mapcount) == (-128);
/* Mark page as buddy-free; page must be unmapped (_mapcount == -1) first. */
20035 static inline __attribute__((always_inline)) void __SetPageBuddy(struct page *page)
20037 do { (void)(atomic_read(&page->_mapcount) != -1); } while (0);
20038 atomic_set(&page->_mapcount, (-128));
/* Clear the buddy tag, restoring the "unmapped" _mapcount of -1. */
20040 static inline __attribute__((always_inline)) void __ClearPageBuddy(struct page *page)
20042 do { (void)(!PageBuddy(page)); } while (0);
20043 atomic_set(&page->_mapcount, -1);
20045 void put_page(struct page *page);
20046 void put_pages_list(struct list_head *pages);
/* Break a higher-order page into order-0 pages (mm/page_alloc.c). */
20047 void split_page(struct page *page, unsigned int order);
20048 int split_free_page(struct page *page);
/* Compound pages stash their destructor in page[1].lru.next ... */
20049 typedef void compound_page_dtor(struct page *);
20050 static inline __attribute__((always_inline)) void set_compound_page_dtor(struct page *page,
20051 compound_page_dtor *dtor)
20053 page[1].lru.next = (void *)dtor;
20055 static inline __attribute__((always_inline)) compound_page_dtor *get_compound_page_dtor(struct page *page)
20057 return (compound_page_dtor *)page[1].lru.next;
/* ... and their allocation order in page[1].lru.prev. */
20059 static inline __attribute__((always_inline)) int compound_order(struct page *page)
20061 if (__builtin_constant_p(((!PageHead(page)))) ? !!((!PageHead(page))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 459, }; ______r = !!((!PageHead(page))); ______f.miss_hit[______r]++; ______r; }))
20063 return (unsigned long)page[1].lru.prev;
/* Like compound_order(), but takes the compound lock so the order cannot
 * change under us while THP splitting is possible. */
20065 static inline __attribute__((always_inline)) int compound_trans_order(struct page *page)
20068 unsigned long flags;
20069 if (__builtin_constant_p(((!PageHead(page)))) ? !!((!PageHead(page))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 469, }; ______r = !!((!PageHead(page))); ______f.miss_hit[______r]++; ______r; }))
20071 flags = compound_lock_irqsave(page);
20072 order = compound_order(page);
20073 compound_unlock_irqrestore(page, flags);
20076 static inline __attribute__((always_inline)) void set_compound_order(struct page *page, unsigned long order)
20078 page[1].lru.prev = (void *)order;
20080 static inline __attribute__((always_inline)) pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
20082 if (__builtin_constant_p((((__builtin_constant_p(vma->vm_flags & 0x00000002) ? !!(vma->vm_flags & 0x00000002) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 492, }; ______r = __builtin_expect(!!(vma->vm_flags & 0x00000002), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(vma->vm_flags & 0x00000002) ? !!(vma->vm_flags & 0x00000002) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 492, }; ______r = __builtin_expect(!!(vma->vm_flags & 0x00000002), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 492, }; ______r = !!(((__builtin_constant_p(vma->vm_flags & 0x00000002) ? !!(vma->vm_flags & 0x00000002) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 492, }; ______r = __builtin_expect(!!(vma->vm_flags & 0x00000002), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
20083 pte = pte_mkwrite(pte);
/*
 * Zone/node encoding helpers: on this single-node 32-bit configuration the
 * zone id (2 bits) lives at the top of page->flags and the node id occupies
 * 0 bits (always node 0) -- hence the many "* (0 != 0)" shift factors that
 * constant-fold to zero.  These are the expanded page_zonenum()/page_to_nid()
 * etc. from include/linux/mm.h.
 * NOTE(review): brace lines and zone_to_nid()'s body (original 20095-20097)
 * were elided from this capture.
 */
/* Extract the 2-bit zone index from the top of page->flags. */
20086 static inline __attribute__((always_inline)) enum zone_type page_zonenum(struct page *page)
20088 return (page->flags >> (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0))) & ((1UL << 2) - 1);
/* Combined node+zone id, used for quick "same zone?" comparisons. */
20090 static inline __attribute__((always_inline)) int page_zone_id(struct page *page)
20092 return (page->flags >> ((((((sizeof(unsigned long)*8) - 0) - 0) < ((((sizeof(unsigned long)*8) - 0) - 0) - 2))? (((sizeof(unsigned long)*8) - 0) - 0) : ((((sizeof(unsigned long)*8) - 0) - 0) - 2)) * ((0 + 2) != 0))) & ((1UL << (0 + 2)) - 1);
20094 static inline __attribute__((always_inline)) int zone_to_nid(struct zone *zone)
/* With 0 node bits this always evaluates to 0 (single-node build). */
20098 static inline __attribute__((always_inline)) int page_to_nid(struct page *page)
20100 return (page->flags >> ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0))) & ((1UL << 0) - 1);
/* Map a page to its zone via the (flat, !NUMA) contig_page_data node. */
20102 static inline __attribute__((always_inline)) struct zone *page_zone(struct page *page)
20104 return &(&contig_page_data)->node_zones[page_zonenum(page)];
/* Clear then set the zone bits in page->flags. */
20106 static inline __attribute__((always_inline)) void set_page_zone(struct page *page, enum zone_type zone)
20108 page->flags &= ~(((1UL << 2) - 1) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0)));
20109 page->flags |= (zone & ((1UL << 2) - 1)) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0));
/* Node field is 0 bits wide here, so both statements are no-ops after folding. */
20111 static inline __attribute__((always_inline)) void set_page_node(struct page *page, unsigned long node)
20113 page->flags &= ~(((1UL << 0) - 1) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0)));
20114 page->flags |= (node & ((1UL << 0) - 1)) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0));
/* Initialize a page's zone and node links at boot / hotplug time. */
20116 static inline __attribute__((always_inline)) void set_page_links(struct page *page, enum zone_type zone,
20117 unsigned long node, unsigned long pfn)
20119 set_page_zone(page, zone);
20120 set_page_node(page, node);
/*
 * Expanded include/linux/vmstat.h: the per-zone-suffixed event names
 * (PGALLOC_DMA/NORMAL/HIGH/MOVABLE etc.) come from the FOR_ALL_ZONES macro.
 * Counters are accumulated per-cpu in vm_event_states and summed for
 * /proc/vmstat.  NOTE(review): some lines were elided from this capture --
 * notably the enum's THP_FAULT_ALLOC entry, its NR_VM_EVENT_ITEMS
 * terminator and closing "};" (around original lines 20144/20148-20150),
 * and the struct's closing brace (original 20154).
 */
20122 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
20123 PGALLOC_DMA, PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE,
20124 PGFREE, PGACTIVATE, PGDEACTIVATE,
20125 PGFAULT, PGMAJFAULT,
20126 PGREFILL_DMA, PGREFILL_NORMAL , PGREFILL_HIGH , PGREFILL_MOVABLE,
20127 PGSTEAL_DMA, PGSTEAL_NORMAL , PGSTEAL_HIGH , PGSTEAL_MOVABLE,
20128 PGSCAN_KSWAPD_DMA, PGSCAN_KSWAPD_NORMAL , PGSCAN_KSWAPD_HIGH , PGSCAN_KSWAPD_MOVABLE,
20129 PGSCAN_DIRECT_DMA, PGSCAN_DIRECT_NORMAL , PGSCAN_DIRECT_HIGH , PGSCAN_DIRECT_MOVABLE,
20130 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
20131 KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
20132 KSWAPD_SKIP_CONGESTION_WAIT,
20133 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
/* Memory-compaction statistics. */
20134 COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
20135 COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
/* Unevictable-LRU accounting. */
20136 UNEVICTABLE_PGCULLED,
20137 UNEVICTABLE_PGSCANNED,
20138 UNEVICTABLE_PGRESCUED,
20139 UNEVICTABLE_PGMLOCKED,
20140 UNEVICTABLE_PGMUNLOCKED,
20141 UNEVICTABLE_PGCLEARED,
20142 UNEVICTABLE_PGSTRANDED,
20143 UNEVICTABLE_MLOCKFREED,
/* Transparent-hugepage fault / khugepaged-collapse counters. */
20145 THP_FAULT_FALLBACK,
20146 THP_COLLAPSE_ALLOC,
20147 THP_COLLAPSE_ALLOC_FAILED,
/* /proc/sys/vm/stat_interval: seconds between per-cpu stat folds. */
20151 extern int sysctl_stat_interval;
/* One counter slot per vm_event_item, instantiated per cpu below. */
20152 struct vm_event_state {
20153 unsigned long event[NR_VM_EVENT_ITEMS];
/* DEFINE_PER_CPU(struct vm_event_state, vm_event_states) after expansion. */
20155 extern __attribute__((section(".data..percpu" ""))) __typeof__(struct vm_event_state) vm_event_states;
20156 static inline __attribute__((always_inline)) void __count_vm_event(enum vm_event_item item)
20158 do { do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((vm_event_states.event[item])))) { case 1: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((vm_event_states.event[item])))))); (typeof(*(&((((vm_event_states.event[item])))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
20160 static inline __attribute__((always_inline)) void count_vm_event(enum vm_event_item item)
20162 do { do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((vm_event_states.event[item])))) { case 1: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((vm_event_states.event[item])))))); (typeof(*(&((((vm_event_states.event[item])))))) *)tcp_ptr__; }) += ((1)); do { do { __asm__ 
__volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
20164 static inline __attribute__((always_inline)) void __count_vm_events(enum vm_event_item item, long delta)
20166 do { do { const void *__vpp_verify = (typeof(&((vm_event_states.event[item]))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((vm_event_states.event[item]))) { case 1: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&(((vm_event_states.event[item]))))); (typeof(*(&(((vm_event_states.event[item]))))) *)tcp_ptr__; }) += ((delta)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
/*
 * count_vm_events() — add @delta to the per-cpu VM event counter for @item.
 *
 * NOTE(review): this is preprocessed compiler output (all macros already
 * expanded), not hand-maintained source. The stray integers at the start of
 * each line (e.g. "20168") are original-line-number artifacts of the
 * extraction, and some interior lines (braces, the function's closing "}")
 * were dropped. Do not hand-edit this expansion; the real source is
 * include/linux/vmstat.h line 47 (see the .file/.line fields below).
 *
 * What the visible expansion does:
 *  - dispatches on sizeof(vm_event_states.event[item]) and emits an x86
 *    %fs-segment-relative incb/decb/addb ... incq/decq/addq on the per-cpu
 *    counter; inc/dec are chosen when @delta is a compile-time +1/-1
 *    (the pao_ID__ test), otherwise an add with the value is emitted;
 *  - the final "case 8" path computes the per-cpu address from this_cpu_off
 *    and does a plain "+=", bracketed by add_preempt_count(1) /
 *    sub_preempt_count(1) and a preempt_schedule() check — i.e. a
 *    preemption-safe fallback (this appears to be the this_cpu_add()
 *    expansion, as opposed to the __this_cpu_add() one earlier in the file —
 *    TODO confirm against vmstat.h);
 *  - every ({ ... ______f.miss_hit[______r]++; ______r; }) block is branch
 *    profiling instrumentation (sections _ftrace_branch /
 *    _ftrace_annotated_branch), not program logic.
 */
20168 static inline __attribute__((always_inline)) void count_vm_events(enum vm_event_item item, long delta)
20170 do { do { const void *__vpp_verify = (typeof(&((vm_event_states.event[item]))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((vm_event_states.event[item]))) { case 1: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&(((vm_event_states.event[item]))))); (typeof(*(&(((vm_event_states.event[item]))))) *)tcp_ptr__; }) += ((delta)); do { do { __asm__ 
__volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
/* Fill a caller-supplied array with every VM event counter, summed across CPUs. */
20172 extern void all_vm_events(unsigned long *);
/* Fold a departing CPU's event counts into the remaining CPUs (CPU-hotplug path — TODO confirm). */
20173 extern void vm_events_fold_cpu(int cpu);
/* Machine-wide zone page-state counters; per-zone copies live in struct zone (see zone_page_state_add below). */
20174 extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
20175 static inline __attribute__((always_inline)) void zone_page_state_add(long x, struct zone *zone,
20176 enum zone_stat_item item)
20178 atomic_long_add(x, &zone->vm_stat[item]);
20179 atomic_long_add(x, &vm_stat[item]);
/*
 * global_page_state() — read the machine-wide counter for @item.
 * The "if" below is a branch-profiled (_ftrace_branch) test for x < 0;
 * NOTE(review): its body and the final return were dropped by this
 * extraction (the original presumably clamps transiently-negative values
 * to 0 before returning — TODO confirm against include/linux/vmstat.h:103).
 */
20181 static inline __attribute__((always_inline)) unsigned long global_page_state(enum zone_stat_item item)
20183 long x = atomic_long_read(&vm_stat[item]);
20184 if (__builtin_constant_p(((x < 0))) ? !!((x < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 103, }; ______r = !!((x < 0)); ______f.miss_hit[______r]++; ______r; }))
/*
 * zone_page_state() — read @zone's counter for @item (per-zone analogue of
 * global_page_state()). The branch-profiled "if (x < 0)" test is visible
 * below; NOTE(review): its body and the return statement were dropped by
 * this extraction — presumably negative values are clamped to 0, as in the
 * original vmstat.h:114 — TODO confirm.
 */
20188 static inline __attribute__((always_inline)) unsigned long zone_page_state(struct zone *zone,
20189 enum zone_stat_item item)
20191 long x = atomic_long_read(&zone->vm_stat[item]);
20192 if (__builtin_constant_p(((x < 0))) ? !!((x < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 114, }; ______r = !!((x < 0)); ______f.miss_hit[______r]++; ______r; }))
/*
 * zone_page_state_snapshot() — like zone_page_state(), but also adds in the
 * not-yet-folded per-cpu deltas (pageset->vm_stat_diff[item]) of every
 * online CPU for a more accurate snapshot. The loop below is the expansion
 * of for_each_online_cpu(); the per-cpu pointer is formed by offsetting
 * zone->pageset with __per_cpu_offset[cpu].
 * NOTE(review): the declaration of the loop variable "cpu" and the function's
 * tail (the clamp/return after the "if (x < 0)") were dropped by this
 * extraction — TODO confirm against include/linux/vmstat.h:136.
 */
20196 static inline __attribute__((always_inline)) unsigned long zone_page_state_snapshot(struct zone *zone,
20197 enum zone_stat_item item)
20199 long x = atomic_long_read(&zone->vm_stat[item]);
20201 for (((cpu)) = -1; ((cpu)) = cpumask_next(((cpu)), (cpu_online_mask)), ((cpu)) < nr_cpu_ids;)
20202 x += ({ do { const void *__vpp_verify = (typeof(((zone->pageset))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((zone->pageset))) *)((zone->pageset)))); (typeof((typeof(*((zone->pageset))) *)((zone->pageset)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->vm_stat_diff[item];
20203 if (__builtin_constant_p(((x < 0))) ? !!((x < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 136, }; ______r = !!((x < 0)); ______f.miss_hit[______r]++; ______r; }))
/* Counts of reclaimable pages, machine-wide and per zone (defined out of line — presumably mm/vmscan.c; confirm). */
20207 extern unsigned long global_reclaimable_pages(void);
20208 extern unsigned long zone_reclaimable_pages(struct zone *zone);
20209 static inline __attribute__((always_inline)) void zap_zone_vm_stats(struct zone *zone)
20211 __builtin_memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
/* Mutators for per-zone page-state counters (bodies live out of line). */
20213 extern void inc_zone_state(struct zone *, enum zone_stat_item);
/* "__"-prefixed variants: the non-preempt-safe forms — presumably caller must disable preemption; confirm in mm/vmstat.c. */
20214 void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
20215 void __inc_zone_page_state(struct page *, enum zone_stat_item);
20216 void __dec_zone_page_state(struct page *, enum zone_stat_item);
20217 void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
20218 void inc_zone_page_state(struct page *, enum zone_stat_item);
20219 void dec_zone_page_state(struct page *, enum zone_stat_item);
/* Duplicate declaration of inc_zone_state (harmless in C, kept from the original header). */
20220 extern void inc_zone_state(struct zone *, enum zone_stat_item);
20221 extern void __inc_zone_state(struct zone *, enum zone_stat_item);
20222 extern void dec_zone_state(struct zone *, enum zone_stat_item);
20223 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
/* Fold per-cpu counter diffs into the global/zone counters and recompute update thresholds. */
20224 void refresh_cpu_vm_stats(int);
20225 void refresh_zone_stat_thresholds(void);
/* Per-zone stat-update threshold calculators (memory-pressure vs. normal mode). */
20226 int calculate_pressure_threshold(struct zone *zone);
20227 int calculate_normal_threshold(struct zone *zone);
20228 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
20229 int (*calculate_pressure)(struct zone *));
/* Human-readable counter names (presumably for /proc/vmstat output — confirm). */
20230 extern const char * const vmstat_text[];
20231 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *lowmem_page_address(struct page *page)
20233 return ((void *)((unsigned long)(((phys_addr_t)(((unsigned long)((page) - mem_map) + (0UL))) << 12))+((unsigned long)(0xC0000000UL))));
/* Page <-> kernel-virtual-address helpers (highmem-aware; bodies out of line). */
20235 void *page_address(struct page *page);
20236 void set_page_address(struct page *page, void *virtual);
20237 void page_address_init(void);
/* The address_space backing swap-cache pages; see its use in page_mapping() below. */
20238 extern struct address_space swapper_space;
/*
 * page_mapping() — resolve the address_space a page belongs to:
 *  - swap-cache pages (PageSwapCache, the likely()-annotated test below)
 *    map to the global swapper_space;
 *  - a mapping pointer with bit 0 set marks an anon page (cf. PageAnon()
 *    below), for which NULL is substituted;
 *  - otherwise page->mapping is used as-is.
 * The huge ({ ... miss_hit ... }) wrappers are branch-profiling
 * instrumentation, not logic; the PageSlab() test at 20242 is evaluated
 * only for its side effect (a debug check compiled down to (void)).
 * NOTE(review): the final "return mapping;" line was dropped by this
 * extraction (original include/linux/mm.h around line 780).
 */
20239 static inline __attribute__((always_inline)) struct address_space *page_mapping(struct page *page)
20241 struct address_space *mapping = page->mapping;
20242 do { (void)(PageSlab(page)); } while (0);
20243 if (__builtin_constant_p((((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 776, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 776, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 776, }; ______r = !!(((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 776, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
20244 mapping = &swapper_space;
20245 else if (__builtin_constant_p((((unsigned long)mapping & 1))) ? !!(((unsigned long)mapping & 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 778, }; ______r = !!(((unsigned long)mapping & 1)); ______f.miss_hit[______r]++; ______r; }))
20246 mapping = ((void *)0);
20249 static inline __attribute__((always_inline)) void *page_rmapping(struct page *page)
20251 return (void *)((unsigned long)page->mapping & ~(1 | 2));
20253 static inline __attribute__((always_inline)) int PageAnon(struct page *page)
20255 return ((unsigned long)page->mapping & 1) != 0;
20257 static inline __attribute__((always_inline)) unsigned long page_index(struct page *page)
20259 if (__builtin_constant_p((((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 800, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 800, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 800, }; ______r = !!(((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 800, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
20260 return ((page)->private);
20261 return page->index;
20263 static inline __attribute__((always_inline)) void reset_page_mapcount(struct page *page)
20265 atomic_set(&(page)->_mapcount, -1);
20267 static inline __attribute__((always_inline)) int page_mapcount(struct page *page)
20269 return atomic_read(&(page)->_mapcount) + 1;
20271 static inline __attribute__((always_inline)) int page_mapped(struct page *page)
20273 return atomic_read(&(page)->_mapcount) >= 0;
20275 extern void pagefault_out_of_memory(void);
20276 extern void show_free_areas(unsigned int flags);
20277 extern bool skip_free_areas_node(unsigned int flags, int nid);
20278 int shmem_lock(struct file *file, int lock, struct user_struct *user);
20279 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
20280 int shmem_zero_setup(struct vm_area_struct *);
20281 extern int can_do_mlock(void);
20282 extern int user_shm_lock(size_t, struct user_struct *);
20283 extern void user_shm_unlock(size_t, struct user_struct *);
20284 struct zap_details {
20285 struct vm_area_struct *nonlinear_vma;
20286 struct address_space *check_mapping;
20287 unsigned long first_index;
20288 unsigned long last_index;
20290 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
20292 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
20293 unsigned long size);
20294 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
20295 unsigned long size, struct zap_details *);
20296 unsigned long unmap_vmas(struct mmu_gather *tlb,
20297 struct vm_area_struct *start_vma, unsigned long start_addr,
20298 unsigned long end_addr, unsigned long *nr_accounted,
20299 struct zap_details *);
20301 int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
20302 int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
20303 int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
20304 int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
20305 int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
20306 int (*hugetlb_entry)(pte_t *, unsigned long,
20307 unsigned long, unsigned long, struct mm_walk *);
20308 struct mm_struct *mm;
20311 int walk_page_range(unsigned long addr, unsigned long end,
20312 struct mm_walk *walk);
20313 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
20314 unsigned long end, unsigned long floor, unsigned long ceiling);
20315 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
20316 struct vm_area_struct *vma);
20317 void unmap_mapping_range(struct address_space *mapping,
20318 loff_t const holebegin, loff_t const holelen, int even_cows);
20319 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
20320 unsigned long *pfn);
20321 int follow_phys(struct vm_area_struct *vma, unsigned long address,
20322 unsigned int flags, unsigned long *prot, resource_size_t *phys);
20323 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
20324 void *buf, int len, int write);
20325 static inline __attribute__((always_inline)) void unmap_shared_mapping_range(struct address_space *mapping,
20326 loff_t const holebegin, loff_t const holelen)
20328 unmap_mapping_range(mapping, holebegin, holelen, 0);
20330 extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
20331 extern void truncate_setsize(struct inode *inode, loff_t newsize);
20332 extern int vmtruncate(struct inode *inode, loff_t offset);
20333 extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
20334 int truncate_inode_page(struct address_space *mapping, struct page *page);
20335 int generic_error_remove_page(struct address_space *mapping, struct page *page);
20336 int invalidate_inode_page(struct page *page);
20337 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
20338 unsigned long address, unsigned int flags);
20339 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
20340 unsigned long address, unsigned int fault_flags);
20341 extern int make_pages_present(unsigned long addr, unsigned long end);
20342 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
20343 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
20344 void *buf, int len, int write);
20345 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
20346 unsigned long start, int len, unsigned int foll_flags,
20347 struct page **pages, struct vm_area_struct **vmas,
20349 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
20350 unsigned long start, int nr_pages, int write, int force,
20351 struct page **pages, struct vm_area_struct **vmas);
20352 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
20353 struct page **pages);
20354 struct page *get_dump_page(unsigned long addr);
20355 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
20356 extern void do_invalidatepage(struct page *page, unsigned long offset);
20357 int __set_page_dirty_nobuffers(struct page *page);
20358 int __set_page_dirty_no_writeback(struct page *page);
20359 int redirty_page_for_writepage(struct writeback_control *wbc,
20360 struct page *page);
20361 void account_page_dirtied(struct page *page, struct address_space *mapping);
20362 void account_page_writeback(struct page *page);
20363 int set_page_dirty(struct page *page);
20364 int set_page_dirty_lock(struct page *page);
20365 int clear_page_dirty_for_io(struct page *page);
20366 static inline __attribute__((always_inline)) int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
20368 return vma && (vma->vm_end == addr) && (vma->vm_flags & 0x00000100);
20370 static inline __attribute__((always_inline)) int stack_guard_page_start(struct vm_area_struct *vma,
20371 unsigned long addr)
20373 return (vma->vm_flags & 0x00000100) &&
20374 (vma->vm_start == addr) &&
20375 !vma_growsdown(vma->vm_prev, addr);
20377 static inline __attribute__((always_inline)) int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
20379 return vma && (vma->vm_start == addr) && (vma->vm_flags & 0x00000000);
20381 static inline __attribute__((always_inline)) int stack_guard_page_end(struct vm_area_struct *vma,
20382 unsigned long addr)
20384 return (vma->vm_flags & 0x00000000) &&
20385 (vma->vm_end == addr) &&
20386 !vma_growsup(vma->vm_next, addr);
20388 extern unsigned long move_page_tables(struct vm_area_struct *vma,
20389 unsigned long old_addr, struct vm_area_struct *new_vma,
20390 unsigned long new_addr, unsigned long len);
20391 extern unsigned long do_mremap(unsigned long addr,
20392 unsigned long old_len, unsigned long new_len,
20393 unsigned long flags, unsigned long new_addr);
20394 extern int mprotect_fixup(struct vm_area_struct *vma,
20395 struct vm_area_struct **pprev, unsigned long start,
20396 unsigned long end, unsigned long newflags);
20397 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
20398 struct page **pages);
20399 static inline __attribute__((always_inline)) void set_mm_counter(struct mm_struct *mm, int member, long value)
20401 atomic_long_set(&mm->rss_stat.count[member], value);
20403 static inline __attribute__((always_inline)) unsigned long get_mm_counter(struct mm_struct *mm, int member)
20405 return atomic_long_read(&mm->rss_stat.count[member]);
20407 static inline __attribute__((always_inline)) void add_mm_counter(struct mm_struct *mm, int member, long value)
20409 atomic_long_add(value, &mm->rss_stat.count[member]);
20411 static inline __attribute__((always_inline)) void inc_mm_counter(struct mm_struct *mm, int member)
20413 atomic_long_inc(&mm->rss_stat.count[member]);
20415 static inline __attribute__((always_inline)) void dec_mm_counter(struct mm_struct *mm, int member)
20417 atomic_long_dec(&mm->rss_stat.count[member]);
20419 static inline __attribute__((always_inline)) unsigned long get_mm_rss(struct mm_struct *mm)
20421 return get_mm_counter(mm, MM_FILEPAGES) +
20422 get_mm_counter(mm, MM_ANONPAGES);
20424 static inline __attribute__((always_inline)) unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
20426 return ({ typeof(mm->hiwater_rss) _max1 = (mm->hiwater_rss); typeof(get_mm_rss(mm)) _max2 = (get_mm_rss(mm)); (void) (&_max1 == &_max2); _max1 > _max2 ? _max1 : _max2; });
20428 static inline __attribute__((always_inline)) unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
20430 return ({ typeof(mm->hiwater_vm) _max1 = (mm->hiwater_vm); typeof(mm->total_vm) _max2 = (mm->total_vm); (void) (&_max1 == &_max2); _max1 > _max2 ? _max1 : _max2; });
20432 static inline __attribute__((always_inline)) void update_hiwater_rss(struct mm_struct *mm)
20434 unsigned long _rss = get_mm_rss(mm);
20435 if (__builtin_constant_p((((mm)->hiwater_rss < _rss))) ? !!(((mm)->hiwater_rss < _rss)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1107, }; ______r = !!(((mm)->hiwater_rss < _rss)); ______f.miss_hit[______r]++; ______r; }))
20436 (mm)->hiwater_rss = _rss;
20438 static inline __attribute__((always_inline)) void update_hiwater_vm(struct mm_struct *mm)
20440 if (__builtin_constant_p(((mm->hiwater_vm < mm->total_vm))) ? !!((mm->hiwater_vm < mm->total_vm)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1113, }; ______r = !!((mm->hiwater_vm < mm->total_vm)); ______f.miss_hit[______r]++; ______r; }))
20441 mm->hiwater_vm = mm->total_vm;
20443 static inline __attribute__((always_inline)) void setmax_mm_hiwater_rss(unsigned long *maxrss,
20444 struct mm_struct *mm)
20446 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
20447 if (__builtin_constant_p(((*maxrss < hiwater_rss))) ? !!((*maxrss < hiwater_rss)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1122, }; ______r = !!((*maxrss < hiwater_rss)); ______f.miss_hit[______r]++; ______r; }))
20448 *maxrss = hiwater_rss;
20450 static inline __attribute__((always_inline)) void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
20453 struct shrink_control {
20455 unsigned long nr_to_scan;
20458 int (*shrink)(struct shrinker *, struct shrink_control *sc);
20460 struct list_head list;
20463 extern void register_shrinker(struct shrinker *);
20464 extern void unregister_shrinker(struct shrinker *);
20465 int vma_wants_writenotify(struct vm_area_struct *vma);
20466 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
20468 static inline __attribute__((always_inline)) pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
20472 (ptep = __get_locked_pte(mm, addr, ptl));
20475 static inline __attribute__((always_inline)) int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
20476 unsigned long address)
20480 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
20481 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
20482 pmd_t *pmd, unsigned long address);
20483 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
20484 static inline __attribute__((always_inline)) pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
20486 return ((__builtin_constant_p(pgd_none(*pgd)) ? !!(pgd_none(*pgd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1215, }; ______r = __builtin_expect(!!(pgd_none(*pgd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __pud_alloc(mm, pgd, address))?
20487 ((void *)0): pud_offset(pgd, address);
20489 static inline __attribute__((always_inline)) pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
20491 return ((__builtin_constant_p(pud_none(*pud)) ? !!(pud_none(*pud)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1221, }; ______r = __builtin_expect(!!(pud_none(*pud)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __pmd_alloc(mm, pud, address))?
20492 ((void *)0): pmd_offset(pud, address);
20494 static inline __attribute__((always_inline)) void pgtable_page_ctor(struct page *page)
20497 inc_zone_page_state(page, NR_PAGETABLE);
20499 static inline __attribute__((always_inline)) void pgtable_page_dtor(struct page *page)
20502 dec_zone_page_state(page, NR_PAGETABLE);
20504 extern void free_area_init(unsigned long * zones_size);
20505 extern void free_area_init_node(int nid, unsigned long * zones_size,
20506 unsigned long zone_start_pfn, unsigned long *zholes_size);
20507 extern void free_area_init_nodes(unsigned long *max_zone_pfn);
20508 extern void add_active_range(unsigned int nid, unsigned long start_pfn,
20509 unsigned long end_pfn);
20510 extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
20511 unsigned long end_pfn);
20512 extern void remove_all_active_ranges(void);
20513 void sort_node_map(void);
20514 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
20515 unsigned long end_pfn);
20516 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
20517 unsigned long end_pfn);
20518 extern void get_pfn_range_for_nid(unsigned int nid,
20519 unsigned long *start_pfn, unsigned long *end_pfn);
20520 extern unsigned long find_min_pfn_with_active_regions(void);
20521 extern void free_bootmem_with_active_regions(int nid,
20522 unsigned long max_low_pfn);
20523 int add_from_early_node_map(struct range *range, int az,
20524 int nr_range, int nid);
20525 u64 __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) find_memory_core_early(int nid, u64 size, u64 align,
20526 u64 goal, u64 limit);
20527 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
20528 extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
20529 extern void sparse_memory_present_with_active_regions(int nid);
20530 extern int __attribute__ ((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) early_pfn_to_nid(unsigned long pfn);
20531 extern void set_dma_reserve(unsigned long new_dma_reserve);
20532 extern void memmap_init_zone(unsigned long, int, unsigned long,
20533 unsigned long, enum memmap_context);
20534 extern void setup_per_zone_wmarks(void);
20535 extern int __attribute__ ((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) init_per_zone_wmark_min(void);
20536 extern void mem_init(void);
20537 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) mmap_init(void);
20538 extern void show_mem(unsigned int flags);
20539 extern void si_meminfo(struct sysinfo * val);
20540 extern void si_meminfo_node(struct sysinfo *val, int nid);
20541 extern int after_bootmem;
20542 extern void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
20543 extern void setup_per_cpu_pageset(void);
20544 extern void zone_pcp_update(struct zone *zone);
20545 extern atomic_long_t mmap_pages_allocated;
20546 extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
20547 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
20548 void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
20549 void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
20550 struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
20551 struct prio_tree_iter *iter);
20552 static inline __attribute__((always_inline)) void vma_nonlinear_insert(struct vm_area_struct *vma,
20553 struct list_head *list)
20555 vma->shared.vm_set.parent = ((void *)0);
20556 list_add_tail(&vma->shared.vm_set.list, list);
20558 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
20559 extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
20560 unsigned long end, unsigned long pgoff, struct vm_area_struct *insert);
20561 extern struct vm_area_struct *vma_merge(struct mm_struct *,
20562 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
20563 unsigned long vm_flags, struct anon_vma *, struct file *, unsigned long,
20564 struct mempolicy *);
20565 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
20566 extern int split_vma(struct mm_struct *,
20567 struct vm_area_struct *, unsigned long addr, int new_below);
20568 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
20569 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
20570 struct rb_node **, struct rb_node *);
20571 extern void unlink_file_vma(struct vm_area_struct *);
20572 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
20573 unsigned long addr, unsigned long len, unsigned long pgoff);
20574 extern void exit_mmap(struct mm_struct *);
20575 extern int mm_take_all_locks(struct mm_struct *mm);
20576 extern void mm_drop_all_locks(struct mm_struct *mm);
20577 extern void added_exe_file_vma(struct mm_struct *mm);
20578 extern void removed_exe_file_vma(struct mm_struct *mm);
20579 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
20580 extern struct file *get_mm_exe_file(struct mm_struct *mm);
20581 extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
20582 extern int install_special_mapping(struct mm_struct *mm,
20583 unsigned long addr, unsigned long len,
20584 unsigned long flags, struct page **pages);
20585 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
20586 extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
20587 unsigned long len, unsigned long prot,
20588 unsigned long flag, unsigned long pgoff);
20589 extern unsigned long mmap_region(struct file *file, unsigned long addr,
20590 unsigned long len, unsigned long flags,
20591 vm_flags_t vm_flags, unsigned long pgoff);
20592 static inline __attribute__((always_inline)) unsigned long do_mmap(struct file *file, unsigned long addr,
20593 unsigned long len, unsigned long prot,
20594 unsigned long flag, unsigned long offset)
20596 unsigned long ret = -22;
20597 if (__builtin_constant_p((((offset + ((((len)) + ((typeof((len)))((((1UL) << 12))) - 1)) & ~((typeof((len)))((((1UL) << 12))) - 1))) < offset))) ? !!(((offset + ((((len)) + ((typeof((len)))((((1UL) << 12))) - 1)) & ~((typeof((len)))((((1UL) << 12))) - 1))) < offset)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1446, }; ______r = !!(((offset + ((((len)) + ((typeof((len)))((((1UL) << 12))) - 1)) & ~((typeof((len)))((((1UL) << 12))) - 1))) < offset)); ______f.miss_hit[______r]++; ______r; }))
20599 if (__builtin_constant_p(((!(offset & ~(~(((1UL) << 12)-1)))))) ? !!((!(offset & ~(~(((1UL) << 12)-1))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1448, }; ______r = !!((!(offset & ~(~(((1UL) << 12)-1))))); ______f.miss_hit[______r]++; ______r; }))
20600 ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> 12);
20604 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
20605 extern unsigned long do_brk(unsigned long, unsigned long);
20606 extern unsigned long page_unuse(struct page *);
20607 extern void truncate_inode_pages(struct address_space *, loff_t);
20608 extern void truncate_inode_pages_range(struct address_space *,
20609 loff_t lstart, loff_t lend);
20610 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
20611 int write_one_page(struct page *page, int wait);
20612 void task_dirty_inc(struct task_struct *tsk);
20613 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
20614 unsigned long offset, unsigned long nr_to_read);
20615 void page_cache_sync_readahead(struct address_space *mapping,
20616 struct file_ra_state *ra,
20618 unsigned long offset,
20619 unsigned long size);
20620 void page_cache_async_readahead(struct address_space *mapping,
20621 struct file_ra_state *ra,
20624 unsigned long offset,
20625 unsigned long size);
20626 unsigned long max_sane_readahead(unsigned long nr);
20627 unsigned long ra_submit(struct file_ra_state *ra,
20628 struct address_space *mapping,
20629 struct file *filp);
20630 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
20631 extern int expand_downwards(struct vm_area_struct *vma,
20632 unsigned long address);
20633 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
20634 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
20635 struct vm_area_struct **pprev);
20636 static inline __attribute__((always_inline)) struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
20638 struct vm_area_struct * vma = find_vma(mm,start_addr);
20639 if (__builtin_constant_p(((vma && end_addr <= vma->vm_start))) ? !!((vma && end_addr <= vma->vm_start)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1519, }; ______r = !!((vma && end_addr <= vma->vm_start)); ______f.miss_hit[______r]++; ______r; }))
20643 static inline __attribute__((always_inline)) unsigned long vma_pages(struct vm_area_struct *vma)
20645 return (vma->vm_end - vma->vm_start) >> 12;
20647 pgprot_t vm_get_page_prot(unsigned long vm_flags);
20648 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
20649 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
20650 unsigned long pfn, unsigned long size, pgprot_t);
20651 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
20652 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
20653 unsigned long pfn);
20654 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
20655 unsigned long pfn);
20656 struct page *follow_page(struct vm_area_struct *, unsigned long address,
20657 unsigned int foll_flags);
20658 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
20660 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
20661 unsigned long size, pte_fn_t fn, void *data);
20662 void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
20663 static inline __attribute__((always_inline)) void
20664 kernel_map_pages(struct page *page, int numpages, int enable) {}
20665 static inline __attribute__((always_inline)) void enable_debug_pagealloc(void)
20668 static inline __attribute__((always_inline)) bool kernel_page_present(struct page *page) { return true; }
20669 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
20670 int in_gate_area_no_mm(unsigned long addr);
20671 int in_gate_area(struct mm_struct *mm, unsigned long addr);
20672 int drop_caches_sysctl_handler(struct ctl_table *, int,
20673 void *, size_t *, loff_t *);
20674 unsigned long shrink_slab(struct shrink_control *shrink,
20675 unsigned long nr_pages_scanned,
20676 unsigned long lru_pages);
20677 extern int randomize_va_space;
20678 const char * arch_vma_name(struct vm_area_struct *vma);
20679 void print_vma_addr(char *prefix, unsigned long rip);
20680 void sparse_mem_maps_populate_node(struct page **map_map,
20681 unsigned long pnum_begin,
20682 unsigned long pnum_end,
20683 unsigned long map_count,
20685 struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
20686 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
20687 pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
20688 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
20689 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
20690 void *vmemmap_alloc_block(unsigned long size, int node);
20691 void *vmemmap_alloc_block_buf(unsigned long size, int node);
20692 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
20693 int vmemmap_populate_basepages(struct page *start_page,
20694 unsigned long pages, int node);
20695 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
20696 void vmemmap_populate_print_last(void);
20698 MF_COUNT_INCREASED = 1 << 0,
20700 extern void memory_failure(unsigned long pfn, int trapno);
20701 extern int __memory_failure(unsigned long pfn, int trapno, int flags);
20702 extern int unpoison_memory(unsigned long pfn);
20703 extern int sysctl_memory_failure_early_kill;
20704 extern int sysctl_memory_failure_recovery;
20705 extern void shake_page(struct page *p, int access);
20706 extern atomic_long_t mce_bad_pages;
20707 extern int soft_offline_page(struct page *page, int flags);
20708 extern void dump_page(struct page *page);
20709 extern void clear_huge_page(struct page *page,
20710 unsigned long addr,
20711 unsigned int pages_per_huge_page);
20712 extern void copy_user_huge_page(struct page *dst, struct page *src,
20713 unsigned long addr, struct vm_area_struct *vma,
20714 unsigned int pages_per_huge_page);
20715 static inline __attribute__((always_inline)) unsigned long get_page_memtype(struct page *pg)
20717 unsigned long pg_flags = pg->flags & (1UL << PG_uncached | 1UL << PG_arch_1);
20718 if (__builtin_constant_p(((pg_flags == 0))) ? !!((pg_flags == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/cacheflush.h", .line = 28, }; ______r = !!((pg_flags == 0)); ______f.miss_hit[______r]++; ______r; }))
20720 else if (__builtin_constant_p(((pg_flags == (1UL << PG_arch_1)))) ? !!((pg_flags == (1UL << PG_arch_1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/cacheflush.h", .line = 30, }; ______r = !!((pg_flags == (1UL << PG_arch_1))); ______f.miss_hit[______r]++; ______r; }))
20721 return ((((pteval_t)(1)) << 3));
20722 else if (__builtin_constant_p(((pg_flags == (1UL << PG_uncached)))) ? !!((pg_flags == (1UL << PG_uncached))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/cacheflush.h", .line = 32, }; ______r = !!((pg_flags == (1UL << PG_uncached))); ______f.miss_hit[______r]++; ______r; }))
20723 return ((((pteval_t)(1)) << 4));
20727 static inline __attribute__((always_inline)) void set_page_memtype(struct page *pg, unsigned long memtype)
20729 unsigned long memtype_flags = 0;
20730 unsigned long old_flags;
20731 unsigned long new_flags;
20733 case ((((pteval_t)(1)) << 3)):
20734 memtype_flags = (1UL << PG_arch_1);
20736 case ((((pteval_t)(1)) << 4)):
20737 memtype_flags = (1UL << PG_uncached);
20740 memtype_flags = (1UL << PG_uncached | 1UL << PG_arch_1);
20744 old_flags = pg->flags;
20745 new_flags = (old_flags & (~(1UL << PG_uncached | 1UL << PG_arch_1))) | memtype_flags;
20746 } while (({ __typeof__(*(((&pg->flags)))) __ret; __typeof__(*(((&pg->flags)))) __old = (((old_flags))); __typeof__(*(((&pg->flags)))) __new = (((new_flags))); switch ((sizeof(*&pg->flags))) { case 1: { volatile u8 *__ptr = (volatile u8 *)(((&pg->flags))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)(((&pg->flags))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)(((&pg->flags))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; }) != old_flags);
/* NOTE(review): this file is a line-sampled dump of preprocessed kernel source;
 * the leading integer on each line is the original line number from the dump,
 * not part of the C code. Comments below annotate the declarations only. */
/* Page-attribute API (x86 <asm/cacheflush.h>). The _set_memory_* variants are
 * the underscore-prefixed raw entry points; set_memory_* are the public ones.
 * Suffixes uc/wc/wb presumably select uncached / write-combining / write-back
 * caching (standard x86 PAT terminology) — TODO confirm against pageattr.c. */
20748 int _set_memory_uc(unsigned long addr, int numpages);
20749 int _set_memory_wc(unsigned long addr, int numpages);
20750 int _set_memory_wb(unsigned long addr, int numpages);
20751 int set_memory_uc(unsigned long addr, int numpages);
20752 int set_memory_wc(unsigned long addr, int numpages);
20753 int set_memory_wb(unsigned long addr, int numpages);
/* Executability / writability / presence changers for a kernel virtual range
 * starting at addr, covering numpages pages. */
20754 int set_memory_x(unsigned long addr, int numpages);
20755 int set_memory_nx(unsigned long addr, int numpages);
20756 int set_memory_ro(unsigned long addr, int numpages);
20757 int set_memory_rw(unsigned long addr, int numpages);
20758 int set_memory_np(unsigned long addr, int numpages);
20759 int set_memory_4k(unsigned long addr, int numpages);
/* Array variants: addr is an array of addrinarray page addresses rather than
 * one contiguous range. */
20760 int set_memory_array_uc(unsigned long *addr, int addrinarray);
20761 int set_memory_array_wc(unsigned long *addr, int addrinarray);
20762 int set_memory_array_wb(unsigned long *addr, int addrinarray);
20763 int set_pages_array_uc(struct page **pages, int addrinarray);
20764 int set_pages_array_wc(struct page **pages, int addrinarray);
20765 int set_pages_array_wb(struct page **pages, int addrinarray);
/* struct page based variants of the same attribute changers. */
20766 int set_pages_uc(struct page *page, int numpages);
20767 int set_pages_wb(struct page *page, int numpages);
20768 int set_pages_x(struct page *page, int numpages);
20769 int set_pages_nx(struct page *page, int numpages);
20770 int set_pages_ro(struct page *page, int numpages);
20771 int set_pages_rw(struct page *page, int numpages);
/* Cache-line flush of [addr, addr+size) — implementation not visible here. */
20772 void clflush_cache_range(void *addr, unsigned int size);
/* Read-only kernel text/rodata support: mark_rodata_ro() seals .rodata after
 * boot; set_kernel_text_rw/ro toggle kernel text protection (used by e.g.
 * ftrace patching elsewhere — not shown in this dump). */
20773 void mark_rodata_ro(void);
20774 extern const int rodata_test_data;
20775 extern int kernel_set_to_readonly;
20776 void set_kernel_text_rw(void);
20777 void set_kernel_text_ro(void);
20778 static inline __attribute__((always_inline)) int rodata_test(void)
20782 static inline __attribute__((always_inline)) void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
20785 static inline __attribute__((always_inline)) void flush_kernel_dcache_page(struct page *page)
20788 static inline __attribute__((always_inline)) void flush_kernel_vmap_range(void *vaddr, int size)
20791 static inline __attribute__((always_inline)) void invalidate_kernel_vmap_range(void *vaddr, int size)
20794 static inline __attribute__((always_inline)) void __native_flush_tlb(void)
20796 native_write_cr3(native_read_cr3());
20798 static inline __attribute__((always_inline)) void __native_flush_tlb_global(void)
20800 unsigned long flags;
20802 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0);
20803 cr4 = native_read_cr4();
20804 native_write_cr4(cr4 & ~0x00000080);
20805 native_write_cr4(cr4);
20806 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0);
20808 static inline __attribute__((always_inline)) void __native_flush_tlb_single(unsigned long addr)
20810 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
20812 static inline __attribute__((always_inline)) void __flush_tlb_all(void)
20814 if (__builtin_constant_p((((__builtin_constant_p((0*32+13)) && ( ((((0*32+13))>>5)==0 && (1UL<<(((0*32+13))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+13))>>5)==1 && (1UL<<(((0*32+13))&31) & (0|0))) || ((((0*32+13))>>5)==2 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==3 && (1UL<<(((0*32+13))&31) & (0))) || ((((0*32+13))>>5)==4 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==5 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==6 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==7 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==8 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==9 && (1UL<<(((0*32+13))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+13))) ? constant_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+13)) && ( ((((0*32+13))>>5)==0 && (1UL<<(((0*32+13))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+13))>>5)==1 && (1UL<<(((0*32+13))&31) & (0|0))) || ((((0*32+13))>>5)==2 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==3 && (1UL<<(((0*32+13))&31) & (0))) || ((((0*32+13))>>5)==4 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==5 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==6 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==7 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==8 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==9 && (1UL<<(((0*32+13))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+13))) ? 
constant_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/tlbflush.h", .line = 51, }; ______r = !!(((__builtin_constant_p((0*32+13)) && ( ((((0*32+13))>>5)==0 && (1UL<<(((0*32+13))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+13))>>5)==1 && (1UL<<(((0*32+13))&31) & (0|0))) || ((((0*32+13))>>5)==2 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==3 && (1UL<<(((0*32+13))&31) & (0))) || ((((0*32+13))>>5)==4 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==5 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==6 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==7 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==8 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==9 && (1UL<<(((0*32+13))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+13))) ? constant_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; }))
20815 __flush_tlb_global();
20819 static inline __attribute__((always_inline)) void __flush_tlb_one(unsigned long addr)
20821 if (__builtin_constant_p(((1))) ? !!((1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/tlbflush.h", .line = 59, }; ______r = !!((1)); ______f.miss_hit[______r]++; ______r; }))
20822 __flush_tlb_single(addr);
/* Cross-CPU TLB shootdown entry points (x86 <asm/tlbflush.h>); implemented
 * out of line elsewhere. Granularity narrows top to bottom: whole TLB,
 * current task, one mm, one page of a VMA. */
20826 extern void flush_tlb_all(void);
20827 extern void flush_tlb_current_task(void);
20828 extern void flush_tlb_mm(struct mm_struct *);
20829 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
20830 static inline __attribute__((always_inline)) void flush_tlb_range(struct vm_area_struct *vma,
20831 unsigned long start, unsigned long end)
20833 flush_tlb_mm(vma->vm_mm);
20835 void native_flush_tlb_others(const struct cpumask *cpumask,
20836 struct mm_struct *mm, unsigned long va);
20838 struct mm_struct *active_mm;
20841 extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tlb_state) cpu_tlbstate __attribute__((__aligned__((1 << (6)))));
20842 static inline __attribute__((always_inline)) void reset_lazy_tlbstate(void)
20844 do { typedef typeof(cpu_tlbstate.state) pto_T__; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/tlbflush.h", .line = 159, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pto_T__ pto_tmp__; pto_tmp__ = (0); (void)pto_tmp__; } switch (sizeof(cpu_tlbstate.state)) { case 1: asm("mov" "b %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.state) : "qi" ((pto_T__)(0))); break; case 2: asm("mov" "w %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.state) : "ri" ((pto_T__)(0))); break; case 4: asm("mov" "l %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.state) : "ri" ((pto_T__)(0))); break; case 8: asm("mov" "q %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.state) : "re" ((pto_T__)(0))); break; default: __bad_percpu_size(); } } while (0);
20845 do { typedef typeof(cpu_tlbstate.active_mm) pto_T__; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/tlbflush.h", .line = 160, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pto_T__ pto_tmp__; pto_tmp__ = (&init_mm); (void)pto_tmp__; } switch (sizeof(cpu_tlbstate.active_mm)) { case 1: asm("mov" "b %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.active_mm) : "qi" ((pto_T__)(&init_mm))); break; case 2: asm("mov" "w %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.active_mm) : "ri" ((pto_T__)(&init_mm))); break; case 4: asm("mov" "l %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.active_mm) : "ri" ((pto_T__)(&init_mm))); break; case 8: asm("mov" "q %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.active_mm) : "re" ((pto_T__)(&init_mm))); break; default: __bad_percpu_size(); } } while (0);
20847 static inline __attribute__((always_inline)) void flush_tlb_kernel_range(unsigned long start,
/* Highmem / kmap API (x86 32-bit <asm/highmem.h> + <linux/highmem.h>).
 * highstart_pfn/highend_pfn bound the high-memory PFN range. */
20852 extern unsigned long highstart_pfn, highend_pfn;
/* kmap_high/kunmap_high: slow-path mapping of a highmem page into the
 * persistent kmap area; kmap/kunmap are the public wrappers. kmap may
 * sleep (persistent map), unlike the *_atomic variants below —
 * NOTE(review): sleeping behavior inferred from standard kmap contract,
 * bodies not visible in this dump. */
20853 extern void *kmap_high(struct page *page);
20854 extern void kunmap_high(struct page *page);
20855 void *kmap(struct page *page);
20856 void kunmap(struct page *page);
/* Atomic (per-CPU fixmap slot) mappings; prot/pfn variants allow a caller
 * supplied pgprot_t or mapping by raw page-frame number. */
20857 void *kmap_atomic_prot(struct page *page, pgprot_t prot);
20858 void *__kmap_atomic(struct page *page);
20859 void __kunmap_atomic(void *kvaddr);
20860 void *kmap_atomic_pfn(unsigned long pfn);
20861 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
/* Reverse lookup: which struct page backs an atomic-kmap address. */
20862 struct page *kmap_atomic_to_page(void *ptr);
20863 extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
20864 unsigned long end_pfn);
20865 unsigned int nr_free_highpages(void);
20866 extern unsigned long totalhigh_pages;
/* Drop any cached-but-unused persistent kmap entries. */
20867 void kmap_flush_unused(void);
/* Per-CPU stack index for nested atomic kmaps; manipulated by the
 * kmap_atomic_idx_push/pop helpers that follow (truncated in this dump). */
20868 extern __attribute__((section(".data..percpu" ""))) __typeof__(int) __kmap_atomic_idx;
20869 static inline __attribute__((always_inline)) int kmap_atomic_idx_push(void)
20871 int idx = ({ typeof(__kmap_atomic_idx) pscr2_ret__; do { const void *__vpp_verify = (typeof(&(__kmap_atomic_idx)))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(__kmap_atomic_idx)) { case 1: pscr2_ret__ = ({ typeof(__kmap_atomic_idx) paro_ret__ = 1; switch (sizeof(__kmap_atomic_idx)) { case 1: asm("xaddb %0, ""%%""fs"":" "%P" "1" : "+q" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 2: asm("xaddw %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 4: asm("xaddl %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 8: asm("xaddq %0, ""%%""fs"":" "%P" "1" : "+re" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; default: __bad_percpu_size(); } paro_ret__ += 1; paro_ret__; }); break; case 2: pscr2_ret__ = ({ typeof(__kmap_atomic_idx) paro_ret__ = 1; switch (sizeof(__kmap_atomic_idx)) { case 1: asm("xaddb %0, ""%%""fs"":" "%P" "1" : "+q" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 2: asm("xaddw %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 4: asm("xaddl %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 8: asm("xaddq %0, ""%%""fs"":" "%P" "1" : "+re" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; default: __bad_percpu_size(); } paro_ret__ += 1; paro_ret__; }); break; case 4: pscr2_ret__ = ({ typeof(__kmap_atomic_idx) paro_ret__ = 1; switch (sizeof(__kmap_atomic_idx)) { case 1: asm("xaddb %0, ""%%""fs"":" "%P" "1" : "+q" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 2: asm("xaddw %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 4: asm("xaddl %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 8: asm("xaddq %0, ""%%""fs"":" "%P" "1" : "+re" (paro_ret__), "+m" (__kmap_atomic_idx) : : 
"memory"); break; default: __bad_percpu_size(); } paro_ret__ += 1; paro_ret__; }); break; case 8: pscr2_ret__ = ({ typeof(__kmap_atomic_idx) ret__; do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); do { do { const void *__vpp_verify = (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((__kmap_atomic_idx))) { case 1: do { typedef typeof(((__kmap_atomic_idx))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof(((__kmap_atomic_idx)))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof(((__kmap_atomic_idx))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof(((__kmap_atomic_idx)))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof(((__kmap_atomic_idx))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? 
!!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof(((__kmap_atomic_idx)))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&(((__kmap_atomic_idx)))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&(((__kmap_atomic_idx))))); (typeof(*(&(((__kmap_atomic_idx))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); ret__ = ({ typeof((__kmap_atomic_idx)) pscr_ret__; do { const void *__vpp_verify = (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((__kmap_atomic_idx))) { case 1: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 2: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : 
"m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 4: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 8: pscr_ret__ = (*({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((__kmap_atomic_idx)))); (typeof(*(&((__kmap_atomic_idx)))) *)tcp_ptr__; }));break; default: __bad_size_call_parameter();break; } pscr_ret__; }); do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); ret__; }); break; default: __bad_size_call_parameter(); break; } pscr2_ret__; }) - 1;
20874 static inline __attribute__((always_inline)) int kmap_atomic_idx(void)
20876 return ({ typeof((__kmap_atomic_idx)) pscr_ret__; do { const void *__vpp_verify = (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((__kmap_atomic_idx))) { case 1: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 2: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 4: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 8: pscr_ret__ = (*({ unsigned long tcp_ptr__; do { const void *__vpp_verify = 
(typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((__kmap_atomic_idx)))); (typeof(*(&((__kmap_atomic_idx)))) *)tcp_ptr__; }));break; default: __bad_size_call_parameter();break; } pscr_ret__; }) - 1;
20878 static inline __attribute__((always_inline)) void kmap_atomic_idx_pop(void)
20880 do { do { const void *__vpp_verify = (typeof(&((((__kmap_atomic_idx))))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((((__kmap_atomic_idx))))) { case 1: do { typedef typeof(((((__kmap_atomic_idx))))) pao_T__; const int pao_ID__ = (__builtin_constant_p((-(1))) && (((-(1))) == 1 || ((-(1))) == -1)) ? ((-(1))) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((-(1))); (void)pao_tmp__; } switch (sizeof(((((__kmap_atomic_idx)))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "qi" ((pao_T__)((-(1))))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "re" ((pao_T__)((-(1))))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof(((((__kmap_atomic_idx))))) pao_T__; const int pao_ID__ = (__builtin_constant_p((-(1))) && (((-(1))) == 1 || ((-(1))) == -1)) ? ((-(1))) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((-(1))); (void)pao_tmp__; } switch (sizeof(((((__kmap_atomic_idx)))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "qi" ((pao_T__)((-(1))))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "re" ((pao_T__)((-(1))))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof(((((__kmap_atomic_idx))))) pao_T__; const int pao_ID__ = (__builtin_constant_p((-(1))) && (((-(1))) == 1 || ((-(1))) == -1)) ? ((-(1))) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((-(1))); (void)pao_tmp__; } switch (sizeof(((((__kmap_atomic_idx)))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "qi" ((pao_T__)((-(1))))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "re" ((pao_T__)((-(1))))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&(((((__kmap_atomic_idx)))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&(((((__kmap_atomic_idx))))))); (typeof(*(&(((((__kmap_atomic_idx))))))) *)tcp_ptr__; }) += ((-(1))); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
20882 static inline __attribute__((always_inline)) void clear_user_highpage(struct page *page, unsigned long vaddr)
20884 void *addr = __kmap_atomic(page);
20885 clear_user_page(addr, vaddr, page);
20886 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((addr)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((addr)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((addr)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 132, }; ______r = !!((__builtin_types_compatible_p(typeof((addr)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(addr); } while (0);
20888 static inline __attribute__((always_inline)) struct page *
20889 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
20890 unsigned long vaddr)
20892 return alloc_pages_node(numa_node_id(), ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( gfp_t)0x20000u) | (( gfp_t)0x02u)) | (( gfp_t)0x8000u) | (( gfp_t)0x08u), 0);
20894 static inline __attribute__((always_inline)) void clear_highpage(struct page *page)
20896 void *kaddr = __kmap_atomic(page);
20898 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 185, }; ______r = !!((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(kaddr); } while (0);
20900 static inline __attribute__((always_inline)) void zero_user_segments(struct page *page,
20901 unsigned start1, unsigned end1,
20902 unsigned start2, unsigned end2)
20904 void *kaddr = __kmap_atomic(page);
20905 do { if (__builtin_constant_p((((__builtin_constant_p(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) ? !!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 194, }; ______r = __builtin_expect(!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) ? !!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 194, }; ______r = __builtin_expect(!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 194, }; ______r = !!(((__builtin_constant_p(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) ? 
!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 194, }; ______r = __builtin_expect(!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/highmem.h"), "i" (194), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
20906 if (__builtin_constant_p(((end1 > start1))) ? !!((end1 > start1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 196, }; ______r = !!((end1 > start1)); ______f.miss_hit[______r]++; ______r; }))
20907 __builtin_memset(kaddr + start1, 0, end1 - start1);
20908 if (__builtin_constant_p(((end2 > start2))) ? !!((end2 > start2)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 199, }; ______r = !!((end2 > start2)); ______f.miss_hit[______r]++; ______r; }))
20909 __builtin_memset(kaddr + start2, 0, end2 - start2);
20910 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 202, }; ______r = !!((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(kaddr); } while (0);
20913 static inline __attribute__((always_inline)) void zero_user_segment(struct page *page,
20914 unsigned start, unsigned end)
20916 zero_user_segments(page, start, end, 0, 0);
20918 static inline __attribute__((always_inline)) void zero_user(struct page *page,
20919 unsigned start, unsigned size)
20921 zero_user_segments(page, start, start + size, 0, 0);
20923 static inline __attribute__((always_inline)) void __attribute__((deprecated)) memclear_highpage_flush(struct page *page,
20924 unsigned int offset, unsigned int size)
20926 zero_user(page, offset, size);
20928 static inline __attribute__((always_inline)) void copy_user_highpage(struct page *to, struct page *from,
20929 unsigned long vaddr, struct vm_area_struct *vma)
20932 vfrom = __kmap_atomic(from);
20933 vto = __kmap_atomic(to);
20934 copy_user_page(vto, vfrom, vaddr, to);
20935 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 234, }; ______r = !!((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vto); } while (0);
20936 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 235, }; ______r = !!((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vfrom); } while (0);
20938 static inline __attribute__((always_inline)) void copy_highpage(struct page *to, struct page *from)
20941 vfrom = __kmap_atomic(from);
20942 vto = __kmap_atomic(to);
20943 copy_page(vto, vfrom);
20944 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 247, }; ______r = !!((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vto); } while (0);
20945 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 248, }; ______r = !!((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vfrom); } while (0);
20947 struct scatterlist {
20948 unsigned long page_link;
20949 unsigned int offset;
20950 unsigned int length;
20951 dma_addr_t dma_address;
20952 unsigned int dma_length;
20955 struct scatterlist *sgl;
20956 unsigned int nents;
20957 unsigned int orig_nents;
20959 static inline __attribute__((always_inline)) void sg_assign_page(struct scatterlist *sg, struct page *page)
20961 unsigned long page_link = sg->page_link & 0x3;
20962 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned long) page & 0x03) ? !!((unsigned long) page & 0x03) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/scatterlist.h", .line = 63, }; ______r = __builtin_expect(!!((unsigned long) page & 0x03), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned long) page & 0x03) ? !!((unsigned long) page & 0x03) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/scatterlist.h", .line = 63, }; ______r = __builtin_expect(!!((unsigned long) page & 0x03), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/scatterlist.h", .line = 63, }; ______r = !!(((__builtin_constant_p((unsigned long) page & 0x03) ? !!((unsigned long) page & 0x03) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/scatterlist.h", .line = 63, }; ______r = __builtin_expect(!!((unsigned long) page & 0x03), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/scatterlist.h"), "i" (63), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
20963 sg->page_link = page_link | (unsigned long) page;
20965 static inline __attribute__((always_inline)) void sg_set_page(struct scatterlist *sg, struct page *page,
20966 unsigned int len, unsigned int offset)
20968 sg_assign_page(sg, page);
20969 sg->offset = offset;
20972 static inline __attribute__((always_inline)) struct page *sg_page(struct scatterlist *sg)
20974 return (struct page *)((sg)->page_link & ~0x3);
20976 static inline __attribute__((always_inline)) void sg_set_buf(struct scatterlist *sg, const void *buf,
20977 unsigned int buflen)
20979 sg_set_page(sg, (mem_map + (((((unsigned long)(buf)) - ((unsigned long)(0xC0000000UL))) >> 12) - (0UL))), buflen, ((unsigned long)(buf) & ~(~(((1UL) << 12)-1))));
20981 static inline __attribute__((always_inline)) void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
20982 struct scatterlist *sgl)
20984 prv[prv_nents - 1].offset = 0;
20985 prv[prv_nents - 1].length = 0;
20986 prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
20988 static inline __attribute__((always_inline)) void sg_mark_end(struct scatterlist *sg)
20990 sg->page_link |= 0x02;
20991 sg->page_link &= ~0x01;
20993 static inline __attribute__((always_inline)) dma_addr_t sg_phys(struct scatterlist *sg)
20995 return ((dma_addr_t)((unsigned long)((sg_page(sg)) - mem_map) + (0UL)) << 12) + sg->offset;
20997 static inline __attribute__((always_inline)) void *sg_virt(struct scatterlist *sg)
20999 return page_address(sg_page(sg)) + sg->offset;
21001 struct scatterlist *sg_next(struct scatterlist *);
21002 struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
21003 void sg_init_table(struct scatterlist *, unsigned int);
21004 void sg_init_one(struct scatterlist *, const void *, unsigned int);
21005 typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
21006 typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
21007 void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
21008 void sg_free_table(struct sg_table *);
21009 int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
21011 int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
21012 size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
21013 void *buf, size_t buflen);
21014 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
21015 void *buf, size_t buflen);
21016 struct sg_mapping_iter {
21021 struct scatterlist *__sg;
21022 unsigned int __nents;
21023 unsigned int __offset;
21024 unsigned int __flags;
21026 void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
21027 unsigned int nents, unsigned int flags);
21028 bool sg_miter_next(struct sg_mapping_iter *miter);
21029 void sg_miter_stop(struct sg_mapping_iter *miter);
21030 static inline __attribute__((always_inline)) enum km_type crypto_kmap_type(int out)
21033 if (__builtin_constant_p((((((current_thread_info()->preempt_count) & (((1UL << (8))-1) << (0 + 8))))))) ? !!(((((current_thread_info()->preempt_count) & (((1UL << (8))-1) << (0 + 8)))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 32, }; ______r = !!(((((current_thread_info()->preempt_count) & (((1UL << (8))-1) << (0 + 8)))))); ______f.miss_hit[______r]++; ______r; }))
21034 type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
21036 type = out * (KM_USER1 - KM_USER0) + KM_USER0;
21039 static inline __attribute__((always_inline)) void *crypto_kmap(struct page *page, int out)
21041 return __kmap_atomic(page);
/*
 * Preprocessed (cpp-expanded) capture of include/crypto/scatterwalk.h inline
 * helpers.  The "_ftrace_branch" statement-expressions below are the expansion
 * of the kernel's branch-profiling if()/unlikely() macros.
 * NOTE(review): this capture elides some original lines (braces etc., visible
 * in the jumps of the embedded original line numbers), so bodies may look
 * incomplete; all code text is kept byte-identical.
 */
/*
 * crypto_kunmap - tear down a temporary atomic kmapping of scatterlist data.
 * The BUILD_BUG_ON expansion rejects accidentally passing a struct page *
 * where a mapped virtual address is expected; the real work is
 * __kunmap_atomic(vaddr).  'out' is unused in this expansion.
 */
21043 static inline __attribute__((always_inline)) void crypto_kunmap(void *vaddr, int out)
21045 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vaddr)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vaddr)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vaddr)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 47, }; ______r = !!((__builtin_types_compatible_p(typeof((vaddr)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vaddr); } while (0);
/*
 * crypto_yield - give up the CPU between scatterwalk steps when the caller
 * allows sleeping.  Flag 0x00000200 presumably is CRYPTO_TFM_REQ_MAY_SLEEP —
 * TODO confirm against crypto.h.
 */
21047 static inline __attribute__((always_inline)) void crypto_yield(u32 flags)
21049 if (__builtin_constant_p(((flags & 0x00000200))) ? !!((flags & 0x00000200)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 52, }; ______r = !!((flags & 0x00000200)); ______f.miss_hit[______r]++; ______r; }))
21050 ({ __might_sleep("include/crypto/scatterwalk.h", 53, 0); _cond_resched(); });
/*
 * scatterwalk_sg_chain - chain list sg2 onto the last slot of sg1 by storing
 * the sg2 pointer via sg_set_page, then clearing bit 0x02 in page_link.
 * NOTE(review): 0x02 appears to be the sg end-of-list marker bit — confirm
 * against scatterlist.h.
 */
21052 static inline __attribute__((always_inline)) void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
21053 struct scatterlist *sg2)
21055 sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
21056 sg1[num - 1].page_link &= ~0x02;
/*
 * scatterwalk_sg_next - advance to the next scatterlist entry: NULL at the
 * end marker (page_link & 0x02); otherwise the next entry, following a chain
 * pointer (stored where the page normally lives) when its length is 0.
 */
21058 static inline __attribute__((always_inline)) struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
21060 if (__builtin_constant_p(((((sg)->page_link & 0x02)))) ? !!((((sg)->page_link & 0x02))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 65, }; ______r = !!((((sg)->page_link & 0x02))); ______f.miss_hit[______r]++; ______r; }))
21061 return ((void *)0);
21062 return (++sg)->length ? sg : (void *)sg_page(sg);
/*
 * scatterwalk_crypto_chain - attach 'sg' after 'head': when 'chain' is set,
 * fold sg's length into head and link directly; otherwise chain only if sg
 * is non-NULL.  (Some original lines are elided in this capture.)
 */
21064 static inline __attribute__((always_inline)) void scatterwalk_crypto_chain(struct scatterlist *head,
21065 struct scatterlist *sg,
21066 int chain, int num)
21068 if (__builtin_constant_p(((chain))) ? !!((chain)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 75, }; ______r = !!((chain)); ______f.miss_hit[______r]++; ______r; })) {
21069 head->length += sg->length;
21070 sg = scatterwalk_sg_next(sg);
21072 if (__builtin_constant_p(((sg))) ? !!((sg)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 80, }; ______r = !!((sg)); ______f.miss_hit[______r]++; ______r; }))
21073 scatterwalk_sg_chain(head, num, sg);
/*
 * scatterwalk_samebuf - nonzero when both walks reference the same buffer
 * position: identical page (difference shifted by 12, i.e. 4 KiB pages) and
 * identical intra-page offset.
 */
21077 static inline __attribute__((always_inline)) unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
21078 struct scatter_walk *walk_out)
21080 return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << 12) +
21081 (int)(walk_in->offset - walk_out->offset));
/*
 * scatterwalk_pagelen - bytes usable at the current walk position: the
 * smaller of what remains in the current sg entry and what remains in the
 * current 4 KiB page.
 */
21083 static inline __attribute__((always_inline)) unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
21085 unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
21086 unsigned int len_this_page = ((unsigned long)(~walk->offset) & ~(~(((1UL) << 12)-1))) + 1;
21087 return len_this_page > len ? len : len_this_page;
/* scatterwalk_clamp - limit nbytes to what the current page/entry provides. */
21089 static inline __attribute__((always_inline)) unsigned int scatterwalk_clamp(struct scatter_walk *walk,
21090 unsigned int nbytes)
21092 unsigned int len_this_page = scatterwalk_pagelen(walk);
21093 return nbytes > len_this_page ? len_this_page : nbytes;
/* scatterwalk_advance - move the walk cursor forward by nbytes. */
21095 static inline __attribute__((always_inline)) void scatterwalk_advance(struct scatter_walk *walk,
21096 unsigned int nbytes)
21098 walk->offset += nbytes;
/* scatterwalk_aligned - true when the walk offset satisfies alignmask. */
21100 static inline __attribute__((always_inline)) unsigned int scatterwalk_aligned(struct scatter_walk *walk,
21101 unsigned int alignmask)
21103 return !(walk->offset & alignmask);
/* scatterwalk_page - page containing the current offset (offset >> 12). */
21105 static inline __attribute__((always_inline)) struct page *scatterwalk_page(struct scatter_walk *walk)
21107 return sg_page(walk->sg) + (walk->offset >> 12);
/* scatterwalk_unmap - thin wrapper over crypto_kunmap. */
21109 static inline __attribute__((always_inline)) void scatterwalk_unmap(void *vaddr, int out)
21111 crypto_kunmap(vaddr, out);
21113 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
21114 void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
21115 size_t nbytes, int out);
21116 void *scatterwalk_map(struct scatter_walk *walk, int out);
21117 void scatterwalk_done(struct scatter_walk *walk, int out, int more);
21118 void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
21119 unsigned int start, unsigned int nbytes, int out);
/*
 * Expanded include/crypto/aead.h: the IV-generating AEAD ("givcrypt")
 * request wrappers.  Each helper simply forwards to the plain aead_request
 * API on the embedded 'areq'.  NOTE(review): several member lines of the
 * struct and some function bodies are elided in this capture.
 */
21120 struct aead_givcrypt_request {
21123 struct aead_request areq;
/* aead_givcrypt_reqtfm - transform bound to this givcrypt request. */
21125 static inline __attribute__((always_inline)) struct crypto_aead *aead_givcrypt_reqtfm(
21126 struct aead_givcrypt_request *req)
21128 return crypto_aead_reqtfm(&req->areq);
/* crypto_aead_givencrypt - dispatch to the tfm's givencrypt operation. */
21130 static inline __attribute__((always_inline)) int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
21132 struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
21133 return crt->givencrypt(req);
/* crypto_aead_givdecrypt - dispatch to the tfm's givdecrypt operation. */
21135 static inline __attribute__((always_inline)) int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
21137 struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
21138 return crt->givdecrypt(req);
/* aead_givcrypt_set_tfm - bind a transform to the request. */
21140 static inline __attribute__((always_inline)) void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
21141 struct crypto_aead *tfm)
21143 req->areq.base.tfm = crypto_aead_tfm(tfm);
/*
 * aead_givcrypt_alloc - kmalloc a request sized for the tfm's reqsize and,
 * on success (the huge expansion below is just likely(req)), bind the tfm.
 */
21145 static inline __attribute__((always_inline)) struct aead_givcrypt_request *aead_givcrypt_alloc(
21146 struct crypto_aead *tfm, gfp_t gfp)
21148 struct aead_givcrypt_request *req;
21149 req = kmalloc(sizeof(struct aead_givcrypt_request) +
21150 crypto_aead_reqsize(tfm), gfp);
21151 if (__builtin_constant_p((((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/aead.h", .line = 65, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/aead.h", .line = 65, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/aead.h", .line = 65, }; ______r = !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/aead.h", .line = 65, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
21152 aead_givcrypt_set_tfm(req, tfm);
/* aead_givcrypt_free - body elided in this capture (presumably kfree(req)). */
21155 static inline __attribute__((always_inline)) void aead_givcrypt_free(struct aead_givcrypt_request *req)
/* aead_givcrypt_set_callback - forward completion callback to base request. */
21159 static inline __attribute__((always_inline)) void aead_givcrypt_set_callback(
21160 struct aead_givcrypt_request *req, u32 flags,
21161 crypto_completion_t complete, void *data)
21163 aead_request_set_callback(&req->areq, flags, complete, data);
/* aead_givcrypt_set_crypt - forward src/dst/nbytes/iv to the base request. */
21165 static inline __attribute__((always_inline)) void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
21166 struct scatterlist *src,
21167 struct scatterlist *dst,
21168 unsigned int nbytes, void *iv)
21170 aead_request_set_crypt(&req->areq, src, dst, nbytes, iv);
/* aead_givcrypt_set_assoc - forward associated data to the base request. */
21172 static inline __attribute__((always_inline)) void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req,
21173 struct scatterlist *assoc,
21174 unsigned int assoclen)
21176 aead_request_set_assoc(&req->areq, assoc, assoclen);
/* aead_givcrypt_set_giv - body elided in this capture. */
21178 static inline __attribute__((always_inline)) void aead_givcrypt_set_giv(struct aead_givcrypt_request *req,
/*
 * Expanded crypto/aead internal API: the aead spawn (template instance
 * dependency) helpers and geniv accessors.  All are thin forwarding
 * wrappers over the generic crypto_spawn / aead_request APIs.
 */
21185 struct crypto_aead_spawn {
21186 struct crypto_spawn base;
21188 extern const struct crypto_type crypto_nivaead_type;
/* crypto_set_aead_spawn - associate the spawn with a template instance. */
21189 static inline __attribute__((always_inline)) void crypto_set_aead_spawn(
21190 struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
21192 crypto_set_spawn(&spawn->base, inst);
21194 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
21195 u32 type, u32 mask);
/* crypto_drop_aead - release the underlying spawn reference. */
21196 static inline __attribute__((always_inline)) void crypto_drop_aead(struct crypto_aead_spawn *spawn)
21198 crypto_drop_spawn(&spawn->base);
/* crypto_aead_spawn_alg - algorithm the spawn currently resolves to. */
21200 static inline __attribute__((always_inline)) struct crypto_alg *crypto_aead_spawn_alg(
21201 struct crypto_aead_spawn *spawn)
21203 return spawn->base.alg;
/*
 * crypto_spawn_aead - instantiate an aead tfm from the spawn.
 * 0x00000003 is the type argument to crypto_spawn_tfm; the mask argument
 * line is elided in this capture.
 */
21205 static inline __attribute__((always_inline)) struct crypto_aead *crypto_spawn_aead(
21206 struct crypto_aead_spawn *spawn)
21208 return __crypto_aead_cast(
21209 crypto_spawn_tfm(&spawn->base, 0x00000003,
21212 struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
21213 struct rtattr **tb, u32 type,
21215 void aead_geniv_free(struct crypto_instance *inst);
21216 int aead_geniv_init(struct crypto_tfm *tfm);
21217 void aead_geniv_exit(struct crypto_tfm *tfm);
/* aead_geniv_base - underlying aead a geniv wrapper delegates to. */
21218 static inline __attribute__((always_inline)) struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv)
21220 return crypto_aead_crt(geniv)->base;
/* aead_givcrypt_reqctx - per-request private context area. */
21222 static inline __attribute__((always_inline)) void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req)
21224 return aead_request_ctx(&req->areq);
/* aead_givcrypt_complete - signal completion of the embedded base request. */
21226 static inline __attribute__((always_inline)) void aead_givcrypt_complete(struct aead_givcrypt_request *req,
21229 aead_request_complete(&req->areq, err);
/*
 * Per-tfm context types for the AES-NI glue driver
 * (arch/x86/crypto/aesni-intel_glue.c).  NOTE(review): some member and
 * closing-brace lines are elided in this capture.
 */
/* async ablkcipher context: handle to the cryptd-backed child transform. */
21231 struct async_aes_ctx {
21232 struct cryptd_ablkcipher *cryptd_tfm;
/* RFC4106 GCM context: GHASH subkey plus the expanded AES key. */
21234 struct aesni_rfc4106_gcm_ctx {
21235 u8 hash_subkey[16];
21236 struct crypto_aes_ctx aes_key_expanded;
21238 struct cryptd_aead *cryptd_tfm;
/* completion carrier for async hash-subkey derivation. */
21240 struct aesni_gcm_set_hash_subkey_result {
21242 struct completion completion;
/* request bundle used while deriving the GCM hash subkey. */
21244 struct aesni_hash_subkey_req_data {
21246 struct aesni_gcm_set_hash_subkey_result result;
21247 struct scatterlist sg;
21249 __attribute__((regparm(0))) int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
21250 unsigned int key_len);
21251 __attribute__((regparm(0))) void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
21253 __attribute__((regparm(0))) void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
21255 __attribute__((regparm(0))) void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
21256 const u8 *in, unsigned int len);
21257 __attribute__((regparm(0))) void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
21258 const u8 *in, unsigned int len);
21259 __attribute__((regparm(0))) void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
21260 const u8 *in, unsigned int len, u8 *iv);
21261 __attribute__((regparm(0))) void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
21262 const u8 *in, unsigned int len, u8 *iv);
21263 int crypto_fpu_init(void);
21264 void crypto_fpu_exit(void);
/*
 * aes_ctx - round the raw tfm context pointer up to 16-byte alignment
 * (required by the AES-NI SSE instructions).  The branch expansion is a
 * compile-time check that the crypto API's own alignment does not already
 * cover 16 bytes; the elided lines presumably return raw_ctx directly in
 * that case — TODO confirm against the original source.
 */
21265 static inline __attribute__((always_inline)) struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
21267 unsigned long addr = (unsigned long)raw_ctx;
21268 unsigned long align = (16);
21269 if (__builtin_constant_p(((align <= crypto_tfm_ctx_alignment()))) ? !!((align <= crypto_tfm_ctx_alignment())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 162, }; ______r = !!((align <= crypto_tfm_ctx_alignment())); ______f.miss_hit[______r]++; ______r; }))
21271 return (struct crypto_aes_ctx *)((((addr)) + ((typeof((addr)))((align)) - 1)) & ~((typeof((addr)))((align)) - 1));
/*
 * aes_set_key_common - validate and expand an AES key.
 * Rejects lengths other than 16/24/32 bytes, setting tfm flag 0x00200000
 * (presumably CRYPTO_TFM_RES_BAD_KEY_LEN — confirm).  Key expansion uses
 * the aesni_set_key asm stub inside a kernel_fpu_begin() section when the
 * FPU is usable, falling back to the C crypto_aes_expand_key otherwise.
 * NOTE(review): 'int err;', kernel_fpu_end() and the returns are elided
 * from this capture.
 */
21273 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
21274 const u8 *in_key, unsigned int key_len)
21276 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
21277 u32 *flags = &tfm->crt_flags;
21279 if (__builtin_constant_p(((key_len != 16 && key_len != 24 && key_len != 32))) ? !!((key_len != 16 && key_len != 24 && key_len != 32)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
21280 "arch/x86/crypto/aesni-intel_glue.c"
21283 , }; ______r = !!((key_len != 16 && key_len != 24 && key_len != 32)); ______f.miss_hit[______r]++; ______r; }))
21285 *flags |= 0x00200000;
21288 if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 180, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; }))
21289 err = crypto_aes_expand_key(ctx, in_key, key_len);
21291 kernel_fpu_begin();
21292 err = aesni_set_key(ctx, in_key, key_len);
/* aes_set_key - crypto API entry point; uses the tfm's own context area. */
21297 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
21298 unsigned int key_len)
21300 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
/*
 * aes_encrypt - single-block encrypt: AES-NI under kernel_fpu_begin() when
 * the FPU is usable, C fallback (crypto_aes_encrypt_x86) otherwise.
 * kernel_fpu_end() and closing braces are elided in this capture.
 */
21302 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
21304 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
21305 if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 201, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; }))
21306 crypto_aes_encrypt_x86(ctx, dst, src);
21308 kernel_fpu_begin();
21309 aesni_enc(ctx, dst, src);
/* aes_decrypt - single-block decrypt, mirror of aes_encrypt. */
21313 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
21315 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
21316 if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 214, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; }))
21317 crypto_aes_decrypt_x86(ctx, dst, src);
21319 kernel_fpu_begin();
21320 aesni_dec(ctx, dst, src);
/*
 * aesni_alg - public single-block cipher registration ("aes" / "aes-aesni").
 * cra_ctxsize reserves sizeof(ctx)+15 so aes_ctx() can realign to 16 bytes.
 * NOTE(review): .cra_name, .cra_u wrapper lines and closing braces are
 * elided in this capture.
 */
21324 static struct crypto_alg aesni_alg = {
21326 .cra_driver_name = "aes-aesni",
21327 .cra_priority = 300,
21328 .cra_flags = 0x00000001,
21329 .cra_blocksize = 16,
21330 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+(16)-1,
21331 .cra_alignmask = 0,
21332 .cra_module = (&__this_module),
21333 .cra_list = { &(aesni_alg.cra_list), &(aesni_alg.cra_list) },
21336 .cia_min_keysize = 16,
21337 .cia_max_keysize = 32,
21338 .cia_setkey = aes_set_key,
21339 .cia_encrypt = aes_encrypt,
21340 .cia_decrypt = aes_decrypt
/*
 * __aes_encrypt/__aes_decrypt - FPU-assumed variants used by the internal
 * "__aes-aesni" cipher: no irq_fpu_usable() check, callers must already
 * hold the FPU (e.g. under the fpu() template).
 */
21344 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
21346 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
21347 aesni_enc(ctx, dst, src);
21349 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
21351 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
21352 aesni_dec(ctx, dst, src);
/* __aesni_alg - internal helper cipher, same shape as aesni_alg above. */
21354 static struct crypto_alg __aesni_alg = {
21355 .cra_name = "__aes-aesni",
21356 .cra_driver_name = "__driver-aes-aesni",
21358 .cra_flags = 0x00000001,
21359 .cra_blocksize = 16,
21360 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+(16)-1,
21361 .cra_alignmask = 0,
21362 .cra_module = (&__this_module),
21363 .cra_list = { &(__aesni_alg.cra_list), &(__aesni_alg.cra_list) },
21366 .cia_min_keysize = 16,
21367 .cia_max_keysize = 32,
21368 .cia_setkey = aes_set_key,
21369 .cia_encrypt = __aes_encrypt,
21370 .cia_decrypt = __aes_decrypt
/*
 * ecb_encrypt - synchronous ECB over a scatterlist via the blkcipher walk:
 * clear the MAY_SLEEP flag (0x00000200) because the whole loop runs inside
 * kernel_fpu_begin(), then process each walk segment in multiples of the
 * 16-byte AES block.  NOTE(review): 'int err;', kernel_fpu_end(), 'return
 * err;' and closing braces are elided in this capture.
 */
21374 static int ecb_encrypt(struct blkcipher_desc *desc,
21375 struct scatterlist *dst, struct scatterlist *src,
21376 unsigned int nbytes)
21378 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
21379 struct blkcipher_walk walk;
21381 blkcipher_walk_init(&walk, dst, src, nbytes);
21382 err = blkcipher_walk_virt(desc, &walk);
21383 desc->flags &= ~0x00000200;
21384 kernel_fpu_begin();
21385 while ((nbytes = walk.nbytes)) {
21386 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
21387 nbytes & (~(16 -1)));
21389 err = blkcipher_walk_done(desc, &walk, nbytes);
/* ecb_decrypt - mirror of ecb_encrypt using the aesni_ecb_dec stub. */
21394 static int ecb_decrypt(struct blkcipher_desc *desc,
21395 struct scatterlist *dst, struct scatterlist *src,
21396 unsigned int nbytes)
21398 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
21399 struct blkcipher_walk walk;
21401 blkcipher_walk_init(&walk, dst, src, nbytes);
21402 err = blkcipher_walk_virt(desc, &walk);
21403 desc->flags &= ~0x00000200;
21404 kernel_fpu_begin();
21405 while ((nbytes = walk.nbytes)) {
21406 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
21407 nbytes & (~(16 -1)));
21409 err = blkcipher_walk_done(desc, &walk, nbytes);
/*
 * blk_ecb_alg - internal synchronous ECB blkcipher ("__ecb-aes-aesni"),
 * wrapped asynchronously by ablk_ecb_alg below.  Some initializer lines
 * (min/max keysize, .cra_u) are elided in this capture.
 */
21414 static struct crypto_alg blk_ecb_alg = {
21415 .cra_name = "__ecb-aes-aesni",
21416 .cra_driver_name = "__driver-ecb-aes-aesni",
21418 .cra_flags = 0x00000004,
21419 .cra_blocksize = 16,
21420 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+(16)-1,
21421 .cra_alignmask = 0,
21422 .cra_type = &crypto_blkcipher_type,
21423 .cra_module = (&__this_module),
21424 .cra_list = { &(blk_ecb_alg.cra_list), &(blk_ecb_alg.cra_list) },
21429 .setkey = aes_set_key,
21430 .encrypt = ecb_encrypt,
21431 .decrypt = ecb_decrypt,
/*
 * cbc_encrypt - synchronous CBC, same walk structure as ecb_encrypt but the
 * asm stub additionally threads walk.iv through the chain.  Error variable,
 * kernel_fpu_end() and returns are elided in this capture.
 */
21435 static int cbc_encrypt(struct blkcipher_desc *desc,
21436 struct scatterlist *dst, struct scatterlist *src,
21437 unsigned int nbytes)
21439 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
21440 struct blkcipher_walk walk;
21442 blkcipher_walk_init(&walk, dst, src, nbytes);
21443 err = blkcipher_walk_virt(desc, &walk);
21444 desc->flags &= ~0x00000200;
21445 kernel_fpu_begin();
21446 while ((nbytes = walk.nbytes)) {
21447 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
21448 nbytes & (~(16 -1)), walk.iv);
21450 err = blkcipher_walk_done(desc, &walk, nbytes);
/* cbc_decrypt - mirror of cbc_encrypt using the aesni_cbc_dec stub. */
21455 static int cbc_decrypt(struct blkcipher_desc *desc,
21456 struct scatterlist *dst, struct scatterlist *src,
21457 unsigned int nbytes)
21459 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
21460 struct blkcipher_walk walk;
21462 blkcipher_walk_init(&walk, dst, src, nbytes);
21463 err = blkcipher_walk_virt(desc, &walk);
21464 desc->flags &= ~0x00000200;
21465 kernel_fpu_begin();
21466 while ((nbytes = walk.nbytes)) {
21467 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
21468 nbytes & (~(16 -1)), walk.iv);
21470 err = blkcipher_walk_done(desc, &walk, nbytes);
/* blk_cbc_alg - internal synchronous CBC blkcipher ("__cbc-aes-aesni"). */
21475 static struct crypto_alg blk_cbc_alg = {
21476 .cra_name = "__cbc-aes-aesni",
21477 .cra_driver_name = "__driver-cbc-aes-aesni",
21479 .cra_flags = 0x00000004,
21480 .cra_blocksize = 16,
21481 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+(16)-1,
21482 .cra_alignmask = 0,
21483 .cra_type = &crypto_blkcipher_type,
21484 .cra_module = (&__this_module),
21485 .cra_list = { &(blk_cbc_alg.cra_list), &(blk_cbc_alg.cra_list) },
21490 .setkey = aes_set_key,
21491 .encrypt = cbc_encrypt,
21492 .decrypt = cbc_decrypt,
/*
 * ablk_set_key - forward the key to the cryptd child transform, copying
 * the request flags down (mask 0x000fff00 cleared first) and the result
 * flags back up.  Intermediate flag-mask lines and returns are elided in
 * this capture.
 */
21496 static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
21497 unsigned int key_len)
21499 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
21500 struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
21502 crypto_ablkcipher_clear_flags(child, 0x000fff00);
21503 crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
21505 err = crypto_ablkcipher_setkey(child, key, key_len);
21506 crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
/*
 * ablk_encrypt - async entry point.  When the FPU is NOT usable (we are in
 * irq context), clone the request into the per-request context and punt to
 * the cryptd worker; otherwise run the child blkcipher synchronously via a
 * stack blkcipher_desc.
 */
21510 static int ablk_encrypt(struct ablkcipher_request *req)
21512 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
21513 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
21514 if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 508, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; })) {
21515 struct ablkcipher_request *cryptd_req =
21516 ablkcipher_request_ctx(req);
21517 __builtin_memcpy(cryptd_req, req, sizeof(*req));
21518 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
21519 return crypto_ablkcipher_encrypt(cryptd_req);
21521 struct blkcipher_desc desc;
21522 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
21523 desc.info = req->info;
21525 return crypto_blkcipher_crt(desc.tfm)->encrypt(
21526 &desc, req->dst, req->src, req->nbytes);
/* ablk_decrypt - mirror of ablk_encrypt for the decrypt direction. */
21529 static int ablk_decrypt(struct ablkcipher_request *req)
21531 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
21532 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
21533 if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 529, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; })) {
21534 struct ablkcipher_request *cryptd_req =
21535 ablkcipher_request_ctx(req);
21536 __builtin_memcpy(cryptd_req, req, sizeof(*req));
21537 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
21538 return crypto_ablkcipher_decrypt(cryptd_req);
21540 struct blkcipher_desc desc;
21541 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
21542 desc.info = req->info;
21544 return crypto_blkcipher_crt(desc.tfm)->decrypt(
21545 &desc, req->dst, req->src, req->nbytes);
/* ablk_exit - release the cryptd child transform. */
21548 static void ablk_exit(struct crypto_tfm *tfm)
21550 struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
21551 cryptd_free_ablkcipher(ctx->cryptd_tfm);
/*
 * ablk_init_common - stash the cryptd child and size the request context
 * large enough to hold the cloned child request (see ablk_encrypt above).
 */
21553 static void ablk_init_common(struct crypto_tfm *tfm,
21554 struct cryptd_ablkcipher *cryptd_tfm)
21556 struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
21557 ctx->cryptd_tfm = cryptd_tfm;
21558 tfm->crt_u.ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
21559 crypto_ablkcipher_reqsize(&cryptd_tfm->base);
/*
 * ablk_ecb_init - allocate the cryptd-backed ECB child by driver name and
 * wire it up via ablk_init_common.  'return 0;' is elided in this capture.
 */
21561 static int ablk_ecb_init(struct crypto_tfm *tfm)
21563 struct cryptd_ablkcipher *cryptd_tfm;
21564 cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
21565 if (__builtin_constant_p(((IS_ERR(cryptd_tfm)))) ? !!((IS_ERR(cryptd_tfm))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 567, }; ______r = !!((IS_ERR(cryptd_tfm))); ______f.miss_hit[______r]++; ______r; }))
21566 return PTR_ERR(cryptd_tfm);
21567 ablk_init_common(tfm, cryptd_tfm);
/*
 * ablk_ecb_alg - public async "ecb(aes)" registration; flags 0x00000005|
 * 0x00000080 presumably mark ABLKCIPHER|ASYNC — confirm against crypto.h.
 */
21570 static struct crypto_alg ablk_ecb_alg = {
21571 .cra_name = "ecb(aes)",
21572 .cra_driver_name = "ecb-aes-aesni",
21573 .cra_priority = 400,
21574 .cra_flags = 0x00000005|0x00000080,
21575 .cra_blocksize = 16,
21576 .cra_ctxsize = sizeof(struct async_aes_ctx),
21577 .cra_alignmask = 0,
21578 .cra_type = &crypto_ablkcipher_type,
21579 .cra_module = (&__this_module),
21580 .cra_list = { &(ablk_ecb_alg.cra_list), &(ablk_ecb_alg.cra_list) },
21581 .cra_init = ablk_ecb_init,
21582 .cra_exit = ablk_exit,
21587 .setkey = ablk_set_key,
21588 .encrypt = ablk_encrypt,
21589 .decrypt = ablk_decrypt,
/* ablk_cbc_init - same pattern as ablk_ecb_init for the CBC child. */
21593 static int ablk_cbc_init(struct crypto_tfm *tfm)
21595 struct cryptd_ablkcipher *cryptd_tfm;
21596 cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
21597 if (__builtin_constant_p(((IS_ERR(cryptd_tfm)))) ? !!((IS_ERR(cryptd_tfm))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 602, }; ______r = !!((IS_ERR(cryptd_tfm))); ______f.miss_hit[______r]++; ______r; }))
21598 return PTR_ERR(cryptd_tfm);
21599 ablk_init_common(tfm, cryptd_tfm);
/* ablk_cbc_alg - public async "cbc(aes)" registration. */
21602 static struct crypto_alg ablk_cbc_alg = {
21603 .cra_name = "cbc(aes)",
21604 .cra_driver_name = "cbc-aes-aesni",
21605 .cra_priority = 400,
21606 .cra_flags = 0x00000005|0x00000080,
21607 .cra_blocksize = 16,
21608 .cra_ctxsize = sizeof(struct async_aes_ctx),
21609 .cra_alignmask = 0,
21610 .cra_type = &crypto_ablkcipher_type,
21611 .cra_module = (&__this_module),
21612 .cra_list = { &(ablk_cbc_alg.cra_list), &(ablk_cbc_alg.cra_list) },
21613 .cra_init = ablk_cbc_init,
21614 .cra_exit = ablk_exit,
21620 .setkey = ablk_set_key,
21621 .encrypt = ablk_encrypt,
21622 .decrypt = ablk_decrypt,
/*
 * ablk_pcbc_init - PCBC has no native AES-NI stub, so the child is the
 * generic pcbc template over the internal cipher, wrapped in the fpu()
 * template; remaining alloc arguments are elided in this capture.
 */
21626 static int ablk_pcbc_init(struct crypto_tfm *tfm)
21628 struct cryptd_ablkcipher *cryptd_tfm;
21629 cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
21631 if (__builtin_constant_p(((IS_ERR(cryptd_tfm)))) ? !!((IS_ERR(cryptd_tfm))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 758, }; ______r = !!((IS_ERR(cryptd_tfm))); ______f.miss_hit[______r]++; ______r; }))
21632 return PTR_ERR(cryptd_tfm);
21633 ablk_init_common(tfm, cryptd_tfm);
/* ablk_pcbc_alg - public async "pcbc(aes)" registration. */
21636 static struct crypto_alg ablk_pcbc_alg = {
21637 .cra_name = "pcbc(aes)",
21638 .cra_driver_name = "pcbc-aes-aesni",
21639 .cra_priority = 400,
21640 .cra_flags = 0x00000005|0x00000080,
21641 .cra_blocksize = 16,
21642 .cra_ctxsize = sizeof(struct async_aes_ctx),
21643 .cra_alignmask = 0,
21644 .cra_type = &crypto_ablkcipher_type,
21645 .cra_module = (&__this_module),
21646 .cra_list = { &(ablk_pcbc_alg.cra_list), &(ablk_pcbc_alg.cra_list) },
21647 .cra_init = ablk_pcbc_init,
21648 .cra_exit = ablk_exit,
21654 .setkey = ablk_set_key,
21655 .encrypt = ablk_encrypt,
21656 .decrypt = ablk_decrypt,
/*
 * aesni_init - module entry point (__init section).  The giant expansion
 * below is cpu_has() for CPU-capability bit (4*32+25) — presumably
 * X86_FEATURE_AES (word 4, bit 25); TODO confirm against cpufeature.h.
 * Bails out with a printk if AES-NI is absent, then registers the FPU
 * template and each algorithm in order, using the classic goto-unwind
 * pattern on failure.  NOTE(review): the error-variable declaration, the
 * 'goto' targets/labels and the final returns are elided in this capture;
 * only the unwind unregister calls remain visible.
 */
21660 static int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) aesni_init(void)
21663 if (__builtin_constant_p(((!(__builtin_constant_p((4*32+25)) && ( ((((4*32+25))>>5)==0 && (1UL<<(((4*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+25))>>5)==1 && (1UL<<(((4*32+25))&31) & (0|0))) || ((((4*32+25))>>5)==2 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==3 && (1UL<<(((4*32+25))&31) & (0))) || ((((4*32+25))>>5)==4 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==5 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==6 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==7 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==8 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==9 && (1UL<<(((4*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+25))) ? constant_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!((!(__builtin_constant_p((4*32+25)) && ( ((((4*32+25))>>5)==0 && (1UL<<(((4*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+25))>>5)==1 && (1UL<<(((4*32+25))&31) & (0|0))) || ((((4*32+25))>>5)==2 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==3 && (1UL<<(((4*32+25))&31) & (0))) || ((((4*32+25))>>5)==4 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==5 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==6 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==7 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==8 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==9 && (1UL<<(((4*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+25))) ? 
constant_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1259, }; ______r = !!((!(__builtin_constant_p((4*32+25)) && ( ((((4*32+25))>>5)==0 && (1UL<<(((4*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+25))>>5)==1 && (1UL<<(((4*32+25))&31) & (0|0))) || ((((4*32+25))>>5)==2 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==3 && (1UL<<(((4*32+25))&31) & (0))) || ((((4*32+25))>>5)==4 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==5 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==6 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==7 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==8 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==9 && (1UL<<(((4*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+25))) ? constant_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) {
21664 printk("<6>" "Intel AES-NI instructions are not detected.\n");
21667 if (__builtin_constant_p((((err = crypto_fpu_init())))) ? !!(((err = crypto_fpu_init()))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1264, }; ______r = !!(((err = crypto_fpu_init()))); ______f.miss_hit[______r]++; ______r; }))
21669 if (__builtin_constant_p((((err = crypto_register_alg(&aesni_alg))))) ? !!(((err = crypto_register_alg(&aesni_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1266, }; ______r = !!(((err = crypto_register_alg(&aesni_alg)))); ______f.miss_hit[______r]++; ______r; }))
21671 if (__builtin_constant_p((((err = crypto_register_alg(&__aesni_alg))))) ? !!(((err = crypto_register_alg(&__aesni_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1268, }; ______r = !!(((err = crypto_register_alg(&__aesni_alg)))); ______f.miss_hit[______r]++; ______r; }))
21673 if (__builtin_constant_p((((err = crypto_register_alg(&blk_ecb_alg))))) ? !!(((err = crypto_register_alg(&blk_ecb_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1270, }; ______r = !!(((err = crypto_register_alg(&blk_ecb_alg)))); ______f.miss_hit[______r]++; ______r; }))
21675 if (__builtin_constant_p((((err = crypto_register_alg(&blk_cbc_alg))))) ? !!(((err = crypto_register_alg(&blk_cbc_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1272, }; ______r = !!(((err = crypto_register_alg(&blk_cbc_alg)))); ______f.miss_hit[______r]++; ______r; }))
21677 if (__builtin_constant_p((((err = crypto_register_alg(&ablk_ecb_alg))))) ? !!(((err = crypto_register_alg(&ablk_ecb_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1274, }; ______r = !!(((err = crypto_register_alg(&ablk_ecb_alg)))); ______f.miss_hit[______r]++; ______r; }))
21679 if (__builtin_constant_p((((err = crypto_register_alg(&ablk_cbc_alg))))) ? !!(((err = crypto_register_alg(&ablk_cbc_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1276, }; ______r = !!(((err = crypto_register_alg(&ablk_cbc_alg)))); ______f.miss_hit[______r]++; ______r; }))
21681 if (__builtin_constant_p((((err = crypto_register_alg(&ablk_pcbc_alg))))) ? !!(((err = crypto_register_alg(&ablk_pcbc_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1297, }; ______r = !!(((err = crypto_register_alg(&ablk_pcbc_alg)))); ______f.miss_hit[______r]++; ______r; }))
21682 goto ablk_pcbc_err;
/* unwind chain: unregister in reverse registration order (labels elided). */
21684 crypto_unregister_alg(&ablk_pcbc_alg);
21686 crypto_unregister_alg(&ablk_cbc_alg);
21688 crypto_unregister_alg(&ablk_ecb_alg);
21690 crypto_unregister_alg(&blk_cbc_alg);
21692 crypto_unregister_alg(&blk_ecb_alg);
21694 crypto_unregister_alg(&__aesni_alg);
21696 crypto_unregister_alg(&aesni_alg);
/*
 * aesni_exit - module teardown (__exit section): unregister every algorithm
 * in reverse order of registration.
 */
21701 static void __attribute__ ((__section__(".exit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) aesni_exit(void)
21703 crypto_unregister_alg(&ablk_pcbc_alg);
21704 crypto_unregister_alg(&ablk_cbc_alg);
21705 crypto_unregister_alg(&ablk_ecb_alg);
21706 crypto_unregister_alg(&blk_cbc_alg);
21707 crypto_unregister_alg(&blk_ecb_alg);
21708 crypto_unregister_alg(&__aesni_alg);
21709 crypto_unregister_alg(&aesni_alg);
/*
 * Expanded module_init()/module_exit() and MODULE_* macros: init_module /
 * cleanup_module are aliases for aesni_init/aesni_exit (the __inittest/
 * __exittest inlines only type-check the callbacks), and the .modinfo
 * strings record description, license and the "aes" alias.
 */
21712 static inline __attribute__((always_inline)) initcall_t __inittest(void) { return aesni_init; } int init_module(void) __attribute__((alias("aesni_init")));;
21713 static inline __attribute__((always_inline)) exitcall_t __exittest(void) { return aesni_exit; } void cleanup_module(void) __attribute__((alias("aesni_exit")));;
21714 static const char __mod_description1380[] __attribute__((__used__)) __attribute__((section(".modinfo"), unused, aligned(1))) = "description" "=" "Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized";
21715 static const char __mod_license1381[] __attribute__((__used__)) __attribute__((section(".modinfo"), unused, aligned(1))) = "license" "=" "GPL";
21716 static const char __mod_alias1382[] __attribute__((__used__)) __attribute__((section(".modinfo"), unused, aligned(1))) = "alias" "=" "aes";