/*
 * Preprocessed source (aesni-intel_glue.i, Linux 3.0.4, 32-bit x86),
 * reduced as a GCC ICE reproducer.
 * Origin: git.wh0rd.org ICEs.git, directory 388835, commit bd3239d2 (MF).
 */
/* Fixed-width integer types (expanded from include/linux/types.h).
 * Note __kernel_size_t == unsigned int: this was preprocessed for a
 * 32-bit target. */
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
__extension__ typedef __signed__ long long __s64;
__extension__ typedef unsigned long long __u64;
typedef signed char s8;
typedef unsigned char u8;
typedef signed short s16;
typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;
typedef unsigned short umode_t;
/* Per-call-site counters for the branch-profiling instrumentation;
 * instances live in the "_ftrace_branch" section (see the expanded
 * conditionals throughout this file). */
struct ftrace_branch_data {
 const char *func;
 const char *file;
 unsigned line;
 union {
  struct {
   unsigned long correct;
   unsigned long incorrect;
  };
  struct {
   unsigned long miss;
   unsigned long hit;
  };
  unsigned long miss_hit[2];
 };
};
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
/* Kernel-internal boolean constants (paired with the _Bool typedef below). */
enum {
 false = 0,
 true = 1
};
typedef struct {
 unsigned long fds_bits [(1024/(8 * sizeof(unsigned long)))];
} __kernel_fd_set;
typedef void (*__kernel_sighandler_t)(int);
typedef int __kernel_key_t;
typedef int __kernel_mqd_t;
/* arch-specific __kernel_* primitive types (asm/posix_types.h, i386). */
typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;
typedef long long __kernel_loff_t;
typedef struct {
 int val[2];
} __kernel_fsid_t;
typedef __u32 __kernel_dev_t;
/* user-visible aliases of the __kernel_* types. */
typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
typedef __kernel_ino_t ino_t;
typedef __kernel_mode_t mode_t;
typedef __kernel_nlink_t nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_daddr_t daddr_t;
typedef __kernel_key_t key_t;
typedef __kernel_suseconds_t suseconds_t;
typedef __kernel_timer_t timer_t;
typedef __kernel_clockid_t clockid_t;
typedef __kernel_mqd_t mqd_t;
typedef _Bool bool;
typedef __kernel_uid32_t uid_t;
typedef __kernel_gid32_t gid_t;
typedef __kernel_uid16_t uid16_t;
typedef __kernel_gid16_t gid16_t;
typedef unsigned long uintptr_t;
typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;
typedef __kernel_loff_t loff_t;
typedef __kernel_size_t size_t;
typedef __kernel_ssize_t ssize_t;
typedef __kernel_ptrdiff_t ptrdiff_t;
typedef __kernel_time_t time_t;
typedef __kernel_clock_t clock_t;
typedef __kernel_caddr_t caddr_t;
/* BSD / SysV compatibility names and <stdint.h>-style aliases. */
typedef unsigned char u_char;
typedef unsigned short u_short;
typedef unsigned int u_int;
typedef unsigned long u_long;
typedef unsigned char unchar;
typedef unsigned short ushort;
typedef unsigned int uint;
typedef unsigned long ulong;
typedef __u8 u_int8_t;
typedef __s8 int8_t;
typedef __u16 u_int16_t;
typedef __s16 int16_t;
typedef __u32 u_int32_t;
typedef __s32 int32_t;
typedef __u8 uint8_t;
typedef __u16 uint16_t;
typedef __u32 uint32_t;
typedef __u64 uint64_t;
typedef __u64 u_int64_t;
typedef __s64 int64_t;
typedef u64 sector_t;
typedef u64 blkcnt_t;
typedef u64 dma_addr_t;
/* Endian-annotated types; the __le/__be distinction is enforced by
 * sparse in the real kernel build, here they are plain integers. */
typedef __u16 __le16;
typedef __u16 __be16;
typedef __u32 __le32;
typedef __u32 __be32;
typedef __u64 __le64;
typedef __u64 __be64;
typedef __u16 __sum16;
typedef __u32 __wsum;
typedef unsigned gfp_t;
typedef unsigned fmode_t;
typedef u64 phys_addr_t;
typedef phys_addr_t resource_size_t;
typedef struct {
 int counter;
} atomic_t;
/* Basic intrusive list / hash-list node types. */
struct list_head {
 struct list_head *next, *prev;
};
struct hlist_head {
 struct hlist_node *first;
};
struct hlist_node {
 struct hlist_node *next, **pprev;
};
struct ustat {
 __kernel_daddr_t f_tfree;
 __kernel_ino_t f_tinode;
 char f_fname[6];
 char f_fpack[6];
};
struct timespec;
struct compat_timespec;
/* Per-thread state for restarting interrupted syscalls. */
struct restart_block {
 long (*fn)(struct restart_block *);
 union {
  struct {
   u32 *uaddr;
   u32 val;
   u32 flags;
   u32 bitset;
   u64 time;
   u32 *uaddr2;
  } futex;
  struct {
   clockid_t clockid;
   struct timespec *rmtp;
   u64 expires;
  } nanosleep;
  struct {
   struct pollfd *ufds;
   int nfds;
   int has_timeout;
   unsigned long tv_sec;
   unsigned long tv_nsec;
  } poll;
 };
};
extern long do_no_restart_syscall(struct restart_block *parm);
/* Software popcount fallbacks, patched over by POPCNT via alternatives. */
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/* One entry of the boot-time instruction-patching table (.altinstructions). */
struct alt_instr {
 u8 *instr;
 u8 *replacement;
 u16 cpuid;
 u8 instrlen;
 u8 replacementlen;
};
extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
struct module;
extern void alternatives_smp_module_add(struct module *mod, char *name,
     void *locks, void *locks_end,
     void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
extern int alternatives_text_reserved(void *start, void *end);
extern bool skip_smp_alternatives;
extern const char * const x86_cap_flags[10*32];
extern const char * const x86_power_flags[32];
/* Boot-patched CPU feature test (asm goto form, GCC >= 4.5).
 * Emits a "jmp t_no" plus an .altinstructions record; at boot the jump
 * is patched out on CPUs that have feature 'bit', so the call collapses
 * to a constant true/false with no runtime check. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) bool __static_cpu_has(u16 bit)
{
 asm goto("1: jmp %l[t_no]\n"
   "2:\n"
   ".section .altinstructions,\"a\"\n"
   " " ".balign 4" " " "\n"
   " " ".long" " " "1b\n"
   " " ".long" " " "0\n"
   " .word %P0\n"
   " .byte 2b - 1b\n"
   " .byte 0\n"
   ".previous\n"
   : : "i" (bit) : : t_no);
 return true;
 t_no:
 return false;
}
struct paravirt_patch_site;
void apply_paravirt(struct paravirt_patch_site *start,
      struct paravirt_patch_site *end);
/* Runtime kernel-text patching primitives (arch/x86/kernel/alternative.c). */
extern void *text_poke_early(void *addr, const void *opcode, size_t len);
/* One patch request for the batched SMP text-poke interface below. */
struct text_poke_param {
 void *addr;
 const void *opcode;
 size_t len;
};
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
extern void text_poke_smp_batch(struct text_poke_param *params, int n);
/* Atomically set bit 'nr' in the bitmap at 'addr'.  The lock-prefixed
 * instruction's address is recorded in .smp_locks so it can be patched
 * on UP.  The outer conditional is the expanded branch-profiling form
 * of the IS_IMMEDIATE(nr) test: a compile-time-constant nr uses a
 * byte-wide "orb" at the exact byte, otherwise a generic "bts". */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void
set_bit(unsigned int nr, volatile unsigned long *addr)
{
 if (__builtin_constant_p((((__builtin_constant_p(nr))))) ? !!(((__builtin_constant_p(nr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/bitops.h", .line = 62, }; ______r = !!(((__builtin_constant_p(nr)))); ______f.miss_hit[______r]++; ______r; })) {
  asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "orb %1,%0"
   : "+m" (*(volatile long *) ((void *)(addr) + ((nr)>>3)))
   : "iq" ((u8)(1 << ((nr) & 7)))
   : "memory");
 } else {
  asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "bts %1,%0"
   : "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory");
 }
}
/* Non-atomic variant of set_bit (no lock prefix). */
static inline __attribute__((always_inline)) void __set_bit(int nr, volatile unsigned long *addr)
{
 asm volatile("bts %1,%0" : "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory");
}
/* Atomically clear bit 'nr'; same constant-nr byte-op ("andb") vs
 * generic "btr" dispatch as set_bit above. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void
clear_bit(int nr, volatile unsigned long *addr)
{
 if (__builtin_constant_p((((__builtin_constant_p(nr))))) ? !!(((__builtin_constant_p(nr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/bitops.h", .line = 100, }; ______r = !!(((__builtin_constant_p(nr)))); ______f.miss_hit[______r]++; ______r; })) {
  asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "andb %1,%0"
   : "+m" (*(volatile long *) ((void *)(addr) + ((nr)>>3)))
   : "iq" ((u8)~(1 << ((nr) & 7))));
 } else {
  asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "btr %1,%0"
   : "+m" (*(volatile long *) (addr))
   : "Ir" (nr));
 }
}
/* clear_bit with release semantics: compiler barrier first, then the
 * atomic clear. */
static inline __attribute__((always_inline)) void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
 __asm__ __volatile__("": : :"memory");
 clear_bit(nr, addr);
}
/* Non-atomic clear. */
static inline __attribute__((always_inline)) void __clear_bit(int nr, volatile unsigned long *addr)
{
 asm volatile("btr %1,%0" : "+m" (*(volatile long *) (addr)) : "Ir" (nr));
}
/* Non-atomic unlock-style clear: barrier + plain btr. */
static inline __attribute__((always_inline)) void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
 __asm__ __volatile__("": : :"memory");
 __clear_bit(nr, addr);
}
/* Non-atomic toggle. */
static inline __attribute__((always_inline)) void __change_bit(int nr, volatile unsigned long *addr)
{
 asm volatile("btc %1,%0" : "+m" (*(volatile long *) (addr)) : "Ir" (nr));
}
/* Atomically toggle bit 'nr'; constant nr uses "xorb", otherwise "btc". */
static inline __attribute__((always_inline)) void change_bit(int nr, volatile unsigned long *addr)
{
 if (__builtin_constant_p((((__builtin_constant_p(nr))))) ? !!(((__builtin_constant_p(nr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/bitops.h", .line = 176, }; ______r = !!(((__builtin_constant_p(nr)))); ______f.miss_hit[______r]++; ______r; })) {
  asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "xorb %1,%0"
   : "+m" (*(volatile long *) ((void *)(addr) + ((nr)>>3)))
   : "iq" ((u8)(1 << ((nr) & 7))));
 } else {
  asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "btc %1,%0"
   : "+m" (*(volatile long *) (addr))
   : "Ir" (nr));
 }
}
/* Atomically set bit 'nr' and return its previous value.  bts copies
 * the old bit into CF; "sbb %0,%0" materializes CF as 0 / -1, which is
 * returned as the nonzero/zero old-bit indicator. */
static inline __attribute__((always_inline)) int test_and_set_bit(int nr, volatile unsigned long *addr)
{
 int oldbit;
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "bts %2,%1\n\t"
      "sbb %0,%0" : "=r" (oldbit), "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory");
 return oldbit;
}
/* Acquire-semantics alias: on x86 the locked op is already a full
 * barrier, so this is plain test_and_set_bit. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) int
test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
 return test_and_set_bit(nr, addr);
}
/* Non-atomic test-and-set (no lock prefix). */
static inline __attribute__((always_inline)) int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
 int oldbit;
 asm("bts %2,%1\n\t"
     "sbb %0,%0"
     : "=r" (oldbit), "+m" (*(volatile long *) (addr))
     : "Ir" (nr));
 return oldbit;
}
/* Atomically clear bit 'nr' and return its previous value. */
static inline __attribute__((always_inline)) int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
 int oldbit;
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "btr %2,%1\n\t"
      "sbb %0,%0"
      : "=r" (oldbit), "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory");
 return oldbit;
}
/* Non-atomic test-and-clear. */
static inline __attribute__((always_inline)) int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
 int oldbit;
 asm volatile("btr %2,%1\n\t"
       "sbb %0,%0"
       : "=r" (oldbit), "+m" (*(volatile long *) (addr))
       : "Ir" (nr));
 return oldbit;
}
/* Non-atomic test-and-toggle. */
static inline __attribute__((always_inline)) int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
 int oldbit;
 asm volatile("btc %2,%1\n\t"
       "sbb %0,%0"
       : "=r" (oldbit), "+m" (*(volatile long *) (addr))
       : "Ir" (nr) : "memory");
 return oldbit;
}
/* Atomically toggle bit 'nr' and return its previous value. */
static inline __attribute__((always_inline)) int test_and_change_bit(int nr, volatile unsigned long *addr)
{
 int oldbit;
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "btc %2,%1\n\t"
      "sbb %0,%0"
      : "=r" (oldbit), "+m" (*(volatile long *) (addr)) : "Ir" (nr) : "memory");
 return oldbit;
}
/*
 * Plain-C bit test used when 'nr' is a compile-time constant: no asm
 * needed, the compiler folds the word index and mask.  Words are
 * addressed 32 bits at a time, matching the 32-bit target this file
 * was preprocessed for.  Returns 1 if the bit is set, 0 otherwise.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
 return (int)((addr[nr / 32] >> (nr % 32)) & 1UL);
}
/* Bit test for a non-constant 'nr': "bt" copies the bit into CF and
 * "sbb %0,%0" returns it as 0 / -1 (nonzero == set). */
static inline __attribute__((always_inline)) int variable_test_bit(int nr, volatile const unsigned long *addr)
{
 int oldbit;
 asm volatile("bt %2,%1\n\t"
       "sbb %0,%0"
       : "=r" (oldbit)
       : "m" (*(unsigned long *)addr), "Ir" (nr));
 return oldbit;
}
/* Index of the lowest set bit.  Undefined result if word == 0 (bsf
 * leaves its destination unchanged on a zero source); callers must
 * guarantee a nonzero argument. */
static inline __attribute__((always_inline)) unsigned long __ffs(unsigned long word)
{
 asm("bsf %1,%0"
  : "=r" (word)
  : "rm" (word));
 return word;
}
/* Index of the lowest clear bit: bsf on the complement.  Undefined if
 * word is all ones. */
static inline __attribute__((always_inline)) unsigned long ffz(unsigned long word)
{
 asm("bsf %1,%0"
  : "=r" (word)
  : "r" (~word));
 return word;
}
/* Index of the highest set bit.  Undefined if word == 0. */
static inline __attribute__((always_inline)) unsigned long __fls(unsigned long word)
{
 asm("bsr %1,%0"
  : "=r" (word)
  : "rm" (word));
 return word;
}
/*
 * ffs - find first (lowest) set bit, 1-based; returns 0 for x == 0.
 * bsfl leaves its destination undefined when the source is zero, so
 * cmovzl loads -1 in that case and the trailing "+ 1" yields 0.
 *
 * Fix: %0 is written by bsfl before cmovzl reads the "-1" input %2,
 * so the output must be early-clobbered ("=&r") to stop the compiler
 * from assigning %0 and %2 the same register -- the sibling fls()
 * below already uses "=&r" for exactly this reason.  Spelled __asm__
 * so the extension also compiles in strict-conformance modes.
 */
static inline __attribute__((always_inline)) int ffs(int x)
{
 int r;
 __asm__("bsfl %1,%0\n\t"
         "cmovzl %2,%0"
         : "=&r" (r) : "rm" (x), "r" (-1));
 return r + 1;
}
/* fls - find last (highest) set bit, 1-based; returns 0 for x == 0
 * via the cmovzl of -1.  Note the early-clobber "=&r": %0 is written
 * by bsrl before the "-1" input is consumed. */
static inline __attribute__((always_inline)) int fls(int x)
{
 int r;
 asm("bsrl %1,%0\n\t"
     "cmovzl %2,%0"
     : "=&r" (r) : "rm" (x), "rm" (-1));
 return r + 1;
}
/* Out-of-line bitmap searchers (lib/find_bit.c in the kernel). */
extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
  size, unsigned long offset);
extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
  long size, unsigned long offset);
extern unsigned long find_first_bit(const unsigned long *addr,
    unsigned long size);
extern unsigned long find_first_zero_bit(const unsigned long *addr,
     unsigned long size);
/* First set bit in a fixed 128-bit (4 x 32-bit word) scheduler priority
 * bitmap; the caller guarantees at least one bit is set (the last word
 * is used unconditionally).  The conditionals are the branch-profiling
 * expansion of plain "if (b[i])" tests. */
static inline __attribute__((always_inline)) int sched_find_first_bit(const unsigned long *b)
{
 if (__builtin_constant_p(((b[0]))) ? !!((b[0])) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/bitops/sched.h", .line = 19, }; ______r = !!((b[0])); ______f.miss_hit[______r]++; ______r; }))
  return __ffs(b[0]);
 if (__builtin_constant_p(((b[1]))) ? !!((b[1])) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/bitops/sched.h", .line = 21, }; ______r = !!((b[1])); ______f.miss_hit[______r]++; ______r; }))
  return __ffs(b[1]) + 32;
 if (__builtin_constant_p(((b[2]))) ? !!((b[2])) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/bitops/sched.h", .line = 23, }; ______r = !!((b[2])); ______f.miss_hit[______r]++; ______r; }))
  return __ffs(b[2]) + 64;
 return __ffs(b[3]) + 96;
}
/* Popcount of a 32-bit value.  Default path calls __sw_hweight32; the
 * alternatives record lets boot-time patching replace the call with
 * the POPCNT encoding (0xf3 0x0f 0xb8 0xc0) when CPU feature 4*32+23
 * is present.  Argument and result are pinned to %eax ("a"). */
static inline __attribute__((always_inline)) unsigned int __arch_hweight32(unsigned int w)
{
 unsigned int res = 0;
 asm ("661:\n\t" "call __sw_hweight32" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(4*32+23)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" ".byte 0xf3,0x0f,0xb8,0xc0" "\n664:\n" ".previous"
      : "=""a" (res)
      : "a" (w));
 return res;
}
/* Popcount of the low 16 bits. */
static inline __attribute__((always_inline)) unsigned int __arch_hweight16(unsigned int w)
{
 return __arch_hweight32(w & 0xffff);
}
/* Popcount of the low 8 bits. */
static inline __attribute__((always_inline)) unsigned int __arch_hweight8(unsigned int w)
{
 return __arch_hweight32(w & 0xff);
}
436static inline __attribute__((always_inline)) unsigned long __arch_hweight64(__u64 w)
437{
438 unsigned long res = 0;
439 return __arch_hweight32((u32)w) +
440 __arch_hweight32((u32)(w >> 32));
441 return res;
442}
/* fls for 64-bit values: if any bit is set in the high half, answer is
 * fls(high) + 32, else fls of the (truncated) low half.  The条件 is the
 * branch-profiling expansion of "if (h)". */
static inline __attribute__((always_inline)) __attribute__((always_inline)) int fls64(__u64 x)
{
 __u32 h = x >> 32;
 if (__builtin_constant_p(((h))) ? !!((h)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/bitops/fls64.h", .line = 21, }; ______r = !!((h)); ______f.miss_hit[______r]++; ______r; }))
  return fls(h) + 32;
 return fls(x);
}
/* Byte-reverse a 32-bit value with a single bswap. */
static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __arch_swab32(__u32 val)
{
 asm("bswap %0" : "=r" (val) : "0" (val));
 return val;
}
/* Byte-reverse a 64-bit value on 32-bit x86: bswap each 32-bit half in
 * a union, then exchange the halves with xchgl. */
static inline __attribute__((always_inline)) __attribute__((__const__)) __u64 __arch_swab64(__u64 val)
{
 union {
  struct {
   __u32 a;
   __u32 b;
  } s;
  __u64 u;
 } v;
 v.u = val;
 asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
     : "=r" (v.s.a), "=r" (v.s.b)
     : "0" (v.s.a), "1" (v.s.b));
 return v.u;
}
/* Out-of-line-style fallback byteswap helpers: used by the __swab*
 * macros when the operand is not a compile-time constant. */
/* Swap the two bytes of a 16-bit value. */
static inline __attribute__((always_inline)) __attribute__((__const__)) __u16 __fswab16(__u16 val)
{
 return ((__u16)( (((__u16)(val) & (__u16)0x00ffU) << 8) | (((__u16)(val) & (__u16)0xff00U) >> 8)));
}
/* Byte-reverse 32 bits (delegates to the bswap instruction). */
static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswab32(__u32 val)
{
 return __arch_swab32(val);
}
/* Byte-reverse 64 bits (delegates to the paired-bswap sequence). */
static inline __attribute__((always_inline)) __attribute__((__const__)) __u64 __fswab64(__u64 val)
{
 return __arch_swab64(val);
}
/* Swap the two 16-bit halfwords of a 32-bit value. */
static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswahw32(__u32 val)
{
 return ((__u32)( (((__u32)(val) & (__u32)0x0000ffffUL) << 16) | (((__u32)(val) & (__u32)0xffff0000UL) >> 16)));
}
/* Swap the bytes within each 16-bit halfword of a 32-bit value. */
static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswahb32(__u32 val)
{
 return ((__u32)( (((__u32)(val) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(val) & (__u32)0xff00ff00UL) >> 8)));
}
490static inline __attribute__((always_inline)) __u16 __swab16p(const __u16 *p)
491{
492 return (__builtin_constant_p((__u16)(*p)) ? ((__u16)( (((__u16)(*p) & (__u16)0x00ffU) << 8) | (((__u16)(*p) & (__u16)0xff00U) >> 8))) : __fswab16(*p));
493}
494static inline __attribute__((always_inline)) __u32 __swab32p(const __u32 *p)
495{
496 return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x000000ffUL) << 24) | (((__u32)(*p) & (__u32)0x0000ff00UL) << 8) | (((__u32)(*p) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(*p) & (__u32)0xff000000UL) >> 24))) : __fswab32(*p));
497}
498static inline __attribute__((always_inline)) __u64 __swab64p(const __u64 *p)
499{
500 return (__builtin_constant_p((__u64)(*p)) ? ((__u64)( (((__u64)(*p) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(*p) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(*p) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(*p) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(*p) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(*p) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(*p) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(*p) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(*p));
501}
502static inline __attribute__((always_inline)) __u32 __swahw32p(const __u32 *p)
503{
504 return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x0000ffffUL) << 16) | (((__u32)(*p) & (__u32)0xffff0000UL) >> 16))) : __fswahw32(*p));
505}
506static inline __attribute__((always_inline)) __u32 __swahb32p(const __u32 *p)
507{
508 return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(*p) & (__u32)0xff00ff00UL) >> 8))) : __fswahb32(*p));
509}
/* In-place byteswap wrappers: each replaces *p with its swapped form. */
static inline __attribute__((always_inline)) void __swab16s(__u16 *p)
{
 *p = __swab16p(p);
}
static inline __attribute__((always_inline)) void __swab32s(__u32 *p)
{
 *p = __swab32p(p);
}
static inline __attribute__((always_inline)) void __swab64s(__u64 *p)
{
 *p = __swab64p(p);
}
/* In-place halfword swap of a 32-bit value. */
static inline __attribute__((always_inline)) void __swahw32s(__u32 *p)
{
 *p = __swahw32p(p);
}
/* In-place byte swap within each 16-bit halfword. */
static inline __attribute__((always_inline)) void __swahb32s(__u32 *p)
{
 *p = __swahb32p(p);
}
/* CPU <-> little-endian pointer accessors.  This file was preprocessed
 * for a little-endian (x86) target, so all of these are identity
 * conversions: plain casts, no byte swapping. */
static inline __attribute__((always_inline)) __le64 __cpu_to_le64p(const __u64 *p)
{
 return ( __le64)*p;
}
static inline __attribute__((always_inline)) __u64 __le64_to_cpup(const __le64 *p)
{
 return ( __u64)*p;
}
static inline __attribute__((always_inline)) __le32 __cpu_to_le32p(const __u32 *p)
{
 return ( __le32)*p;
}
static inline __attribute__((always_inline)) __u32 __le32_to_cpup(const __le32 *p)
{
 return ( __u32)*p;
}
static inline __attribute__((always_inline)) __le16 __cpu_to_le16p(const __u16 *p)
{
 return ( __le16)*p;
}
static inline __attribute__((always_inline)) __u16 __le16_to_cpup(const __le16 *p)
{
 return ( __u16)*p;
}
/* CPU <-> big-endian pointer accessors: on this little-endian target
 * each direction is a full byte swap via the __swab*p helpers. */
static inline __attribute__((always_inline)) __be64 __cpu_to_be64p(const __u64 *p)
{
 return ( __be64)__swab64p(p);
}
static inline __attribute__((always_inline)) __u64 __be64_to_cpup(const __be64 *p)
{
 return __swab64p((__u64 *)p);
}
static inline __attribute__((always_inline)) __be32 __cpu_to_be32p(const __u32 *p)
{
 return ( __be32)__swab32p(p);
}
static inline __attribute__((always_inline)) __u32 __be32_to_cpup(const __be32 *p)
{
 return __swab32p((__u32 *)p);
}
static inline __attribute__((always_inline)) __be16 __cpu_to_be16p(const __u16 *p)
{
 return ( __be16)__swab16p(p);
}
static inline __attribute__((always_inline)) __u16 __be16_to_cpup(const __be16 *p)
{
 return __swab16p((__u16 *)p);
}
/* Add a CPU-endian value to a little-endian variable in place; on this
 * little-endian target the conversions are identity casts. */
static inline __attribute__((always_inline)) void le16_add_cpu(__le16 *var, u16 val)
{
 *var = (( __le16)(__u16)((( __u16)(__le16)(*var)) + val));
}
static inline __attribute__((always_inline)) void le32_add_cpu(__le32 *var, u32 val)
{
 *var = (( __le32)(__u32)((( __u32)(__le32)(*var)) + val));
}
static inline __attribute__((always_inline)) void le64_add_cpu(__le64 *var, u64 val)
{
 *var = (( __le64)(__u64)((( __u64)(__le64)(*var)) + val));
}
590static inline __attribute__((always_inline)) void be16_add_cpu(__be16 *var, u16 val)
591{
592 *var = (( __be16)(__builtin_constant_p((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val))) ? ((__u16)( (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0x00ffU) << 8) | (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0xff00U) >> 8))) : __fswab16(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val))));
593}
594static inline __attribute__((always_inline)) void be32_add_cpu(__be32 *var, u32 val)
595{
596 *var = (( __be32)(__builtin_constant_p((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val))) ? ((__u32)( (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? 
((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val))));
597}
598static inline __attribute__((always_inline)) void be64_add_cpu(__be64 *var, u64 val)
599{
600 *var = (( __be64)(__builtin_constant_p((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val))) ? ((__u64)( (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val))));
601}
/*
 * Find the next clear bit in a little-endian bitmap, starting at @offset.
 * x86 is little-endian, so this is just the native search — no bit remap.
 */
static inline __attribute__((always_inline)) unsigned long find_next_zero_bit_le(const void *addr,
 unsigned long size, unsigned long offset)
{
 unsigned long pos = find_next_zero_bit(addr, size, offset);
 return pos;
}
/*
 * Find the next set bit in a little-endian bitmap, starting at @offset.
 * Identical to the native helper on a little-endian CPU.
 */
static inline __attribute__((always_inline)) unsigned long find_next_bit_le(const void *addr,
 unsigned long size, unsigned long offset)
{
 unsigned long pos = find_next_bit(addr, size, offset);
 return pos;
}
/*
 * Find the first clear bit in a little-endian bitmap of @size bits.
 * Thin wrapper: on x86 (LE) the layouts coincide.
 */
static inline __attribute__((always_inline)) unsigned long find_first_zero_bit_le(const void *addr,
 unsigned long size)
{
 unsigned long pos = find_first_zero_bit(addr, size);
 return pos;
}
/*
 * Test bit @nr in a little-endian bitmap at @addr.
 * The "nr ^ 0" is the (no-op on LE x86) endian remap; the ternary picks the
 * constant-index or variable-index implementation at compile time via
 * __builtin_constant_p.
 */
static inline __attribute__((always_inline)) int test_bit_le(int nr, const void *addr)
{
 return (__builtin_constant_p((nr ^ 0)) ? constant_test_bit((nr ^ 0), (addr)) : variable_test_bit((nr ^ 0), (addr)));
}
/* Non-atomic set of bit @nr in a little-endian bitmap (LE remap is a no-op). */
static inline __attribute__((always_inline)) void __set_bit_le(int nr, void *addr)
{
 int bit = nr ^ 0;
 __set_bit(bit, addr);
}
/* Non-atomic clear of bit @nr in a little-endian bitmap (LE remap is a no-op). */
static inline __attribute__((always_inline)) void __clear_bit_le(int nr, void *addr)
{
 int bit = nr ^ 0;
 __clear_bit(bit, addr);
}
/* Atomically set bit @nr in an LE bitmap; returns its previous value. */
static inline __attribute__((always_inline)) int test_and_set_bit_le(int nr, void *addr)
{
 int bit = nr ^ 0;
 return test_and_set_bit(bit, addr);
}
/* Atomically clear bit @nr in an LE bitmap; returns its previous value. */
static inline __attribute__((always_inline)) int test_and_clear_bit_le(int nr, void *addr)
{
 int bit = nr ^ 0;
 return test_and_clear_bit(bit, addr);
}
/* Non-atomic set of bit @nr in an LE bitmap; returns its previous value. */
static inline __attribute__((always_inline)) int __test_and_set_bit_le(int nr, void *addr)
{
 int bit = nr ^ 0;
 return __test_and_set_bit(bit, addr);
}
/* Non-atomic clear of bit @nr in an LE bitmap; returns its previous value. */
static inline __attribute__((always_inline)) int __test_and_clear_bit_le(int nr, void *addr)
{
 int bit = nr ^ 0;
 return __test_and_clear_bit(bit, addr);
}
/*
 * Number of bits needed to hold a mask covering @count, i.e. the position
 * of the highest set bit (fls semantics: 1-based, 0 for count == 0).
 */
static __inline__ __attribute__((always_inline)) int get_bitmask_order(unsigned int count)
{
 return fls(count);
}
/*
 * Smallest order (power-of-two exponent) such that (1 << order) >= count:
 * fls(count) - 1, bumped by one when count is not itself a power of two.
 * The unwieldy conditional is the preprocessed ftrace branch-profiling
 * macro; the section-placed static struct records taken/not-taken counts
 * for the (count & (count - 1)) test and must stay exactly as emitted.
 */
static __inline__ __attribute__((always_inline)) int get_count_order(unsigned int count)
{
 int order;
 order = fls(count) - 1;
 /* count & (count - 1) is non-zero iff count is not a power of two */
 if (__builtin_constant_p(((count & (count - 1)))) ? !!((count & (count - 1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitops.h", .line = 42, }; ______r = !!((count & (count - 1))); ______f.miss_hit[______r]++; ______r; }))
 order++;
 return order;
}
/*
 * Hamming weight (population count) of an unsigned long.
 * sizeof(w) selects the 32- or 64-bit path at compile time; within each
 * path, __builtin_constant_p picks either the fully constant-foldable
 * bit-by-bit sum (the expanded __const_hweight* macros below) or the
 * architecture popcount helper __arch_hweight32/64 at runtime.
 * The giant expressions are preprocessor output and must not be edited.
 */
static inline __attribute__((always_inline)) unsigned long hweight_long(unsigned long w)
{
 return sizeof(w) == 4 ? (__builtin_constant_p(w) ? ((( (!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))) ) + ( (!!(((w) >> 8) & (1ULL << 0))) + (!!(((w) >> 8) & (1ULL << 1))) + (!!(((w) >> 8) & (1ULL << 2))) + (!!(((w) >> 8) & (1ULL << 3))) + (!!(((w) >> 8) & (1ULL << 4))) + (!!(((w) >> 8) & (1ULL << 5))) + (!!(((w) >> 8) & (1ULL << 6))) + (!!(((w) >> 8) & (1ULL << 7))) )) + (( (!!(((w) >> 16) & (1ULL << 0))) + (!!(((w) >> 16) & (1ULL << 1))) + (!!(((w) >> 16) & (1ULL << 2))) + (!!(((w) >> 16) & (1ULL << 3))) + (!!(((w) >> 16) & (1ULL << 4))) + (!!(((w) >> 16) & (1ULL << 5))) + (!!(((w) >> 16) & (1ULL << 6))) + (!!(((w) >> 16) & (1ULL << 7))) ) + ( (!!((((w) >> 16) >> 8) & (1ULL << 0))) + (!!((((w) >> 16) >> 8) & (1ULL << 1))) + (!!((((w) >> 16) >> 8) & (1ULL << 2))) + (!!((((w) >> 16) >> 8) & (1ULL << 3))) + (!!((((w) >> 16) >> 8) & (1ULL << 4))) + (!!((((w) >> 16) >> 8) & (1ULL << 5))) + (!!((((w) >> 16) >> 8) & (1ULL << 6))) + (!!((((w) >> 16) >> 8) & (1ULL << 7))) ))) : __arch_hweight32(w)) : (__builtin_constant_p(w) ? 
(((( (!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))) ) + ( (!!(((w) >> 8) & (1ULL << 0))) + (!!(((w) >> 8) & (1ULL << 1))) + (!!(((w) >> 8) & (1ULL << 2))) + (!!(((w) >> 8) & (1ULL << 3))) + (!!(((w) >> 8) & (1ULL << 4))) + (!!(((w) >> 8) & (1ULL << 5))) + (!!(((w) >> 8) & (1ULL << 6))) + (!!(((w) >> 8) & (1ULL << 7))) )) + (( (!!(((w) >> 16) & (1ULL << 0))) + (!!(((w) >> 16) & (1ULL << 1))) + (!!(((w) >> 16) & (1ULL << 2))) + (!!(((w) >> 16) & (1ULL << 3))) + (!!(((w) >> 16) & (1ULL << 4))) + (!!(((w) >> 16) & (1ULL << 5))) + (!!(((w) >> 16) & (1ULL << 6))) + (!!(((w) >> 16) & (1ULL << 7))) ) + ( (!!((((w) >> 16) >> 8) & (1ULL << 0))) + (!!((((w) >> 16) >> 8) & (1ULL << 1))) + (!!((((w) >> 16) >> 8) & (1ULL << 2))) + (!!((((w) >> 16) >> 8) & (1ULL << 3))) + (!!((((w) >> 16) >> 8) & (1ULL << 4))) + (!!((((w) >> 16) >> 8) & (1ULL << 5))) + (!!((((w) >> 16) >> 8) & (1ULL << 6))) + (!!((((w) >> 16) >> 8) & (1ULL << 7))) ))) + ((( (!!(((w) >> 32) & (1ULL << 0))) + (!!(((w) >> 32) & (1ULL << 1))) + (!!(((w) >> 32) & (1ULL << 2))) + (!!(((w) >> 32) & (1ULL << 3))) + (!!(((w) >> 32) & (1ULL << 4))) + (!!(((w) >> 32) & (1ULL << 5))) + (!!(((w) >> 32) & (1ULL << 6))) + (!!(((w) >> 32) & (1ULL << 7))) ) + ( (!!((((w) >> 32) >> 8) & (1ULL << 0))) + (!!((((w) >> 32) >> 8) & (1ULL << 1))) + (!!((((w) >> 32) >> 8) & (1ULL << 2))) + (!!((((w) >> 32) >> 8) & (1ULL << 3))) + (!!((((w) >> 32) >> 8) & (1ULL << 4))) + (!!((((w) >> 32) >> 8) & (1ULL << 5))) + (!!((((w) >> 32) >> 8) & (1ULL << 6))) + (!!((((w) >> 32) >> 8) & (1ULL << 7))) )) + (( (!!((((w) >> 32) >> 16) & (1ULL << 0))) + (!!((((w) >> 32) >> 16) & (1ULL << 1))) + (!!((((w) >> 32) >> 16) & (1ULL << 2))) + (!!((((w) >> 32) >> 16) & (1ULL << 3))) + (!!((((w) >> 32) >> 16) & (1ULL << 4))) + (!!((((w) >> 32) >> 16) & (1ULL << 5))) + (!!((((w) >> 32) >> 16) & (1ULL << 
6))) + (!!((((w) >> 32) >> 16) & (1ULL << 7))) ) + ( (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 0))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 1))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 2))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 3))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 4))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 5))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 6))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 7))) )))) : __arch_hweight64(w));
}
663static inline __attribute__((always_inline)) __u32 rol32(__u32 word, unsigned int shift)
664{
665 return (word << shift) | (word >> (32 - shift));
666}
667static inline __attribute__((always_inline)) __u32 ror32(__u32 word, unsigned int shift)
668{
669 return (word >> shift) | (word << (32 - shift));
670}
671static inline __attribute__((always_inline)) __u16 rol16(__u16 word, unsigned int shift)
672{
673 return (word << shift) | (word >> (16 - shift));
674}
675static inline __attribute__((always_inline)) __u16 ror16(__u16 word, unsigned int shift)
676{
677 return (word >> shift) | (word << (16 - shift));
678}
679static inline __attribute__((always_inline)) __u8 rol8(__u8 word, unsigned int shift)
680{
681 return (word << shift) | (word >> (8 - shift));
682}
683static inline __attribute__((always_inline)) __u8 ror8(__u8 word, unsigned int shift)
684{
685 return (word >> shift) | (word << (8 - shift));
686}
687static inline __attribute__((always_inline)) __s32 sign_extend32(__u32 value, int index)
688{
689 __u8 shift = 31 - index;
690 return (__s32)(value << shift) >> shift;
691}
/*
 * Find-last-set for unsigned long: dispatch to fls() or fls64() based on
 * sizeof(unsigned long). The condition is compile-time constant; the
 * expanded ftrace branch-profiling wrapper (the section-placed static
 * struct) merely counts which branch is taken and must stay verbatim.
 */
static inline __attribute__((always_inline)) unsigned fls_long(unsigned long l)
{
 if (__builtin_constant_p(((sizeof(l) == 4))) ? !!((sizeof(l) == 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitops.h", .line = 125, }; ______r = !!((sizeof(l) == 4)); ______f.miss_hit[______r]++; ______r; }))
 return fls(l);
 return fls64(l);
}
/*
 * Find the first (least significant) set bit in a 64-bit word, 0-based.
 * If the low 32 bits are all zero, search the high half and add 32;
 * otherwise the native-width __ffs on the low bits suffices. Result is
 * undefined for word == 0 (as with __ffs). The conditional is wrapped in
 * the preprocessed ftrace branch profiler; keep it byte-identical.
 */
static inline __attribute__((always_inline)) unsigned long __ffs64(u64 word)
{
 if (__builtin_constant_p(((((u32)word) == 0UL))) ? !!((((u32)word) == 0UL)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitops.h", .line = 141, }; ______r = !!((((u32)word) == 0UL)); ______f.miss_hit[______r]++; ______r; }))
 return __ffs((u32)(word >> 32)) + 32;
 return __ffs((unsigned long)word);
}
704extern unsigned long find_last_bit(const unsigned long *addr,
705 unsigned long size);
706extern unsigned int __VMALLOC_RESERVE;
707extern int sysctl_legacy_va_layout;
708extern void find_low_pfn_range(void);
709extern void setup_bootmem_allocator(void);
710extern int devmem_is_allowed(unsigned long pagenr);
711extern unsigned long max_low_pfn_mapped;
712extern unsigned long max_pfn_mapped;
713static inline __attribute__((always_inline)) phys_addr_t get_max_mapped(void)
714{
715 return (phys_addr_t)max_pfn_mapped << 12;
716}
717extern unsigned long init_memory_mapping(unsigned long start,
718 unsigned long end);
719extern void initmem_init(void);
720extern void free_initmem(void);
721typedef __builtin_va_list __gnuc_va_list;
722typedef __gnuc_va_list va_list;
723extern char *strndup_user(const char *, long);
724extern void *memdup_user(const void *, size_t);
725extern char *strcpy(char *dest, const char *src);
726extern char *strncpy(char *dest, const char *src, size_t count);
727extern char *strcat(char *dest, const char *src);
728extern char *strncat(char *dest, const char *src, size_t count);
729extern int strcmp(const char *cs, const char *ct);
730extern int strncmp(const char *cs, const char *ct, size_t count);
731extern char *strchr(const char *s, int c);
732extern size_t strlen(const char *s);
/*
 * Generic 32-bit x86 memcpy: copy n bytes from @from to @to, returning @to.
 * Copies n/4 dwords with "rep movsl", then the remaining n%4 bytes with
 * "rep movsb". The dummy outputs d0-d2 tell GCC that ECX/EDI/ESI are
 * clobbered; operand %4 re-supplies the full byte count so the tail count
 * can be computed with "andl $3". Regions must not overlap (use memmove).
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void *__memcpy(void *to, const void *from, size_t n)
{
 int d0, d1, d2;
 asm volatile("rep ; movsl\n\t"
 "movl %4,%%ecx\n\t"
 "andl $3,%%ecx\n\t"
 "jz 1f\n\t"
 "rep ; movsb\n\t"
 "1:"
 : "=&c" (d0), "=&D" (d1), "=&S" (d2)
 : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from)
 : "memory");
 return to;
}
/*
 * memcpy specialised for a compile-time-constant length @n; returns @to.
 * Small sizes (1-6, 8) become direct scalar stores; anything else falls
 * through to inline "movsl"/"rep movsl" sequences plus a byte/word tail.
 * The huge conditionals are the preprocessed ftrace branch profiler
 * wrapped around simple comparisons of @n; since @n is constant, each
 * collapses at compile time. Do not edit the asm operand wiring: esi/edi
 * are threaded through each statement in order.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void *__constant_memcpy(void *to, const void *from,
 size_t n)
{
 long esi, edi;
 /* n == 0: nothing to copy */
 if (__builtin_constant_p(((!n))) ? !!((!n)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 55, }; ______r = !!((!n)); ______f.miss_hit[______r]++; ______r; }))
 return to;
 /* tiny constant sizes: open-coded scalar moves, no string ops at all */
 switch (n) {
 case 1:
 *(char *)to = *(char *)from;
 return to;
 case 2:
 *(short *)to = *(short *)from;
 return to;
 case 4:
 *(int *)to = *(int *)from;
 return to;
 case 3:
 *(short *)to = *(short *)from;
 *((char *)to + 2) = *((char *)from + 2);
 return to;
 case 5:
 *(int *)to = *(int *)from;
 *((char *)to + 4) = *((char *)from + 4);
 return to;
 case 6:
 *(int *)to = *(int *)from;
 *((short *)to + 2) = *((short *)from + 2);
 return to;
 case 8:
 *(int *)to = *(int *)from;
 *((int *)to + 1) = *((int *)from + 1);
 return to;
 }
 esi = (long)from;
 edi = (long)to;
 /* >= 20 bytes: one "rep movsl" for the dword bulk */
 if (__builtin_constant_p(((n >= 5 * 4))) ? !!((n >= 5 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 88, }; ______r = !!((n >= 5 * 4)); ______f.miss_hit[______r]++; ______r; })) {
 int ecx;
 asm volatile("rep ; movsl"
 : "=&c" (ecx), "=&D" (edi), "=&S" (esi)
 : "0" (n / 4), "1" (edi), "2" (esi)
 : "memory"
 );
 } else {
 /* < 20 bytes: up to four unrolled single "movsl" instructions,
 * each threading edi/esi to the next */
 if (__builtin_constant_p(((n >= 4 * 4))) ? !!((n >= 4 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 98, }; ______r = !!((n >= 4 * 4)); ______f.miss_hit[______r]++; ______r; }))
 asm volatile("movsl"
 : "=&D"(edi), "=&S"(esi)
 : "0"(edi), "1"(esi)
 : "memory");
 if (__builtin_constant_p(((n >= 3 * 4))) ? !!((n >= 3 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 103, }; ______r = !!((n >= 3 * 4)); ______f.miss_hit[______r]++; ______r; }))
 asm volatile("movsl"
 : "=&D"(edi), "=&S"(esi)
 : "0"(edi), "1"(esi)
 : "memory");
 if (__builtin_constant_p(((n >= 2 * 4))) ? !!((n >= 2 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 108, }; ______r = !!((n >= 2 * 4)); ______f.miss_hit[______r]++; ______r; }))
 asm volatile("movsl"
 : "=&D"(edi), "=&S"(esi)
 : "0"(edi), "1"(esi)
 : "memory");
 if (__builtin_constant_p(((n >= 1 * 4))) ? !!((n >= 1 * 4)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/string_32.h", .line = 113, }; ______r = !!((n >= 1 * 4)); ______f.miss_hit[______r]++; ______r; }))
 asm volatile("movsl"
 : "=&D"(edi), "=&S"(esi)
 : "0"(edi), "1"(esi)
 : "memory");
 }
 /* 0-3 trailing bytes after the dword copies */
 switch (n % 4) {
 case 0:
 return to;
 case 1:
 asm volatile("movsb"
 : "=&D"(edi), "=&S"(esi)
 : "0"(edi), "1"(esi)
 : "memory");
 return to;
 case 2:
 asm volatile("movsw"
 : "=&D"(edi), "=&S"(esi)
 : "0"(edi), "1"(esi)
 : "memory");
 return to;
 default:
 asm volatile("movsw\n\tmovsb"
 : "=&D"(edi), "=&S"(esi)
 : "0"(edi), "1"(esi)
 : "memory");
 return to;
 }
}
834void *memmove(void *dest, const void *src, size_t n);
835extern void *memchr(const void *cs, int c, size_t count);
/*
 * Byte-wise memset: fill @count bytes at @s with @c using "rep stosb";
 * returns @s. The dummy outputs d0/d1 mark ECX and EDI as clobbered;
 * AL carries the fill byte.
 */
static inline __attribute__((always_inline)) void *__memset_generic(void *s, char c, size_t count)
{
 int d0, d1;
 asm volatile("rep\n\t"
 "stosb"
 : "=&c" (d0), "=&D" (d1)
 : "a" (c), "1" (s), "0" (count)
 : "memory");
 return s;
}
/*
 * memset with a compile-time-constant fill pattern @c already replicated
 * into all four bytes of a dword; returns @s. Stores count/4 dwords with
 * "rep stosl", then tests bit 1 and bit 0 of the low byte of %3 (the full
 * byte count) to emit an optional "stosw" and "stosb" for the 0-3 byte tail.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline))
void *__constant_c_memset(void *s, unsigned long c, size_t count)
{
 int d0, d1;
 asm volatile("rep ; stosl\n\t"
 "testb $2,%b3\n\t"
 "je 1f\n\t"
 "stosw\n"
 "1:\ttestb $1,%b3\n\t"
 "je 2f\n\t"
 "stosb\n"
 "2:"
 : "=&c" (d0), "=&D" (d1)
 : "a" (c), "q" (count), "0" (count/4), "1" ((long)s)
 : "memory");
 return s;
}
863extern size_t strnlen(const char *s, size_t count);
864extern char *strstr(const char *cs, const char *ct);
/*
 * memset where both the replicated fill @pattern and the byte @count are
 * compile-time constants; returns @s. Sizes 0-4 become direct scalar
 * stores; larger sizes select one of four "rep stosl" variants based on
 * count % 4, appending "stosw"/"stosb" as needed for the tail bytes.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline))
void *__constant_c_and_count_memset(void *s, unsigned long pattern,
 size_t count)
{
 /* tiny constant sizes: plain stores, no string instructions */
 switch (count) {
 case 0:
 return s;
 case 1:
 *(unsigned char *)s = pattern & 0xff;
 return s;
 case 2:
 *(unsigned short *)s = pattern & 0xffff;
 return s;
 case 3:
 *(unsigned short *)s = pattern & 0xffff;
 *((unsigned char *)s + 2) = pattern & 0xff;
 return s;
 case 4:
 *(unsigned long *)s = pattern;
 return s;
 }
 {
 int d0, d1;
 unsigned long eax = pattern;
 /* count/4 dwords via "rep stosl", plus a tail chosen by count % 4 */
 switch (count % 4) {
 case 0:
 asm volatile("rep ; stosl" "" : "=&c" (d0), "=&D" (d1) : "a" (eax), "0" (count/4), "1" ((long)s) : "memory");
 return s;
 case 1:
 asm volatile("rep ; stosl" "\n\tstosb" : "=&c" (d0), "=&D" (d1) : "a" (eax), "0" (count/4), "1" ((long)s) : "memory");
 return s;
 case 2:
 asm volatile("rep ; stosl" "\n\tstosw" : "=&c" (d0), "=&D" (d1) : "a" (eax), "0" (count/4), "1" ((long)s) : "memory");
 return s;
 default:
 asm volatile("rep ; stosl" "\n\tstosw\n\tstosb" : "=&c" (d0), "=&D" (d1) : "a" (eax), "0" (count/4), "1" ((long)s) : "memory");
 return s;
 }
 }
}
905extern void *memscan(void *addr, int c, size_t size);
906size_t strlcpy(char *, const char *, size_t);
907extern size_t strlcat(char *, const char *, __kernel_size_t);
908extern int strnicmp(const char *, const char *, __kernel_size_t);
909extern int strcasecmp(const char *s1, const char *s2);
910extern int strncasecmp(const char *s1, const char *s2, size_t n);
911extern char * strnchr(const char *, size_t, int);
912extern char * strrchr(const char *,int);
913extern char * __attribute__((warn_unused_result)) skip_spaces(const char *);
914extern char *strim(char *);
915static inline __attribute__((always_inline)) __attribute__((warn_unused_result)) char *strstrip(char *str)
916{
917 return strim(str);
918}
919extern char * strnstr(const char *, const char *, size_t);
920extern char * strpbrk(const char *,const char *);
921extern char * strsep(char **,const char *);
922extern __kernel_size_t strspn(const char *,const char *);
923extern __kernel_size_t strcspn(const char *,const char *);
924extern int __builtin_memcmp(const void *,const void *,__kernel_size_t);
925extern char *kstrdup(const char *s, gfp_t gfp);
926extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
927extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
928extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
929extern void argv_free(char **argv);
930extern bool sysfs_streq(const char *s1, const char *s2);
931extern int strtobool(const char *s, bool *res);
932int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
933int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
934int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __attribute__((format(printf,3,4)));
935extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
936 const void *from, size_t available);
/* Return true iff @str begins with @prefix (every string starts with ""). */
static inline __attribute__((always_inline)) bool strstarts(const char *str, const char *prefix)
{
 size_t prefix_len = strlen(prefix);
 return strncmp(str, prefix, prefix_len) == 0;
}
/* Zero one 4 KiB page starting at @page. */
static inline __attribute__((always_inline)) void clear_page(void *page)
{
 const unsigned long page_bytes = (1UL) << 12;
 __builtin_memset(page, 0, page_bytes);
}
/* Copy one 4 KiB page from @from to @to (regions must not overlap). */
static inline __attribute__((always_inline)) void copy_page(void *to, void *from)
{
 const unsigned long page_bytes = (1UL) << 12;
 __builtin_memcpy(to, from, page_bytes);
}
949struct page;
/*
 * Zero a page destined for userspace. On x86 there is no cache colouring,
 * so the user virtual address and page descriptor are ignored.
 */
static inline __attribute__((always_inline)) void clear_user_page(void *page, unsigned long vaddr,
 struct page *pg)
{
 (void)vaddr;
 (void)pg;
 clear_page(page);
}
/*
 * Copy a page destined for userspace. The virtual address and target page
 * descriptor are unused on x86 (no aliasing caches to flush).
 */
static inline __attribute__((always_inline)) void copy_user_page(void *to, void *from, unsigned long vaddr,
 struct page *topage)
{
 (void)vaddr;
 (void)topage;
 copy_page(to, from);
}
960extern bool __virt_addr_valid(unsigned long kaddr);
/*
 * Allocation order for @size bytes: the smallest order such that
 * (1 << order) pages of 4 KiB cover the request. Result is meaningless
 * for size == 0 (the subtraction wraps), matching the kernel convention.
 */
static inline __attribute__((always_inline)) __attribute__((__const__)) int get_order(unsigned long size)
{
 /* (size - 1) >> (PAGE_SHIFT - 1): pre-bias so the loop rounds up */
 unsigned long pages = (size - 1) >> (12 - 1);
 int order = 0;
 while (pages >>= 1)
  order++;
 return order;
}
972struct task_struct;
973struct exec_domain;
974struct task_struct;
975struct mm_struct;
/*
 * Userspace-visible vm86 (virtual-8086 mode) state.
 * These layouts are kernel<->user ABI: field order, widths, and the
 * high-half padding shorts (__csh etc.) must not change.
 */
/* CPU register image saved on entry to / restored on exit from vm86 mode. */
struct vm86_regs {
 long ebx;
 long ecx;
 long edx;
 long esi;
 long edi;
 long ebp;
 long eax;
 /* segment slots that are forced null while in vm86 mode */
 long __null_ds;
 long __null_es;
 long __null_fs;
 long __null_gs;
 long orig_eax;
 long eip;
 unsigned short cs, __csh;
 long eflags;
 long esp;
 unsigned short ss, __ssh;
 unsigned short es, __esh;
 unsigned short ds, __dsh;
 unsigned short fs, __fsh;
 unsigned short gs, __gsh;
};
/* 256-bit bitmap (8 x 32-bit words), one bit per revectored interrupt. */
struct revectored_struct {
 unsigned long __map[8];
};
/* Classic vm86() system-call argument: registers plus emulation control. */
struct vm86_struct {
 struct vm86_regs regs;
 unsigned long flags;
 unsigned long screen_bitmap;
 unsigned long cpu_type;
 struct revectored_struct int_revectored;
 struct revectored_struct int21_revectored;
};
/* Extra control/debug bits for the extended vm86plus interface. */
struct vm86plus_info_struct {
 unsigned long force_return_for_pic:1;
 unsigned long vm86dbg_active:1;
 unsigned long vm86dbg_TFpendig:1;
 unsigned long unused:28;
 unsigned long is_vm86pus:1;
 /* per-interrupt debug trap table */
 unsigned char vm86dbg_intxxtab[32];
};
/* vm86plus system-call argument: vm86_struct plus the debug extension. */
struct vm86plus_struct {
 struct vm86_regs regs;
 unsigned long flags;
 unsigned long screen_bitmap;
 unsigned long cpu_type;
 struct revectored_struct int_revectored;
 struct revectored_struct int21_revectored;
 struct vm86plus_info_struct vm86plus;
};
1027extern const char early_idt_handlers[32][10];
/*
 * Register frame pushed on the kernel stack at entry (32-bit x86).
 * Field order mirrors the entry-code push sequence and is relied upon by
 * offset-based accessors (e.g. regs_get_register); do not reorder.
 */
struct pt_regs {
 unsigned long bx;
 unsigned long cx;
 unsigned long dx;
 unsigned long si;
 unsigned long di;
 unsigned long bp;
 unsigned long ax;
 unsigned long ds;
 unsigned long es;
 unsigned long fs;
 unsigned long gs;
 /* syscall number / original eax before the handler clobbers ax */
 unsigned long orig_ax;
 unsigned long ip;
 unsigned long cs;
 unsigned long flags;
 /* sp/ss are only present when entry came from user mode */
 unsigned long sp;
 unsigned long ss;
};
1047typedef int (*initcall_t)(void);
1048typedef void (*exitcall_t)(void);
1049extern initcall_t __con_initcall_start[], __con_initcall_end[];
1050extern initcall_t __security_initcall_start[], __security_initcall_end[];
1051typedef void (*ctor_fn_t)(void);
1052extern int do_one_initcall(initcall_t fn);
1053extern char __attribute__ ((__section__(".init.data"))) boot_command_line[];
1054extern char *saved_command_line;
1055extern unsigned int reset_devices;
1056void setup_arch(char **);
1057void prepare_namespace(void);
1058extern void (*late_time_init)(void);
1059extern int initcall_debug;
1060struct cpuinfo_x86;
1061struct task_struct;
1062extern unsigned long profile_pc(struct pt_regs *regs);
1063extern unsigned long
1064convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
1065extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1066 int error_code, int si_code);
1067void signal_fault(struct pt_regs *regs, void *frame, char *where);
1068extern long syscall_trace_enter(struct pt_regs *);
1069extern void syscall_trace_leave(struct pt_regs *);
1070static inline __attribute__((always_inline)) unsigned long regs_return_value(struct pt_regs *regs)
1071{
1072 return regs->ax;
1073}
1074static inline __attribute__((always_inline)) int user_mode(struct pt_regs *regs)
1075{
1076 return (regs->cs & 0x3) == 0x3;
1077}
1078static inline __attribute__((always_inline)) int user_mode_vm(struct pt_regs *regs)
1079{
1080 return ((regs->cs & 0x3) | (regs->flags & 0x00020000)) >=
1081 0x3;
1082}
1083static inline __attribute__((always_inline)) int v8086_mode(struct pt_regs *regs)
1084{
1085 return (regs->flags & 0x00020000);
1086}
1087static inline __attribute__((always_inline)) unsigned long kernel_stack_pointer(struct pt_regs *regs)
1088{
1089 return (unsigned long)(&regs->sp);
1090}
1091static inline __attribute__((always_inline)) unsigned long instruction_pointer(struct pt_regs *regs)
1092{
1093 return ((regs)->ip);
1094}
1095static inline __attribute__((always_inline)) void instruction_pointer_set(struct pt_regs *regs,
1096 unsigned long val)
1097{
1098 (((regs)->ip) = (val));
1099}
1100static inline __attribute__((always_inline)) unsigned long user_stack_pointer(struct pt_regs *regs)
1101{
1102 return ((regs)->sp);
1103}
1104static inline __attribute__((always_inline)) void user_stack_pointer_set(struct pt_regs *regs,
1105 unsigned long val)
1106{
1107 (((regs)->sp) = (val));
1108}
1109static inline __attribute__((always_inline)) unsigned long frame_pointer(struct pt_regs *regs)
1110{
1111 return ((regs)->bp);
1112}
1113static inline __attribute__((always_inline)) void frame_pointer_set(struct pt_regs *regs,
1114 unsigned long val)
1115{
1116 (((regs)->bp) = (val));
1117}
1118extern int regs_query_register_offset(const char *name);
1119extern const char *regs_query_register_name(unsigned int offset);
/*
 * Read a saved register by its byte @offset within struct pt_regs
 * (as produced by regs_query_register_offset). Returns 0 for an offset
 * past the last field (ss). The conditional is a doubly-nested ftrace
 * expansion: the inner "annotated branch" wrapper comes from unlikely(),
 * the outer one from plain branch profiling; both only count outcomes of
 * the same bounds check and must remain byte-identical.
 */
static inline __attribute__((always_inline)) unsigned long regs_get_register(struct pt_regs *regs,
 unsigned int offset)
{
 if (__builtin_constant_p((((__builtin_constant_p(offset > (__builtin_offsetof(struct pt_regs,ss))) ? !!(offset > (__builtin_offsetof(struct pt_regs,ss))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 229, }; ______r = __builtin_expect(!!(offset > (__builtin_offsetof(struct pt_regs,ss))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(offset > (__builtin_offsetof(struct pt_regs,ss))) ? !!(offset > (__builtin_offsetof(struct pt_regs,ss))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 229, }; ______r = __builtin_expect(!!(offset > (__builtin_offsetof(struct pt_regs,ss))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 229, }; ______r = !!(((__builtin_constant_p(offset > (__builtin_offsetof(struct pt_regs,ss))) ? !!(offset > (__builtin_offsetof(struct pt_regs,ss))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 229, }; ______r = __builtin_expect(!!(offset > (__builtin_offsetof(struct pt_regs,ss))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
 return 0;
 return *(unsigned long *)((unsigned long)regs + offset);
}
/*
 * True when @addr lies on the same kernel stack as @regs: both addresses
 * masked down to the 8 KiB (2-page) thread-stack boundary must match.
 */
static inline __attribute__((always_inline)) int regs_within_kernel_stack(struct pt_regs *regs,
 unsigned long addr)
{
 unsigned long stack_mask = ~((((1UL) << 12) << 1) - 1);
 unsigned long stack_base = kernel_stack_pointer(regs) & stack_mask;
 return (addr & stack_mask) == stack_base;
}
/*
 * Fetch the @n-th word on the kernel stack above the saved stack pointer,
 * or 0 if that slot would fall off the current 8 KiB thread stack.
 * The conditional is the preprocessed ftrace branch profiler around the
 * regs_within_kernel_stack() bounds check; keep it byte-identical.
 */
static inline __attribute__((always_inline)) unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
 unsigned int n)
{
 unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
 addr += n;
 if (__builtin_constant_p(((regs_within_kernel_stack(regs, (unsigned long)addr)))) ? !!((regs_within_kernel_stack(regs, (unsigned long)addr))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/ptrace.h", .line = 263, }; ______r = !!((regs_within_kernel_stack(regs, (unsigned long)addr))); ______f.miss_hit[______r]++; ______r; }))
 return *addr;
 else
 return 0;
}
1143struct user_desc;
1144extern int do_get_thread_area(struct task_struct *p, int idx,
1145 struct user_desc *info);
1146extern int do_set_thread_area(struct task_struct *p, int idx,
1147 struct user_desc *info, int can_allocate);
/*
 * Kernel-internal vm86 state (not userspace ABI, but layout-sensitive:
 * entry code addresses these fields by offset).
 */
/* pt_regs plus the real segment registers that vm86 mode needs saved. */
struct kernel_vm86_regs {
 struct pt_regs pt;
 unsigned short es, __esh;
 unsigned short ds, __dsh;
 unsigned short fs, __fsh;
 unsigned short gs, __gsh;
};
/* Full in-kernel vm86 session state, including the 32-bit regs to return to. */
struct kernel_vm86_struct {
 struct kernel_vm86_regs regs;
 unsigned long flags;
 unsigned long screen_bitmap;
 unsigned long cpu_type;
 struct revectored_struct int_revectored;
 struct revectored_struct int21_revectored;
 struct vm86plus_info_struct vm86plus;
 /* saved 32-bit protected-mode registers of the calling task */
 struct pt_regs *regs32;
};
1165void handle_vm86_fault(struct kernel_vm86_regs *, long);
1166int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
1167struct pt_regs *save_v86_state(struct kernel_vm86_regs *);
1168struct task_struct;
1169void release_vm86_irqs(struct task_struct *);
/*
 * FPU / signal-frame ABI structures (32-bit x86). All layouts below are
 * fixed kernel<->user ABI; field order, widths, and padding must not change.
 */
/* Context handed to the FPU emulator: faulting eip plus the regs frame. */
struct math_emu_info {
 long ___orig_eip;
 union {
 struct pt_regs *regs;
 struct kernel_vm86_regs *vm86;
 };
};
/* Software-reserved bytes in the fxsave area describing the xsave extension. */
struct _fpx_sw_bytes {
 __u32 magic1;
 __u32 extended_size;
 __u64 xstate_bv;
 __u32 xstate_size;
 __u32 padding[7];
};
/* One 80-bit x87 register in classic fsave format. */
struct _fpreg {
 unsigned short significand[4];
 unsigned short exponent;
};
/* One x87 register in fxsave format (padded to 16 bytes). */
struct _fpxreg {
 unsigned short significand[4];
 unsigned short exponent;
 unsigned short padding[3];
};
/* One 128-bit SSE register. */
struct _xmmreg {
 unsigned long element[4];
};
/* FPU state in the signal frame: legacy fsave image followed by fxsave data. */
struct _fpstate {
 unsigned long cw;
 unsigned long sw;
 unsigned long tag;
 unsigned long ipoff;
 unsigned long cssel;
 unsigned long dataoff;
 unsigned long datasel;
 struct _fpreg _st[8];
 unsigned short status;
 /* magic distinguishes fsave-only from fxsave-extended frames */
 unsigned short magic;
 unsigned long _fxsr_env[6];
 unsigned long mxcsr;
 unsigned long reserved;
 struct _fpxreg _fxsr_st[8];
 struct _xmmreg _xmm[8];
 unsigned long padding1[44];
 union {
 unsigned long padding2[12];
 struct _fpx_sw_bytes sw_reserved;
 };
};
/* CPU context stored in the signal frame (ucontext.uc_mcontext). */
struct sigcontext {
 unsigned short gs, __gsh;
 unsigned short fs, __fsh;
 unsigned short es, __esh;
 unsigned short ds, __dsh;
 unsigned long di;
 unsigned long si;
 unsigned long bp;
 unsigned long sp;
 unsigned long bx;
 unsigned long dx;
 unsigned long cx;
 unsigned long ax;
 unsigned long trapno;
 unsigned long err;
 unsigned long ip;
 unsigned short cs, __csh;
 unsigned long flags;
 unsigned long sp_at_signal;
 unsigned short ss, __ssh;
 void *fpstate;
 unsigned long oldmask;
 unsigned long cr2;
};
/* xsave area header: which state components the image contains. */
struct _xsave_hdr {
 __u64 xstate_bv;
 __u64 reserved1[2];
 __u64 reserved2[5];
};
/* High 128 bits of the sixteen YMM registers (AVX extension). */
struct _ymmh_state {
 __u32 ymmh_space[64];
};
/* Complete extended state: legacy fpstate + xsave header + AVX high halves. */
struct _xstate {
 struct _fpstate fpstate;
 struct _xsave_hdr xstate_hdr;
 struct _ymmh_state ymmh;
};
1255extern __attribute__((const, noreturn))
1256int ____ilog2_NaN(void);
1257static inline __attribute__((always_inline)) __attribute__((const))
1258int __ilog2_u32(u32 n)
1259{
1260 return fls(n) - 1;
1261}
1262static inline __attribute__((always_inline)) __attribute__((const))
1263int __ilog2_u64(u64 n)
1264{
1265 return fls64(n) - 1;
1266}
/* True iff n has exactly one bit set (zero is not a power of two). */
static inline __attribute__((always_inline)) __attribute__((const))
bool is_power_of_2(unsigned long n)
{
 if (n == 0)
  return false;
 /* Clearing the lowest set bit leaves 0 only for powers of two. */
 return (n & (n - 1)) == 0;
}
/* Round n up to the next power of two (undefined for n == 0 or n == 1
 * per the kernel's contract for this internal helper). */
static inline __attribute__((always_inline)) __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
{
 unsigned long top_bit = fls_long(n - 1);
 return 1UL << top_bit;
}
/* Round n down to the nearest power of two (undefined for n == 0). */
static inline __attribute__((always_inline)) __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
{
 unsigned long top_bit = fls_long(n) - 1;
 return 1UL << top_bit;
}
1282extern const char linux_banner[];
1283extern const char linux_proc_banner[];
1284extern int console_printk[];
1285static inline __attribute__((always_inline)) void console_silent(void)
1286{
1287 (console_printk[0]) = 0;
1288}
/* Raise the console loglevel (console_printk[0]) to 15 so every message
 * reaches the console — but only if it is currently non-zero, i.e. the
 * console has not been explicitly silenced. */
static inline __attribute__((always_inline)) void console_verbose(void)
{
 /* The condition below is the ftrace branch-profiler expansion of a plain
  * if (console_printk[0]): it records miss/hit counts for this call site
  * in a static _ftrace_branch section entry. */
 if (__builtin_constant_p((((console_printk[0])))) ? !!(((console_printk[0]))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/printk.h", .line = 41, }; ______r = !!(((console_printk[0]))); ______f.miss_hit[______r]++; ______r; }))
  (console_printk[0]) = 15;
}
1294struct va_format {
1295 const char *fmt;
1296 va_list *va;
1297};
/* Discard a printk-style call at zero runtime cost while still letting the
 * compiler type-check the format string against its arguments. */
static inline __attribute__((always_inline)) __attribute__ ((format (printf, 1, 2)))
int no_printk(const char *fmt, ...)
{
 (void)fmt;
 return 0;
}
1303extern __attribute__((regparm(0))) __attribute__ ((format (printf, 1, 2)))
1304void early_printk(const char *fmt, ...);
1305extern int printk_needs_cpu(int cpu);
1306extern void printk_tick(void);
1307 __attribute__((regparm(0))) __attribute__ ((format (printf, 1, 0)))
1308int vprintk(const char *fmt, va_list args);
1309 __attribute__((regparm(0))) __attribute__ ((format (printf, 1, 2))) __attribute__((__cold__))
1310int printk(const char *fmt, ...);
1311extern int __printk_ratelimit(const char *func);
1312extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
1313 unsigned int interval_msec);
1314extern int printk_delay_msec;
1315extern int dmesg_restrict;
1316extern int kptr_restrict;
1317void log_buf_kexec_setup(void);
1318void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_log_buf(int early);
1319extern void dump_stack(void) __attribute__((__cold__));
1320enum {
1321 DUMP_PREFIX_NONE,
1322 DUMP_PREFIX_ADDRESS,
1323 DUMP_PREFIX_OFFSET
1324};
1325extern void hex_dump_to_buffer(const void *buf, size_t len,
1326 int rowsize, int groupsize,
1327 char *linebuf, size_t linebuflen, bool ascii);
1328extern void print_hex_dump(const char *level, const char *prefix_str,
1329 int prefix_type, int rowsize, int groupsize,
1330 const void *buf, size_t len, bool ascii);
1331extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
1332 const void *buf, size_t len);
1333extern long long dynamic_debug_enabled;
1334extern long long dynamic_debug_enabled2;
1335struct _ddebug {
1336 const char *modname;
1337 const char *function;
1338 const char *filename;
1339 const char *format;
1340 unsigned int lineno:24;
1341 unsigned int flags:8;
1342 char enabled;
1343} __attribute__((aligned(8)));
1344int ddebug_add_module(struct _ddebug *tab, unsigned int n,
1345 const char *modname);
/* Stub for the !CONFIG_DYNAMIC_DEBUG case: nothing to unregister,
 * always reports success. */
static inline __attribute__((always_inline)) int ddebug_remove_module(const char *mod)
{
 (void)mod;
 return 0;
}
1350struct bug_entry {
1351 unsigned long bug_addr;
1352 const char *file;
1353 unsigned short line;
1354 unsigned short flags;
1355};
1356extern void warn_slowpath_fmt(const char *file, const int line,
1357 const char *fmt, ...) __attribute__((format(printf, 3, 4)));
1358extern void warn_slowpath_fmt_taint(const char *file, const int line,
1359 unsigned taint, const char *fmt, ...)
1360 __attribute__((format(printf, 4, 5)));
1361extern void warn_slowpath_null(const char *file, const int line);
/* 64-by-32 unsigned division on x86-32: returns dividend / divisor and
 * stores dividend % divisor through @remainder. Uses a single `divl`
 * after first reducing the upper 32 bits so the hardware divide cannot
 * fault with a quotient overflow (#DE). */
static inline __attribute__((always_inline)) u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
 union {
  u64 v64;
  u32 v32[2];
 } d = { dividend };
 u32 upper;
 upper = d.v32[1];
 d.v32[1] = 0;
 /* ftrace branch-profiler expansion of: if (upper >= divisor).
  * When the high word is >= divisor, divide it first; its quotient
  * becomes the high word of the result and the leftover feeds the
  * hardware divide below as EDX. */
 if (__builtin_constant_p(((upper >= divisor))) ? !!((upper >= divisor)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/div64.h", .line = 46, }; ______r = !!((upper >= divisor)); ______f.miss_hit[______r]++; ______r; })) {
  d.v32[1] = upper / divisor;
  upper %= divisor;
 }
 /* divl: EDX:EAX / divisor -> quotient in EAX, remainder in EDX. */
 asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
  "rm" (divisor), "0" (d.v32[0]), "1" (upper));
 return d.v64;
}
1379struct completion;
1380struct pt_regs;
1381struct user;
1382 void __might_sleep(const char *file, int line, int preempt_offset);
1383void might_fault(void);
1384extern struct atomic_notifier_head panic_notifier_list;
1385extern long (*panic_blink)(int state);
1386 void panic(const char * fmt, ...)
1387 __attribute__ ((noreturn, format (printf, 1, 2))) __attribute__((__cold__));
1388extern void oops_enter(void);
1389extern void oops_exit(void);
1390void print_oops_end_marker(void);
1391extern int oops_may_print(void);
1392 void do_exit(long error_code)
1393 __attribute__((noreturn));
1394 void complete_and_exit(struct completion *, long)
1395 __attribute__((noreturn));
1396int __attribute__((warn_unused_result)) _kstrtoul(const char *s, unsigned int base, unsigned long *res);
1397int __attribute__((warn_unused_result)) _kstrtol(const char *s, unsigned int base, long *res);
1398int __attribute__((warn_unused_result)) kstrtoull(const char *s, unsigned int base, unsigned long long *res);
1399int __attribute__((warn_unused_result)) kstrtoll(const char *s, unsigned int base, long long *res);
/* Parse string @s as an unsigned long in the given @base; 0 on success,
 * negative errno on failure. Dispatches at compile time: when unsigned
 * long and unsigned long long have identical size and alignment (not the
 * case on ILP32 x86), the ull parser is reused directly; otherwise the
 * dedicated _kstrtoul() is called. The condition is wrapped in the
 * ftrace branch-profiler expansion. */
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
 if (__builtin_constant_p(((sizeof(unsigned long) == sizeof(unsigned long long) && __alignof__(unsigned long) == __alignof__(unsigned long long)))) ? !!((sizeof(unsigned long) == sizeof(unsigned long long) && __alignof__(unsigned long) == __alignof__(unsigned long long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/linux/kernel.h"
 , .line =
 204
 , }; ______r = !!((sizeof(unsigned long) == sizeof(unsigned long long) && __alignof__(unsigned long) == __alignof__(unsigned long long))); ______f.miss_hit[______r]++; ______r; }))
  return kstrtoull(s, base, (unsigned long long *)res);
 else
  return _kstrtoul(s, base, res);
}
/* Signed counterpart of kstrtoul(): parse @s as a long in @base.
 * Same compile-time dispatch — reuse kstrtoll() when long and long long
 * are layout-identical, else call _kstrtol(). Condition is the ftrace
 * branch-profiler expansion of the plain if. */
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtol(const char *s, unsigned int base, long *res)
{
 if (__builtin_constant_p(((sizeof(long) == sizeof(long long) && __alignof__(long) == __alignof__(long long)))) ? !!((sizeof(long) == sizeof(long long) && __alignof__(long) == __alignof__(long long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/linux/kernel.h"
 , .line =
 217
 , }; ______r = !!((sizeof(long) == sizeof(long long) && __alignof__(long) == __alignof__(long long))); ______f.miss_hit[______r]++; ______r; }))
  return kstrtoll(s, base, (long long *)res);
 else
  return _kstrtol(s, base, res);
}
1422int __attribute__((warn_unused_result)) kstrtouint(const char *s, unsigned int base, unsigned int *res);
1423int __attribute__((warn_unused_result)) kstrtoint(const char *s, unsigned int base, int *res);
/* Fixed-width aliases: on this configuration u64/s64/u32/s32 match the
 * underlying ull/ll/uint/int parsers exactly, so each wrapper forwards
 * unchanged. 0 on success, negative errno on failure. */
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou64(const char *s, unsigned int base, u64 *res)
{
 return kstrtoull(s, base, res);
}
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos64(const char *s, unsigned int base, s64 *res)
{
 return kstrtoll(s, base, res);
}
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou32(const char *s, unsigned int base, u32 *res)
{
 return kstrtouint(s, base, res);
}
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos32(const char *s, unsigned int base, s32 *res)
{
 return kstrtoint(s, base, res);
}
1440int __attribute__((warn_unused_result)) kstrtou16(const char *s, unsigned int base, u16 *res);
1441int __attribute__((warn_unused_result)) kstrtos16(const char *s, unsigned int base, s16 *res);
1442int __attribute__((warn_unused_result)) kstrtou8(const char *s, unsigned int base, u8 *res);
1443int __attribute__((warn_unused_result)) kstrtos8(const char *s, unsigned int base, s8 *res);
1444int __attribute__((warn_unused_result)) kstrtoull_from_user(const char *s, size_t count, unsigned int base, unsigned long long *res);
1445int __attribute__((warn_unused_result)) kstrtoll_from_user(const char *s, size_t count, unsigned int base, long long *res);
1446int __attribute__((warn_unused_result)) kstrtoul_from_user(const char *s, size_t count, unsigned int base, unsigned long *res);
1447int __attribute__((warn_unused_result)) kstrtol_from_user(const char *s, size_t count, unsigned int base, long *res);
1448int __attribute__((warn_unused_result)) kstrtouint_from_user(const char *s, size_t count, unsigned int base, unsigned int *res);
1449int __attribute__((warn_unused_result)) kstrtoint_from_user(const char *s, size_t count, unsigned int base, int *res);
1450int __attribute__((warn_unused_result)) kstrtou16_from_user(const char *s, size_t count, unsigned int base, u16 *res);
1451int __attribute__((warn_unused_result)) kstrtos16_from_user(const char *s, size_t count, unsigned int base, s16 *res);
1452int __attribute__((warn_unused_result)) kstrtou8_from_user(const char *s, size_t count, unsigned int base, u8 *res);
1453int __attribute__((warn_unused_result)) kstrtos8_from_user(const char *s, size_t count, unsigned int base, s8 *res);
/* _from_user variants of the fixed-width aliases above: @s points at
 * userspace memory of length @count; each wrapper forwards to the
 * matching-width _from_user parser. */
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou64_from_user(const char *s, size_t count, unsigned int base, u64 *res)
{
 return kstrtoull_from_user(s, count, base, res);
}
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos64_from_user(const char *s, size_t count, unsigned int base, s64 *res)
{
 return kstrtoll_from_user(s, count, base, res);
}
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou32_from_user(const char *s, size_t count, unsigned int base, u32 *res)
{
 return kstrtouint_from_user(s, count, base, res);
}
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos32_from_user(const char *s, size_t count, unsigned int base, s32 *res)
{
 return kstrtoint_from_user(s, count, base, res);
}
1470extern unsigned long simple_strtoul(const char *,char **,unsigned int);
1471extern long simple_strtol(const char *,char **,unsigned int);
1472extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
1473extern long long simple_strtoll(const char *,char **,unsigned int);
1474extern int sprintf(char * buf, const char * fmt, ...)
1475 __attribute__ ((format (printf, 2, 3)));
1476extern int vsprintf(char *buf, const char *, va_list)
1477 __attribute__ ((format (printf, 2, 0)));
1478extern int snprintf(char * buf, size_t size, const char * fmt, ...)
1479 __attribute__ ((format (printf, 3, 4)));
1480extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1481 __attribute__ ((format (printf, 3, 0)));
1482extern int scnprintf(char * buf, size_t size, const char * fmt, ...)
1483 __attribute__ ((format (printf, 3, 4)));
1484extern int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
1485 __attribute__ ((format (printf, 3, 0)));
1486extern char *kasprintf(gfp_t gfp, const char *fmt, ...)
1487 __attribute__ ((format (printf, 2, 3)));
1488extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
1489extern int sscanf(const char *, const char *, ...)
1490 __attribute__ ((format (scanf, 2, 3)));
1491extern int vsscanf(const char *, const char *, va_list)
1492 __attribute__ ((format (scanf, 2, 0)));
1493extern int get_option(char **str, int *pint);
1494extern char *get_options(const char *str, int nints, int *ints);
1495extern unsigned long long memparse(const char *ptr, char **retptr);
1496extern int core_kernel_text(unsigned long addr);
1497extern int core_kernel_data(unsigned long addr);
1498extern int __kernel_text_address(unsigned long addr);
1499extern int kernel_text_address(unsigned long addr);
1500extern int func_ptr_is_kernel_text(void *ptr);
1501struct pid;
1502extern struct pid *session_of_pgrp(struct pid *pgrp);
1503unsigned long int_sqrt(unsigned long);
1504extern void bust_spinlocks(int yes);
1505extern void wake_up_klogd(void);
1506extern int oops_in_progress;
1507extern int panic_timeout;
1508extern int panic_on_oops;
1509extern int panic_on_unrecovered_nmi;
1510extern int panic_on_io_nmi;
1511extern const char *print_tainted(void);
1512extern void add_taint(unsigned flag);
1513extern int test_taint(unsigned flag);
1514extern unsigned long get_taint(void);
1515extern int root_mountflags;
1516extern bool early_boot_irqs_disabled;
1517extern enum system_states {
1518 SYSTEM_BOOTING,
1519 SYSTEM_RUNNING,
1520 SYSTEM_HALT,
1521 SYSTEM_POWER_OFF,
1522 SYSTEM_RESTART,
1523 SYSTEM_SUSPEND_DISK,
1524} system_state;
1525extern const char hex_asc[];
1526static inline __attribute__((always_inline)) char *pack_hex_byte(char *buf, u8 byte)
1527{
1528 *buf++ = hex_asc[((byte) & 0xf0) >> 4];
1529 *buf++ = hex_asc[((byte) & 0x0f)];
1530 return buf;
1531}
1532extern int hex_to_bin(char ch);
1533extern void hex2bin(u8 *dst, const char *src, size_t count);
1534void tracing_on(void);
1535void tracing_off(void);
1536void tracing_off_permanent(void);
1537int tracing_is_on(void);
1538enum ftrace_dump_mode {
1539 DUMP_NONE,
1540 DUMP_ALL,
1541 DUMP_ORIG,
1542};
1543extern void tracing_start(void);
1544extern void tracing_stop(void);
1545extern void ftrace_off_permanent(void);
/* Deliberately empty: exists solely so the printf format attribute makes
 * the compiler check trace_printk() format strings at compile time. */
static inline __attribute__((always_inline)) void __attribute__ ((format (printf, 1, 2)))
____trace_printk_check_format(const char *fmt, ...)
{
}
1550extern int
1551__trace_bprintk(unsigned long ip, const char *fmt, ...)
1552 __attribute__ ((format (printf, 2, 3)));
1553extern int
1554__trace_printk(unsigned long ip, const char *fmt, ...)
1555 __attribute__ ((format (printf, 2, 3)));
1556extern void trace_dump_stack(void);
1557extern int
1558__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
1559extern int
1560__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
1561extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
1562struct sysinfo;
1563extern int do_sysinfo(struct sysinfo *info);
1564struct sysinfo {
1565 long uptime;
1566 unsigned long loads[3];
1567 unsigned long totalram;
1568 unsigned long freeram;
1569 unsigned long sharedram;
1570 unsigned long bufferram;
1571 unsigned long totalswap;
1572 unsigned long freeswap;
1573 unsigned short procs;
1574 unsigned short pad;
1575 unsigned long totalhigh;
1576 unsigned long freehigh;
1577 unsigned int mem_unit;
1578 char _f[20-2*sizeof(long)-sizeof(int)];
1579};
1580extern int __build_bug_on_failed;
1581extern void __bad_percpu_size(void);
/* Test bit @nr of a per-CPU bitmap for the current CPU, for the case
 * where @nr is a compile-time constant: compute the containing word and
 * load it through the %fs per-CPU segment, then mask the bit. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) int x86_this_cpu_constant_test_bit(unsigned int nr,
                        const struct ftrace... unsigned long *addr)
{
 unsigned long *a = (unsigned long *)addr + nr / 32;
 /* Expansion of percpu_from_op(): a width-matched %fs-relative mov. */
 return ((1UL << (nr % 32)) & ({ typeof(*a) pfo_ret__; switch (sizeof(*a)) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m" (*a)); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (*a)); break; default: __bad_percpu_size(); } pfo_ret__; })) != 0;
}
/* Test bit @nr of a per-CPU bitmap for the current CPU when @nr is not a
 * compile-time constant: %fs-relative `bt`, then sbb materializes the
 * carry flag as 0 or -1 in @oldbit. */
static inline __attribute__((always_inline)) int x86_this_cpu_variable_test_bit(int nr,
                        const unsigned long *addr)
{
 int oldbit;
 asm volatile("bt ""%%""fs"":" "%P" "2"",%1\n\t"
   "sbb %0,%0"
   : "=r" (oldbit)
   : "m" (*(unsigned long *)addr), "Ir" (nr));
 return oldbit;
}
1598extern unsigned long __per_cpu_offset[8];
1599extern void setup_per_cpu_areas(void);
1600extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) this_cpu_off;
1601struct task_struct;
1602extern __attribute__((section(".data..percpu" ""))) __typeof__(struct task_struct *) current_task;
/* Return the current task: reads the per-CPU current_task pointer via a
 * width-dispatched %fs-relative mov (percpu_from_op expansion). */
static inline __attribute__((always_inline)) __attribute__((always_inline)) struct task_struct *get_current(void)
{
 return ({ typeof(current_task) pfo_ret__; switch (sizeof(current_task)) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "p" (&(current_task))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "p" (&(current_task))); break; default: __bad_percpu_size(); } pfo_ret__; });
}
1607extern void __xchg_wrong_size(void);
/* Atomically store a 64-bit value on 32-bit x86: loop on lock cmpxchg8b
 * until the compare-and-exchange of the previously observed value
 * succeeds. The .smp_locks section entry lets the kernel patch the lock
 * prefix away on UP. */
static inline __attribute__((always_inline)) void set_64bit(volatile u64 *ptr, u64 value)
{
 u32 low = value;
 u32 high = value >> 32;
 u64 prev = *ptr;
 asm volatile("\n1:\t"
       ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchg8b %0\n\t"
       "jnz 1b"
       : "=m" (*ptr), "+A" (prev)
       : "b" (low), "c" (high)
       : "memory");
}
1620extern void __cmpxchg_wrong_size(void);
/* 64-bit compare-and-exchange with lock prefix (SMP-safe): if *ptr == old,
 * store new; always return the prior value of *ptr. Operands follow the
 * cmpxchg8b convention: EDX:EAX = old/result, ECX:EBX = new. */
static inline __attribute__((always_inline)) u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
 u64 prev;
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchg8b %1"
       : "=A" (prev),
         "+m" (*ptr)
       : "b" ((u32)new),
         "c" ((u32)(new >> 32)),
         "0" (old)
       : "memory");
 return prev;
}
/* CPU-local variant of __cmpxchg64: same cmpxchg8b semantics but without
 * the lock prefix — only safe against interrupts on the same CPU, not
 * against other CPUs. */
static inline __attribute__((always_inline)) u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
 u64 prev;
 asm volatile("cmpxchg8b %1"
       : "=A" (prev),
         "+m" (*ptr)
       : "b" ((u32)new),
         "c" ((u32)(new >> 32)),
         "0" (old)
       : "memory");
 return prev;
}
1645extern const unsigned char * const *ideal_nops;
1646extern void arch_init_ideal_nops(void);
/* Read the EFLAGS register via pushf/pop; used to capture the interrupt
 * state (IF bit among others). */
static inline __attribute__((always_inline)) unsigned long native_save_fl(void)
{
 unsigned long flags;
 asm volatile("# __raw_save_flags\n\t"
       "pushf ; pop %0"
       : "=rm" (flags)
       :
       : "memory");
 return flags;
}
/* Write EFLAGS via push/popf — restores a value saved by native_save_fl,
 * including the interrupt-enable bit. */
static inline __attribute__((always_inline)) void native_restore_fl(unsigned long flags)
{
 asm volatile("push %0 ; popf"
       :
       :"g" (flags)
       :"memory", "cc");
}
/* Bare-metal interrupt/halt primitives (the non-paravirt implementations). */
static inline __attribute__((always_inline)) void native_irq_disable(void)
{
 asm volatile("cli": : :"memory");
}
static inline __attribute__((always_inline)) void native_irq_enable(void)
{
 asm volatile("sti": : :"memory");
}
/* sti;hlt pairs the enable with the halt so no interrupt can be lost in
 * between (sti defers interrupt delivery by one instruction). */
static inline __attribute__((always_inline)) void native_safe_halt(void)
{
 asm volatile("sti; hlt": : :"memory");
}
static inline __attribute__((always_inline)) void native_halt(void)
{
 asm volatile("hlt": : :"memory");
}
1680typedef u64 pteval_t;
1681typedef u64 pmdval_t;
1682typedef u64 pudval_t;
1683typedef u64 pgdval_t;
1684typedef u64 pgprotval_t;
1685typedef union {
1686 struct {
1687 unsigned long pte_low, pte_high;
1688 };
1689 pteval_t pte;
1690} pte_t;
1691extern bool __vmalloc_start_set;
1692typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
1693typedef struct { pgdval_t pgd; } pgd_t;
/* Wrap a raw pgd value in its typed struct. */
static inline __attribute__((always_inline)) pgd_t native_make_pgd(pgdval_t val)
{
 return (pgd_t) { val };
}
/* Unwrap the raw pgd value. */
static inline __attribute__((always_inline)) pgdval_t native_pgd_val(pgd_t pgd)
{
 return pgd.pgd;
}
/* Flag bits of a pgd entry: raw value with the physical-frame bits masked
 * out (PAE layout: 44-bit PFN field above the 12-bit page offset). */
static inline __attribute__((always_inline)) pgdval_t pgd_flags(pgd_t pgd)
{
 return native_pgd_val(pgd) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))));
}
1706typedef struct { pgd_t pgd; } pud_t;
/* Folded-PUD (3-level PAE) stubs: the pgd level always "exists", so the
 * predicates are constants and pud_offset is an identity cast. */
static inline __attribute__((always_inline)) int pgd_none(pgd_t pgd) { return 0; }
static inline __attribute__((always_inline)) int pgd_bad(pgd_t pgd) { return 0; }
static inline __attribute__((always_inline)) int pgd_present(pgd_t pgd) { return 1; }
static inline __attribute__((always_inline)) void pgd_clear(pgd_t *pgd) { }
static inline __attribute__((always_inline)) pud_t * pud_offset(pgd_t * pgd, unsigned long address)
{
 return (pud_t *)pgd;
}
/* With pud folded into pgd, a pud's raw value is its pgd's raw value. */
static inline __attribute__((always_inline)) pudval_t native_pud_val(pud_t pud)
{
 return native_pgd_val(pud.pgd);
}
1719typedef struct { pmdval_t pmd; } pmd_t;
/* Wrap / unwrap raw pmd values, and extract entry flag bits by masking
 * off the physical-frame bits (same PAE mask as pgd_flags). */
static inline __attribute__((always_inline)) pmd_t native_make_pmd(pmdval_t val)
{
 return (pmd_t) { val };
}
static inline __attribute__((always_inline)) pmdval_t native_pmd_val(pmd_t pmd)
{
 return pmd.pmd;
}
static inline __attribute__((always_inline)) pudval_t pud_flags(pud_t pud)
{
 return native_pud_val(pud) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))));
}
static inline __attribute__((always_inline)) pmdval_t pmd_flags(pmd_t pmd)
{
 return native_pmd_val(pmd) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))));
}
/* Wrap / unwrap raw pte values, and extract pte flag bits by masking off
 * the physical-frame bits (same PAE mask as above). */
static inline __attribute__((always_inline)) pte_t native_make_pte(pteval_t val)
{
 return (pte_t) { .pte = val };
}
static inline __attribute__((always_inline)) pteval_t native_pte_val(pte_t pte)
{
 return pte.pte;
}
static inline __attribute__((always_inline)) pteval_t pte_flags(pte_t pte)
{
 return native_pte_val(pte) & (~((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))));
}
1748typedef struct page *pgtable_t;
1749extern pteval_t __supported_pte_mask;
1750extern void set_nx(void);
1751extern int nx_enabled;
1752extern pgprot_t pgprot_writecombine(pgprot_t prot);
1753struct file;
1754pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
1755 unsigned long size, pgprot_t vma_prot);
1756int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
1757 unsigned long size, pgprot_t *vma_prot);
1758void set_pte_vaddr(unsigned long vaddr, pte_t pte);
1759extern void native_pagetable_reserve(u64 start, u64 end);
1760extern void native_pagetable_setup_start(pgd_t *base);
1761extern void native_pagetable_setup_done(pgd_t *base);
1762struct seq_file;
1763extern void arch_report_meminfo(struct seq_file *m);
1764enum {
1765 PG_LEVEL_NONE,
1766 PG_LEVEL_4K,
1767 PG_LEVEL_2M,
1768 PG_LEVEL_1G,
1769 PG_LEVEL_NUM
1770};
1771extern void update_page_count(int level, unsigned long pages);
1772extern pte_t *lookup_address(unsigned long address, unsigned int *level);
1773struct desc_struct {
1774 union {
1775 struct {
1776 unsigned int a;
1777 unsigned int b;
1778 };
1779 struct {
1780 u16 limit0;
1781 u16 base0;
1782 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
1783 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
1784 };
1785 };
1786} __attribute__((packed));
1787enum {
1788 GATE_INTERRUPT = 0xE,
1789 GATE_TRAP = 0xF,
1790 GATE_CALL = 0xC,
1791 GATE_TASK = 0x5,
1792};
1793struct gate_struct64 {
1794 u16 offset_low;
1795 u16 segment;
1796 unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
1797 u16 offset_middle;
1798 u32 offset_high;
1799 u32 zero1;
1800} __attribute__((packed));
1801enum {
1802 DESC_TSS = 0x9,
1803 DESC_LDT = 0x2,
1804 DESCTYPE_S = 0x10,
1805};
1806struct ldttss_desc64 {
1807 u16 limit0;
1808 u16 base0;
1809 unsigned base1 : 8, type : 5, dpl : 2, p : 1;
1810 unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
1811 u32 base3;
1812 u32 zero1;
1813} __attribute__((packed));
1814typedef struct desc_struct gate_desc;
1815typedef struct desc_struct ldt_desc;
1816typedef struct desc_struct tss_desc;
1817struct desc_ptr {
1818 unsigned short size;
1819 unsigned long address;
1820} __attribute__((packed)) ;
1821enum km_type {
1822 KM_BOUNCE_READ,
1823 KM_SKB_SUNRPC_DATA,
1824 KM_SKB_DATA_SOFTIRQ,
1825 KM_USER0,
1826 KM_USER1,
1827 KM_BIO_SRC_IRQ,
1828 KM_BIO_DST_IRQ,
1829 KM_PTE0,
1830 KM_PTE1,
1831 KM_IRQ0,
1832 KM_IRQ1,
1833 KM_SOFTIRQ0,
1834 KM_SOFTIRQ1,
1835 KM_SYNC_ICACHE,
1836 KM_SYNC_DCACHE,
1837 KM_UML_USERCOPY,
1838 KM_IRQ_PTE,
1839 KM_NMI,
1840 KM_NMI_PTE,
1841 KM_KDB,
1842 KM_TYPE_NR
1843};
1844struct page;
1845struct thread_struct;
1846struct desc_ptr;
1847struct tss_struct;
1848struct mm_struct;
1849struct desc_struct;
1850struct task_struct;
1851struct cpumask;
1852struct paravirt_callee_save {
1853 void *func;
1854};
1855struct pv_info {
1856 unsigned int kernel_rpl;
1857 int shared_kernel_pmd;
1858 int paravirt_enabled;
1859 const char *name;
1860};
1861struct pv_init_ops {
1862 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
1863 unsigned long addr, unsigned len);
1864};
1865struct pv_lazy_ops {
1866 void (*enter)(void);
1867 void (*leave)(void);
1868};
1869struct pv_time_ops {
1870 unsigned long long (*sched_clock)(void);
1871 unsigned long (*get_tsc_khz)(void);
1872};
1873struct pv_cpu_ops {
1874 unsigned long (*get_debugreg)(int regno);
1875 void (*set_debugreg)(int regno, unsigned long value);
1876 void (*clts)(void);
1877 unsigned long (*read_cr0)(void);
1878 void (*write_cr0)(unsigned long);
1879 unsigned long (*read_cr4_safe)(void);
1880 unsigned long (*read_cr4)(void);
1881 void (*write_cr4)(unsigned long);
1882 void (*load_tr_desc)(void);
1883 void (*load_gdt)(const struct desc_ptr *);
1884 void (*load_idt)(const struct desc_ptr *);
1885 void (*store_gdt)(struct desc_ptr *);
1886 void (*store_idt)(struct desc_ptr *);
1887 void (*set_ldt)(const void *desc, unsigned entries);
1888 unsigned long (*store_tr)(void);
1889 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
1890 void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
1891 const void *desc);
1892 void (*write_gdt_entry)(struct desc_struct *,
1893 int entrynum, const void *desc, int size);
1894 void (*write_idt_entry)(gate_desc *,
1895 int entrynum, const gate_desc *gate);
1896 void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
1897 void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
1898 void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
1899 void (*set_iopl_mask)(unsigned mask);
1900 void (*wbinvd)(void);
1901 void (*io_delay)(void);
1902 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
1903 unsigned int *ecx, unsigned int *edx);
1904 u64 (*read_msr)(unsigned int msr, int *err);
1905 int (*rdmsr_regs)(u32 *regs);
1906 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
1907 int (*wrmsr_regs)(u32 *regs);
1908 u64 (*read_tsc)(void);
1909 u64 (*read_pmc)(int counter);
1910 unsigned long long (*read_tscp)(unsigned int *aux);
1911 void (*irq_enable_sysexit)(void);
1912 void (*usergs_sysret64)(void);
1913 void (*usergs_sysret32)(void);
1914 void (*iret)(void);
1915 void (*swapgs)(void);
1916 void (*start_context_switch)(struct task_struct *prev);
1917 void (*end_context_switch)(struct task_struct *next);
1918};
1919struct pv_irq_ops {
1920 struct paravirt_callee_save save_fl;
1921 struct paravirt_callee_save restore_fl;
1922 struct paravirt_callee_save irq_disable;
1923 struct paravirt_callee_save irq_enable;
1924 void (*safe_halt)(void);
1925 void (*halt)(void);
1926};
1927struct pv_apic_ops {
1928 void (*startup_ipi_hook)(int phys_apicid,
1929 unsigned long start_eip,
1930 unsigned long start_esp);
1931};
1932struct pv_mmu_ops {
1933 unsigned long (*read_cr2)(void);
1934 void (*write_cr2)(unsigned long);
1935 unsigned long (*read_cr3)(void);
1936 void (*write_cr3)(unsigned long);
1937 void (*activate_mm)(struct mm_struct *prev,
1938 struct mm_struct *next);
1939 void (*dup_mmap)(struct mm_struct *oldmm,
1940 struct mm_struct *mm);
1941 void (*exit_mmap)(struct mm_struct *mm);
1942 void (*flush_tlb_user)(void);
1943 void (*flush_tlb_kernel)(void);
1944 void (*flush_tlb_single)(unsigned long addr);
1945 void (*flush_tlb_others)(const struct cpumask *cpus,
1946 struct mm_struct *mm,
1947 unsigned long va);
1948 int (*pgd_alloc)(struct mm_struct *mm);
1949 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
1950 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
1951 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
1952 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
1953 void (*release_pte)(unsigned long pfn);
1954 void (*release_pmd)(unsigned long pfn);
1955 void (*release_pud)(unsigned long pfn);
1956 void (*set_pte)(pte_t *ptep, pte_t pteval);
1957 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
1958 pte_t *ptep, pte_t pteval);
1959 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
1960 void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
1961 pmd_t *pmdp, pmd_t pmdval);
1962 void (*pte_update)(struct mm_struct *mm, unsigned long addr,
1963 pte_t *ptep);
1964 void (*pte_update_defer)(struct mm_struct *mm,
1965 unsigned long addr, pte_t *ptep);
1966 void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
1967 pmd_t *pmdp);
1968 void (*pmd_update_defer)(struct mm_struct *mm,
1969 unsigned long addr, pmd_t *pmdp);
1970 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
1971 pte_t *ptep);
1972 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
1973 pte_t *ptep, pte_t pte);
1974 struct paravirt_callee_save pte_val;
1975 struct paravirt_callee_save make_pte;
1976 struct paravirt_callee_save pgd_val;
1977 struct paravirt_callee_save make_pgd;
1978 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
1979 void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
1980 pte_t *ptep);
1981 void (*pmd_clear)(pmd_t *pmdp);
1982 void (*set_pud)(pud_t *pudp, pud_t pudval);
1983 struct paravirt_callee_save pmd_val;
1984 struct paravirt_callee_save make_pmd;
1985 struct pv_lazy_ops lazy_mode;
1986 void (*set_fixmap)(unsigned idx,
1987 phys_addr_t phys, pgprot_t flags);
1988};
/* Opaque spinlock layout; defined in the arch headers, only used by pointer here. */
struct arch_spinlock;
/*
 * Paravirtualized spinlock operations: one hook per entry point of the
 * native spinlock API so a hypervisor can substitute its own implementation.
 */
struct pv_lock_ops {
 int (*spin_is_locked)(struct arch_spinlock *lock);
 int (*spin_is_contended)(struct arch_spinlock *lock);
 void (*spin_lock)(struct arch_spinlock *lock);
 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
 int (*spin_trylock)(struct arch_spinlock *lock);
 void (*spin_unlock)(struct arch_spinlock *lock);
};
/*
 * Aggregate of every pv_*_ops group.  The paravirt patching machinery
 * identifies each hook by its byte offset inside this struct (see the
 * __builtin_offsetof(...) / sizeof(void *) expressions in the asm thunks
 * further down), so member order here is ABI for the patch sites.
 */
struct paravirt_patch_template {
 struct pv_init_ops pv_init_ops;
 struct pv_time_ops pv_time_ops;
 struct pv_cpu_ops pv_cpu_ops;
 struct pv_irq_ops pv_irq_ops;
 struct pv_apic_ops pv_apic_ops;
 struct pv_mmu_ops pv_mmu_ops;
 struct pv_lock_ops pv_lock_ops;
};
/* The live paravirt operation tables; defined once in arch/x86/kernel/paravirt.c. */
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;
/*
 * Boot-time binary patching helpers: each returns the number of bytes
 * emitted into the instruction buffer for one paravirt call site.
 */
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
      const void *target, u16 tgt_clobbers,
      unsigned long addr, u16 site_clobbers,
      unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
       unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
    unsigned long addr, unsigned len);
unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
         const char *start, const char *end);
/* Native (bare-metal) patcher: inlines the native instruction sequence. */
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
        unsigned long addr, unsigned len);
int paravirt_disable_iospace(void);
/* Lazy-mode state: batches MMU/CPU state updates into a single hypercall flush. */
enum paravirt_lazy_mode {
 PARAVIRT_LAZY_NONE,
 PARAVIRT_LAZY_MMU,
 PARAVIRT_LAZY_CPU,
};
enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
/* Identity/no-op callees used as default pv hooks on bare metal. */
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);
/*
 * One record per patchable call site, emitted into the .parainstructions
 * section by the asm thunks below; consumed at boot by apply_paravirt().
 */
struct paravirt_patch_site {
 u8 *instr;
 u8 instrtype;
 u8 len;
 u16 clobbers;
};
extern struct paravirt_patch_site __parainstructions[],
 __parainstructions_end[];
/*
 * Out-of-line bitmap primitives (lib/bitmap.c).  The static inline
 * wrappers below call these only when nbits is not a small compile-time
 * constant; 'bits' is the bitmap length in bits.
 */
extern int __bitmap_empty(const unsigned long *bitmap, int bits);
extern int __bitmap_full(const unsigned long *bitmap, int bits);
extern int __bitmap_equal(const unsigned long *bitmap1,
   const unsigned long *bitmap2, int bits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
   int bits);
extern void __bitmap_shift_right(unsigned long *dst,
                        const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst,
                        const unsigned long *src, int shift, int bits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
   const unsigned long *bitmap2, int bits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
   const unsigned long *bitmap2, int bits);
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
   const unsigned long *bitmap2, int bits);
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
   const unsigned long *bitmap2, int bits);
extern int __bitmap_intersects(const unsigned long *bitmap1,
   const unsigned long *bitmap2, int bits);
extern int __bitmap_subset(const unsigned long *bitmap1,
   const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
/* Region set/clear and zero-area search used by allocators. */
extern void bitmap_set(unsigned long *map, int i, int len);
extern void bitmap_clear(unsigned long *map, int start, int nr);
extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
      unsigned long size,
      unsigned long start,
      unsigned int nr,
      unsigned long align_mask);
/* Text formatting / parsing of bitmaps (hex word and cpulist forms). */
extern int bitmap_scnprintf(char *buf, unsigned int len,
   const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
   unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char *ubuf, unsigned int ulen,
   unsigned long *dst, int nbits);
extern int bitmap_scnlistprintf(char *buf, unsigned int len,
   const unsigned long *src, int nbits);
extern int bitmap_parselist(const char *buf, unsigned long *maskp,
   int nmaskbits);
extern int bitmap_parselist_user(const char *ubuf, unsigned int ulen,
   unsigned long *dst, int nbits);
/* Relative-mapping helpers used by cpusets/mempolicy. */
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
  const unsigned long *old, const unsigned long *new, int bits);
extern int bitmap_bitremap(int oldbit,
  const unsigned long *old, const unsigned long *new, int bits);
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
  const unsigned long *relmap, int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
  int sz, int bits);
/* Power-of-two region allocator over a bitmap. */
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
/*
 * bitmap_zero - clear an entire bitmap.
 * The expanded condition is the profiled unlikely()/small_const_nbits()
 * test: when nbits is a compile-time constant <= 32 (BITS_PER_LONG on
 * this 32-bit build) a single word store suffices; otherwise memset the
 * rounded-up word count.  The ({ ... }) block is the ftrace branch
 * profiler record (hard-coded .file/.line from the original header).
 */
static inline __attribute__((always_inline)) void bitmap_zero(unsigned long *dst, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 159, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  *dst = 0UL;
 else {
  int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long);
  __builtin_memset(dst, 0, len);
 }
}
/*
 * bitmap_fill - set all nbits bits.  Fills all but the last word with
 * 0xff, then masks the final word so bits beyond nbits stay clear
 * (the ( ((nbits) % 32) ? ... : ~0UL ) expression is the expanded
 * BITMAP_LAST_WORD_MASK).
 */
static inline __attribute__((always_inline)) void bitmap_fill(unsigned long *dst, int nbits)
{
 size_t nlongs = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)));
 if (__builtin_constant_p(((!(__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!((!(__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 170, }; ______r = !!((!(__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; })) {
  int len = (nlongs - 1) * sizeof(unsigned long);
  __builtin_memset(dst, 0xff, len);
 }
 dst[nlongs - 1] = ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
}
/*
 * bitmap_copy - copy src to dst; single word store for small constant
 * nbits, memcpy of the rounded-up word count otherwise.
 */
static inline __attribute__((always_inline)) void bitmap_copy(unsigned long *dst, const unsigned long *src,
   int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 180, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  *dst = *src;
 else {
  int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long);
  __builtin_memcpy(dst, src, len);
 }
}
/*
 * Bitwise bitmap combinators.  Each takes the single-word fast path when
 * nbits is a compile-time constant <= 32, else defers to the __bitmap_*
 * library routine.  bitmap_and/bitmap_andnot additionally report whether
 * the result is non-empty (non-zero return).
 */
static inline __attribute__((always_inline)) int bitmap_and(unsigned long *dst, const unsigned long *src1,
   const unsigned long *src2, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 191, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  return (*dst = *src1 & *src2) != 0;
 return __bitmap_and(dst, src1, src2, nbits);
}
/* dst = src1 | src2 */
static inline __attribute__((always_inline)) void bitmap_or(unsigned long *dst, const unsigned long *src1,
   const unsigned long *src2, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 199, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  *dst = *src1 | *src2;
 else
  __bitmap_or(dst, src1, src2, nbits);
}
/* dst = src1 ^ src2 */
static inline __attribute__((always_inline)) void bitmap_xor(unsigned long *dst, const unsigned long *src1,
   const unsigned long *src2, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 208, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  *dst = *src1 ^ *src2;
 else
  __bitmap_xor(dst, src1, src2, nbits);
}
/* dst = src1 & ~src2; returns non-zero when the result has any bit set. */
static inline __attribute__((always_inline)) int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
   const unsigned long *src2, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 217, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  return (*dst = *src1 & ~(*src2)) != 0;
 return __bitmap_andnot(dst, src1, src2, nbits);
}
/* dst = ~src, with bits past nbits masked off on the single-word path. */
static inline __attribute__((always_inline)) void bitmap_complement(unsigned long *dst, const unsigned long *src,
   int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 225, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  *dst = ~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
 else
  __bitmap_complement(dst, src, nbits);
}
/*
 * Bitmap predicates.  Same pattern as above: single-word comparison under
 * the expanded BITMAP_LAST_WORD_MASK for small constant nbits, otherwise
 * the out-of-line __bitmap_* implementation.
 */
static inline __attribute__((always_inline)) int bitmap_equal(const unsigned long *src1,
   const unsigned long *src2, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 234, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  return ! ((*src1 ^ *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
 else
  return __bitmap_equal(src1, src2, nbits);
}
/* True when src1 and src2 share at least one set bit. */
static inline __attribute__((always_inline)) int bitmap_intersects(const unsigned long *src1,
   const unsigned long *src2, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 243, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  return ((*src1 & *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )) != 0;
 else
  return __bitmap_intersects(src1, src2, nbits);
}
/* True when every bit set in src1 is also set in src2. */
static inline __attribute__((always_inline)) int bitmap_subset(const unsigned long *src1,
   const unsigned long *src2, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 252, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  return ! ((*src1 & ~(*src2)) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
 else
  return __bitmap_subset(src1, src2, nbits);
}
/* True when no bit in [0, nbits) is set. */
static inline __attribute__((always_inline)) int bitmap_empty(const unsigned long *src, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 260, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  return ! (*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
 else
  return __bitmap_empty(src, nbits);
}
/* True when every bit in [0, nbits) is set. */
static inline __attribute__((always_inline)) int bitmap_full(const unsigned long *src, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 268, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  return ! (~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
 else
  return __bitmap_full(src, nbits);
}
/* Population count of the first nbits bits (hweight_long is defined elsewhere). */
static inline __attribute__((always_inline)) int bitmap_weight(const unsigned long *src, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 276, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  return hweight_long(*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
 return __bitmap_weight(src, nbits);
}
/* Logical right shift of the bitmap by n bits (toward bit 0). */
static inline __attribute__((always_inline)) void bitmap_shift_right(unsigned long *dst,
   const unsigned long *src, int n, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 284, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  *dst = *src >> n;
 else
  __bitmap_shift_right(dst, src, n, nbits);
}
/* Left shift by n bits; the single-word path re-masks to nbits afterwards. */
static inline __attribute__((always_inline)) void bitmap_shift_left(unsigned long *dst,
   const unsigned long *src, int n, int nbits)
{
 if (__builtin_constant_p((((__builtin_constant_p(nbits) && (nbits) <= 32)))) ? !!(((__builtin_constant_p(nbits) && (nbits) <= 32))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bitmap.h", .line = 293, }; ______r = !!(((__builtin_constant_p(nbits) && (nbits) <= 32))); ______f.miss_hit[______r]++; ______r; }))
  *dst = (*src << n) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
 else
  __bitmap_shift_left(dst, src, n, nbits);
}
/* Kernel-buffer variant of bitmap parsing (is_user = 0). */
static inline __attribute__((always_inline)) int bitmap_parse(const char *buf, unsigned int buflen,
   unsigned long *maskp, int nmaskbits)
{
 return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
/*
 * CPU mask type.  This kernel was configured with NR_CPUS = 8 (the
 * literal 8 recurs throughout the cpumask helpers below), so the mask
 * always fits in a single unsigned long on this 32-bit build.
 */
typedef struct cpumask { unsigned long bits[(((8) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } cpumask_t;
extern int nr_cpu_ids;
/* System-wide masks maintained by the CPU hotplug core. */
extern const struct cpumask *const cpu_possible_mask;
extern const struct cpumask *const cpu_online_mask;
extern const struct cpumask *const cpu_present_mask;
extern const struct cpumask *const cpu_active_mask;
/* Debug hook; with CONFIG_DEBUG_PER_CPU_MAPS off it is the identity. */
static inline __attribute__((always_inline)) unsigned int cpumask_check(unsigned int cpu)
{
 return cpu;
}
/* Index of the first set bit, or 8 (NR_CPUS) if the mask is empty. */
static inline __attribute__((always_inline)) unsigned int cpumask_first(const struct cpumask *srcp)
{
 return find_first_bit(((srcp)->bits), 8);
}
/* Next set bit strictly after n; n may be -1 to start a scan. */
static inline __attribute__((always_inline)) unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
 if (__builtin_constant_p(((n != -1))) ? !!((n != -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/cpumask.h", .line = 172, }; ______r = !!((n != -1)); ______f.miss_hit[______r]++; ______r; }))
  cpumask_check(n);
 return find_next_bit(((srcp)->bits), 8, n+1);
}
/* Next clear bit strictly after n; n may be -1 to start a scan. */
static inline __attribute__((always_inline)) unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
 if (__builtin_constant_p(((n != -1))) ? !!((n != -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/cpumask.h", .line = 187, }; ______r = !!((n != -1)); ______f.miss_hit[______r]++; ______r; }))
  cpumask_check(n);
 return find_next_zero_bit(((srcp)->bits), 8, n+1);
}
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
/*
 * Per-CPU bit manipulation.  set_bit/clear_bit/test_and_* are the
 * atomic bitops defined elsewhere in this translation unit.
 */
static inline __attribute__((always_inline)) void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
 set_bit(cpumask_check(cpu), ((dstp)->bits));
}
static inline __attribute__((always_inline)) void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
 clear_bit(cpumask_check(cpu), ((dstp)->bits));
}
/* Atomically set cpu's bit; returns its previous value. */
static inline __attribute__((always_inline)) int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
 return test_and_set_bit(cpumask_check(cpu), ((cpumask)->bits));
}
/* Atomically clear cpu's bit; returns its previous value. */
static inline __attribute__((always_inline)) int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
 return test_and_clear_bit(cpumask_check(cpu), ((cpumask)->bits));
}
/* Set all NR_CPUS (8) bits. */
static inline __attribute__((always_inline)) void cpumask_setall(struct cpumask *dstp)
{
 bitmap_fill(((dstp)->bits), 8);
}
/* Clear all NR_CPUS (8) bits. */
static inline __attribute__((always_inline)) void cpumask_clear(struct cpumask *dstp)
{
 bitmap_zero(((dstp)->bits), 8);
}
/*
 * Mask combinators and predicates: thin wrappers over the bitmap_* inlines
 * with nbits fixed at NR_CPUS (8), so they all take the single-word
 * constant-nbits fast path.
 */
/* dstp = src1p & src2p; non-zero when the intersection is non-empty. */
static inline __attribute__((always_inline)) int cpumask_and(struct cpumask *dstp,
          const struct cpumask *src1p,
          const struct cpumask *src2p)
{
 return bitmap_and(((dstp)->bits), ((src1p)->bits),
           ((src2p)->bits), 8);
}
static inline __attribute__((always_inline)) void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
         const struct cpumask *src2p)
{
 bitmap_or(((dstp)->bits), ((src1p)->bits),
          ((src2p)->bits), 8);
}
static inline __attribute__((always_inline)) void cpumask_xor(struct cpumask *dstp,
          const struct cpumask *src1p,
          const struct cpumask *src2p)
{
 bitmap_xor(((dstp)->bits), ((src1p)->bits),
           ((src2p)->bits), 8);
}
/* dstp = src1p & ~src2p; non-zero when the result is non-empty. */
static inline __attribute__((always_inline)) int cpumask_andnot(struct cpumask *dstp,
          const struct cpumask *src1p,
          const struct cpumask *src2p)
{
 return bitmap_andnot(((dstp)->bits), ((src1p)->bits),
       ((src2p)->bits), 8);
}
static inline __attribute__((always_inline)) void cpumask_complement(struct cpumask *dstp,
          const struct cpumask *srcp)
{
 bitmap_complement(((dstp)->bits), ((srcp)->bits),
           8);
}
static inline __attribute__((always_inline)) bool cpumask_equal(const struct cpumask *src1p,
    const struct cpumask *src2p)
{
 return bitmap_equal(((src1p)->bits), ((src2p)->bits),
       8);
}
static inline __attribute__((always_inline)) bool cpumask_intersects(const struct cpumask *src1p,
         const struct cpumask *src2p)
{
 return bitmap_intersects(((src1p)->bits), ((src2p)->bits),
       8);
}
static inline __attribute__((always_inline)) int cpumask_subset(const struct cpumask *src1p,
        const struct cpumask *src2p)
{
 return bitmap_subset(((src1p)->bits), ((src2p)->bits),
       8);
}
static inline __attribute__((always_inline)) bool cpumask_empty(const struct cpumask *srcp)
{
 return bitmap_empty(((srcp)->bits), 8);
}
static inline __attribute__((always_inline)) bool cpumask_full(const struct cpumask *srcp)
{
 return bitmap_full(((srcp)->bits), 8);
}
/* Number of CPUs set in the mask. */
static inline __attribute__((always_inline)) unsigned int cpumask_weight(const struct cpumask *srcp)
{
 return bitmap_weight(((srcp)->bits), 8);
}
/* Shift the mask n CPUs toward CPU 0. */
static inline __attribute__((always_inline)) void cpumask_shift_right(struct cpumask *dstp,
           const struct cpumask *srcp, int n)
{
 bitmap_shift_right(((dstp)->bits), ((srcp)->bits), n,
            8);
}
/* Shift the mask n CPUs away from CPU 0. */
static inline __attribute__((always_inline)) void cpumask_shift_left(struct cpumask *dstp,
          const struct cpumask *srcp, int n)
{
 bitmap_shift_left(((dstp)->bits), ((srcp)->bits), n,
           8);
}
static inline __attribute__((always_inline)) void cpumask_copy(struct cpumask *dstp,
    const struct cpumask *srcp)
{
 bitmap_copy(((dstp)->bits), ((srcp)->bits), 8);
}
/* Text formatting / parsing wrappers (hex-word and cpulist formats). */
static inline __attribute__((always_inline)) int cpumask_scnprintf(char *buf, int len,
       const struct cpumask *srcp)
{
 return bitmap_scnprintf(buf, len, ((srcp)->bits), 8);
}
static inline __attribute__((always_inline)) int cpumask_parse_user(const char *buf, int len,
         struct cpumask *dstp)
{
 return bitmap_parse_user(buf, len, ((dstp)->bits), 8);
}
static inline __attribute__((always_inline)) int cpumask_parselist_user(const char *buf, int len,
         struct cpumask *dstp)
{
 return bitmap_parselist_user(buf, len, ((dstp)->bits),
       8);
}
static inline __attribute__((always_inline)) int cpulist_scnprintf(char *buf, int len,
       const struct cpumask *srcp)
{
 return bitmap_scnlistprintf(buf, len, ((srcp)->bits),
        8);
}
static inline __attribute__((always_inline)) int cpulist_parse(const char *buf, struct cpumask *dstp)
{
 return bitmap_parselist(buf, ((dstp)->bits), 8);
}
/* Size in bytes of a struct cpumask for this NR_CPUS configuration. */
static inline __attribute__((always_inline)) size_t cpumask_size(void)
{
 return (((8) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(long);
}
/*
 * With CONFIG_CPUMASK_OFFSTACK disabled, cpumask_var_t is a one-element
 * array (stack/static storage) and the alloc/free helpers degenerate to
 * no-ops that always succeed.
 */
typedef struct cpumask cpumask_var_t[1];
static inline __attribute__((always_inline)) bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
 return true;
}
static inline __attribute__((always_inline)) bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
       int node)
{
 return true;
}
/* "Zeroing" variants still clear the mask before reporting success. */
static inline __attribute__((always_inline)) bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
 cpumask_clear(*mask);
 return true;
}
static inline __attribute__((always_inline)) bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
       int node)
{
 cpumask_clear(*mask);
 return true;
}
static inline __attribute__((always_inline)) void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
static inline __attribute__((always_inline)) void free_cpumask_var(cpumask_var_t mask)
{
}
static inline __attribute__((always_inline)) void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
/* All-ones template used to build cpu_all_mask. */
extern const unsigned long cpu_all_bits[(((8) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
/* Hotplug-core mutators for the global possible/present/online/active masks. */
void set_cpu_possible(unsigned int cpu, bool possible);
void set_cpu_present(unsigned int cpu, bool present);
void set_cpu_online(unsigned int cpu, bool online);
void set_cpu_active(unsigned int cpu, bool active);
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
/* Compile-time type check used by the to_cpumask() expansion below. */
static inline __attribute__((always_inline)) int __check_is_bitmap(const unsigned long *bitmap)
{
 return 1;
}
/*
 * Table of single-bit patterns: row (1 + cpu % 32) with the base pointer
 * rewound by cpu / 32 words yields a mask with exactly bit 'cpu' set.
 * Saves one mask per possible CPU at the cost of 33 shared rows.
 */
extern const unsigned long
 cpu_bit_bitmap[32 +1][(((8) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
static inline __attribute__((always_inline)) const struct cpumask *get_cpu_mask(unsigned int cpu)
{
 const unsigned long *p = cpu_bit_bitmap[1 + cpu % 32];
 p -= cpu / 32;
 return ((struct cpumask *)(1 ? (p) : (void *)sizeof(__check_is_bitmap(p))));
}
/*
 * Legacy cpumask_t API (pre-struct-cpumask).  Kept for old callers; these
 * take an explicit nbits instead of hard-coding NR_CPUS and forward to
 * the same bitmap primitives as the cpumask_* helpers above.
 */
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
int __any_online_cpu(const cpumask_t *mask);
static inline __attribute__((always_inline)) void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
 set_bit(cpu, dstp->bits);
}
static inline __attribute__((always_inline)) void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
 clear_bit(cpu, dstp->bits);
}
static inline __attribute__((always_inline)) void __cpus_setall(cpumask_t *dstp, int nbits)
{
 bitmap_fill(dstp->bits, nbits);
}
static inline __attribute__((always_inline)) void __cpus_clear(cpumask_t *dstp, int nbits)
{
 bitmap_zero(dstp->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
 return test_and_set_bit(cpu, addr->bits);
}
static inline __attribute__((always_inline)) int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
     const cpumask_t *src2p, int nbits)
{
 return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
     const cpumask_t *src2p, int nbits)
{
 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
     const cpumask_t *src2p, int nbits)
{
 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
     const cpumask_t *src2p, int nbits)
{
 return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpus_equal(const cpumask_t *src1p,
     const cpumask_t *src2p, int nbits)
{
 return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpus_intersects(const cpumask_t *src1p,
     const cpumask_t *src2p, int nbits)
{
 return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpus_subset(const cpumask_t *src1p,
     const cpumask_t *src2p, int nbits)
{
 return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpus_empty(const cpumask_t *srcp, int nbits)
{
 return bitmap_empty(srcp->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpus_weight(const cpumask_t *srcp, int nbits)
{
 return bitmap_weight(srcp->bits, nbits);
}
static inline __attribute__((always_inline)) void __cpus_shift_left(cpumask_t *dstp,
     const cpumask_t *srcp, int n, int nbits)
{
 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
/* Non-zero when running under a paravirt hypervisor (pv_info is the global table). */
static inline __attribute__((always_inline)) int paravirt_enabled(void)
{
 return pv_info.paravirt_enabled;
}
2525static inline __attribute__((always_inline)) void load_sp0(struct tss_struct *tss,
2526 struct thread_struct *thread)
2527{
2528 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_sp0); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_sp0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_sp0)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(tss)), "d" ((unsigned long)(thread)) : "memory", "cc" ); });
2529}
2530static inline __attribute__((always_inline)) void __cpuid(unsigned int *eax, unsigned int *ebx,
2531 unsigned int *ecx, unsigned int *edx)
2532{
2533 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.cpuid); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.cpuid) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.cpuid)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(eax)), "1" ((u32)(ebx)), "2" ((u32)(ecx)), [_arg4] "mr" ((u32)(edx)) : "memory", "cc" ); });
2534}
/* paravirt_get_debugreg: read debug register 'reg' through the
 * pv_cpu_ops.get_debugreg hook.  Preprocessed expansion of PVOP_CALL1():
 * an indirect "call *pv_cpu_ops.get_debugreg" whose site is recorded in
 * the .parainstructions section for boot-time patching.  The
 * "sizeof(unsigned long) > sizeof(unsigned long)" test is always false,
 * so the (u64)__edx<<32 branch is dead and the result comes back in
 * %eax; the ______f/______r machinery is ftrace branch profiling. */
2535static inline __attribute__((always_inline)) unsigned long paravirt_get_debugreg(int reg)
2536{
2537 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.get_debugreg); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 39, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.get_debugreg) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.get_debugreg)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(reg)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.get_debugreg) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.get_debugreg)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(reg)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
2538}
/* set_debugreg: write 'val' to debug register 'reg' via the
 * pv_cpu_ops.set_debugreg hook.  Expanded PVOP_VCALL2(): patchable
 * indirect call, args passed in %eax (reg) and %edx (val), call site
 * recorded in .parainstructions. */
2539static inline __attribute__((always_inline)) void set_debugreg(unsigned long val, int reg)
2540{
2541 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.set_debugreg); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.set_debugreg) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.set_debugreg)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(reg)), "d" ((unsigned long)(val)) : "memory", "cc" ); });
2542}
/* clts: clear the Task-Switched flag in CR0 via the pv_cpu_ops.clts hook.
 * Expanded PVOP_VCALL0(): no arguments, patchable indirect call recorded
 * in .parainstructions. */
2543static inline __attribute__((always_inline)) void clts(void)
2544{
2545 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.clts); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.clts) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.clts)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
2546}
/* read_cr0: return control register CR0 via the pv_cpu_ops.read_cr0 hook.
 * Expanded PVOP_CALL0(): patchable indirect call; the always-false
 * "sizeof(unsigned long) > sizeof(unsigned long)" test makes the u64
 * (edx:eax) branch dead, so the value is taken from %eax. */
2547static inline __attribute__((always_inline)) unsigned long read_cr0(void)
2548{
2549 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_cr0); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 54, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr0)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr0)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
2550}
/* write_cr0: store 'x' into control register CR0 via the
 * pv_cpu_ops.write_cr0 hook.  Expanded PVOP_VCALL1(): arg in %eax,
 * patchable indirect call recorded in .parainstructions. */
2551static inline __attribute__((always_inline)) void write_cr0(unsigned long x)
2552{
2553 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_cr0); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_cr0) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_cr0)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(x)) : "memory", "cc" ); });
2554}
/* read_cr2: return CR2 (page-fault linear address) via the
 * pv_mmu_ops.read_cr2 hook.  Expanded PVOP_CALL0(); the u64 branch is
 * dead (always-false size test), so the value comes from %eax. */
2555static inline __attribute__((always_inline)) unsigned long read_cr2(void)
2556{
2557 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.read_cr2); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 64, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.read_cr2) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.read_cr2)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.read_cr2) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.read_cr2)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
2558}
/* write_cr2: store 'x' into CR2 via the pv_mmu_ops.write_cr2 hook.
 * Expanded PVOP_VCALL1(): arg in %eax, patchable indirect call. */
2559static inline __attribute__((always_inline)) void write_cr2(unsigned long x)
2560{
2561 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.write_cr2); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.write_cr2) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.write_cr2)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(x)) : "memory", "cc" ); });
2562}
/* read_cr3: return CR3 (page-table base) via the pv_mmu_ops.read_cr3
 * hook.  Expanded PVOP_CALL0(); the u64 branch is dead (always-false
 * size test), so the value comes from %eax. */
2563static inline __attribute__((always_inline)) unsigned long read_cr3(void)
2564{
2565 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.read_cr3); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 74, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.read_cr3) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.read_cr3)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.read_cr3) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.read_cr3)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
2566}
/* write_cr3: store 'x' into CR3 via the pv_mmu_ops.write_cr3 hook.
 * Expanded PVOP_VCALL1(): arg in %eax, patchable indirect call. */
2567static inline __attribute__((always_inline)) void write_cr3(unsigned long x)
2568{
2569 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.write_cr3); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.write_cr3) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.write_cr3)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(x)) : "memory", "cc" ); });
2570}
/* read_cr4: return CR4 via the pv_cpu_ops.read_cr4 hook.  Expanded
 * PVOP_CALL0(); the u64 branch is dead (always-false size test), so the
 * value comes from %eax. */
2571static inline __attribute__((always_inline)) unsigned long read_cr4(void)
2572{
2573 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_cr4); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 84, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr4) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr4)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr4) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr4)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
2574}
/* read_cr4_safe: like read_cr4 but through the pv_cpu_ops.read_cr4_safe
 * hook (presumably the fault-tolerant variant for CPUs without CR4 —
 * NOTE(review): semantics live in the hook implementation, not visible
 * here).  Expanded PVOP_CALL0(); value returned in %eax. */
2575static inline __attribute__((always_inline)) unsigned long read_cr4_safe(void)
2576{
2577 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_cr4_safe); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 88, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr4_safe) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr4_safe)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_cr4_safe) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_cr4_safe)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
2578}
/* write_cr4: store 'x' into CR4 via the pv_cpu_ops.write_cr4 hook.
 * Expanded PVOP_VCALL1(): arg in %eax, patchable indirect call. */
2579static inline __attribute__((always_inline)) void write_cr4(unsigned long x)
2580{
2581 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_cr4); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_cr4) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_cr4)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(x)) : "memory", "cc" ); });
2582}
/* arch_safe_halt: enable interrupts and halt via the
 * pv_irq_ops.safe_halt hook.  Expanded PVOP_VCALL0(): patchable
 * indirect call, no arguments. */
2583static inline __attribute__((always_inline)) void arch_safe_halt(void)
2584{
2585 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.safe_halt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.safe_halt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.safe_halt)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
2586}
/* halt: halt the CPU via the pv_irq_ops.halt hook.  Expanded
 * PVOP_VCALL0(): patchable indirect call, no arguments. */
2587static inline __attribute__((always_inline)) void halt(void)
2588{
2589 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.halt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.halt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.halt)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
2590}
/* wbinvd: write back and invalidate caches via the pv_cpu_ops.wbinvd
 * hook.  Expanded PVOP_VCALL0(): patchable indirect call. */
2591static inline __attribute__((always_inline)) void wbinvd(void)
2592{
2593 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.wbinvd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.wbinvd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.wbinvd)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
2594}
/* paravirt_read_msr: read MSR 'msr' through the pv_cpu_ops.read_msr hook;
 * the fault/success status is written through *err by the hook.  Expanded
 * PVOP_CALL2(): args in %eax (msr) and %edx (err pointer).  Here sizeof(u64)
 * > sizeof(unsigned long) is true on this 32-bit build, so the live branch
 * assembles the 64-bit result from %edx:%eax. */
2595static inline __attribute__((always_inline)) u64 paravirt_read_msr(unsigned msr, int *err)
2596{
2597 return ({ u64 __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_msr); if (__builtin_constant_p(((sizeof(u64) > sizeof(unsigned long)))) ? !!((sizeof(u64) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 127, }; ______r = !!((sizeof(u64) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_msr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_msr)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(msr)), "d" ((unsigned long)(err)) : "memory", "cc" ); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_msr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_msr)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(msr)), "d" ((unsigned long)(err)) : "memory", "cc" ); __ret = (u64)__eax; } __ret; });
2598}
/* paravirt_rdmsr_regs: register-image MSR read through the
 * pv_cpu_ops.rdmsr_regs hook; 'regs' points at an 8-entry u32 GPR array
 * (see rdmsrl_amd_safe below for the layout callers use).  Expanded
 * PVOP_CALL1(): arg in %eax; int status returned in %eax (the u64 branch
 * is dead since sizeof(int) > sizeof(unsigned long) is false). */
2599static inline __attribute__((always_inline)) int paravirt_rdmsr_regs(u32 *regs)
2600{
2601 return ({ int __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.rdmsr_regs); if (__builtin_constant_p(((sizeof(int) > sizeof(unsigned long)))) ? !!((sizeof(int) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 132, }; ______r = !!((sizeof(int) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.rdmsr_regs) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.rdmsr_regs)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(regs)) : "memory", "cc" ); __ret = (int)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.rdmsr_regs) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.rdmsr_regs)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(regs)) : "memory", "cc" ); __ret = (int)__eax; } __ret; });
2602}
/* paravirt_write_msr: write low:high into MSR 'msr' through the
 * pv_cpu_ops.write_msr hook.  Expanded PVOP_CALL3(): args in %eax (msr),
 * %edx (low), %ecx (high); int status returned in %eax. */
2603static inline __attribute__((always_inline)) int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
2604{
2605 return ({ int __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_msr); if (__builtin_constant_p(((sizeof(int) > sizeof(unsigned long)))) ? !!((sizeof(int) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 137, }; ______r = !!((sizeof(int) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_msr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_msr)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(msr)), "d" ((unsigned long)(low)), "c" ((unsigned long)(high)) : "memory", "cc" ); __ret = (int)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_msr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_msr)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(msr)), "d" ((unsigned long)(low)), "c" ((unsigned long)(high)) : "memory", "cc" ); __ret = (int)__eax; } __ret; });
2606}
/* paravirt_wrmsr_regs: register-image MSR write through the
 * pv_cpu_ops.wrmsr_regs hook; 'regs' points at an 8-entry u32 GPR array
 * (see wrmsrl_amd_safe below for the layout callers use).  Expanded
 * PVOP_CALL1(): arg in %eax; int status returned in %eax. */
2607static inline __attribute__((always_inline)) int paravirt_wrmsr_regs(u32 *regs)
2608{
2609 return ({ int __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.wrmsr_regs); if (__builtin_constant_p(((sizeof(int) > sizeof(unsigned long)))) ? !!((sizeof(int) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 142, }; ______r = !!((sizeof(int) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.wrmsr_regs) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.wrmsr_regs)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(regs)) : "memory", "cc" ); __ret = (int)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.wrmsr_regs) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.wrmsr_regs)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(regs)) : "memory", "cc" ); __ret = (int)__eax; } __ret; });
2610}
/* rdmsrl_safe: read MSR 'msr' into *p via paravirt_read_msr.
 * Returns the fault status the hook reported (0 on success). */
static inline __attribute__((always_inline)) int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
 int fault = 0;

 *p = paravirt_read_msr(msr, &fault);
 return fault;
}
2617static inline __attribute__((always_inline)) int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
2618{
2619 u32 gprs[8] = { 0 };
2620 int err;
2621 gprs[1] = msr;
2622 gprs[7] = 0x9c5a203a;
2623 err = paravirt_rdmsr_regs(gprs);
2624 *p = gprs[0] | ((u64)gprs[2] << 32);
2625 return err;
2626}
2627static inline __attribute__((always_inline)) int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
2628{
2629 u32 gprs[8] = { 0 };
2630 gprs[0] = (u32)val;
2631 gprs[1] = msr;
2632 gprs[2] = val >> 32;
2633 gprs[7] = 0x9c5a203a;
2634 return paravirt_wrmsr_regs(gprs);
2635}
/* paravirt_read_tsc: read the time-stamp counter through the
 * pv_cpu_ops.read_tsc hook.  Expanded PVOP_CALL0(); sizeof(u64) >
 * sizeof(unsigned long) holds on this 32-bit build, so the live branch
 * assembles the 64-bit result from %edx:%eax. */
2636static inline __attribute__((always_inline)) u64 paravirt_read_tsc(void)
2637{
2638 return ({ u64 __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_tsc); if (__builtin_constant_p(((sizeof(u64) > sizeof(unsigned long)))) ? !!((sizeof(u64) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 217, }; ______r = !!((sizeof(u64) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_tsc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_tsc)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_tsc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_tsc)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (u64)__eax; } __ret; });
2639}
/* paravirt_sched_clock: read the scheduler clock through the
 * pv_time_ops.sched_clock hook.  Expanded PVOP_CALL0(); 64-bit result
 * assembled from %edx:%eax in the live branch. */
2640static inline __attribute__((always_inline)) unsigned long long paravirt_sched_clock(void)
2641{
2642 return ({ unsigned long long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_time_ops.sched_clock); if (__builtin_constant_p(((sizeof(unsigned long long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 230, }; ______r = !!((sizeof(unsigned long long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_time_ops.sched_clock) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_time_ops.sched_clock)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_time_ops.sched_clock) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_time_ops.sched_clock)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long long)__eax; } __ret; });
2643}
/* paravirt_read_pmc: read performance-monitoring counter 'counter'
 * through the pv_cpu_ops.read_pmc hook.  Expanded PVOP_CALL1(): arg in
 * %eax; 64-bit result assembled from %edx:%eax in the live branch. */
2644static inline __attribute__((always_inline)) unsigned long long paravirt_read_pmc(int counter)
2645{
2646 return ({ u64 __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_pmc); if (__builtin_constant_p(((sizeof(u64) > sizeof(unsigned long)))) ? !!((sizeof(u64) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 235, }; ______r = !!((sizeof(u64) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_pmc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_pmc)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(counter)) : "memory", "cc" ); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_pmc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_pmc)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(counter)) : "memory", "cc" ); __ret = (u64)__eax; } __ret; });
2647}
/* paravirt_rdtscp: read the TSC and the auxiliary value (stored through
 * *aux by the hook) via pv_cpu_ops.read_tscp.  Expanded PVOP_CALL1():
 * arg in %eax; 64-bit result assembled from %edx:%eax. */
2648static inline __attribute__((always_inline)) unsigned long long paravirt_rdtscp(unsigned int *aux)
2649{
2650 return ({ u64 __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.read_tscp); if (__builtin_constant_p(((sizeof(u64) > sizeof(unsigned long)))) ? !!((sizeof(u64) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 247, }; ______r = !!((sizeof(u64) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_tscp) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_tscp)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(aux)) : "memory", "cc" ); __ret = (u64)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.read_tscp) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.read_tscp)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(aux)) : "memory", "cc" ); __ret = (u64)__eax; } __ret; });
2651}
2652static inline __attribute__((always_inline)) void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
2653{
2654 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.alloc_ldt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.alloc_ldt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.alloc_ldt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ldt)), "d" ((unsigned long)(entries)) : "memory", "cc" ); });
2655}
2656static inline __attribute__((always_inline)) void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
2657{
2658 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.free_ldt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.free_ldt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.free_ldt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ldt)), "d" ((unsigned long)(entries)) : "memory", "cc" ); });
2659}
2660static inline __attribute__((always_inline)) void load_TR_desc(void)
2661{
2662 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_tr_desc); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_tr_desc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_tr_desc)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
2663}
2664static inline __attribute__((always_inline)) void load_gdt(const struct desc_ptr *dtr)
2665{
2666 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_gdt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_gdt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_gdt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dtr)) : "memory", "cc" ); });
2667}
2668static inline __attribute__((always_inline)) void load_idt(const struct desc_ptr *dtr)
2669{
2670 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_idt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_idt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_idt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dtr)) : "memory", "cc" ); });
2671}
2672static inline __attribute__((always_inline)) void set_ldt(const void *addr, unsigned entries)
2673{
2674 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.set_ldt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.set_ldt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.set_ldt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(addr)), "d" ((unsigned long)(entries)) : "memory", "cc" ); });
2675}
2676static inline __attribute__((always_inline)) void store_gdt(struct desc_ptr *dtr)
2677{
2678 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.store_gdt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.store_gdt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.store_gdt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dtr)) : "memory", "cc" ); });
2679}
2680static inline __attribute__((always_inline)) void store_idt(struct desc_ptr *dtr)
2681{
2682 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.store_idt); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.store_idt) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.store_idt)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dtr)) : "memory", "cc" ); });
2683}
2684static inline __attribute__((always_inline)) unsigned long paravirt_store_tr(void)
2685{
2686 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.store_tr); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 302, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.store_tr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.store_tr)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.store_tr) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.store_tr)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
2687}
2688static inline __attribute__((always_inline)) void load_TLS(struct thread_struct *t, unsigned cpu)
2689{
2690 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.load_tls); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.load_tls) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.load_tls)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(t)), "d" ((unsigned long)(cpu)) : "memory", "cc" ); });
2691}
2692static inline __attribute__((always_inline)) void write_ldt_entry(struct desc_struct *dt, int entry,
2693 const void *desc)
2694{
2695 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_ldt_entry); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_ldt_entry) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_ldt_entry)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dt)), "d" ((unsigned long)(entry)), "c" ((unsigned long)(desc)) : "memory", "cc" ); });
2696}
2697static inline __attribute__((always_inline)) void write_gdt_entry(struct desc_struct *dt, int entry,
2698 void *desc, int type)
2699{
2700 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_gdt_entry); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_gdt_entry) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_gdt_entry)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(dt)), "1" ((u32)(entry)), "2" ((u32)(desc)), [_arg4] "mr" ((u32)(type)) : "memory", "cc" ); });
2701}
2702static inline __attribute__((always_inline)) void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
2703{
2704 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.write_idt_entry); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.write_idt_entry) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.write_idt_entry)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(dt)), "d" ((unsigned long)(entry)), "c" ((unsigned long)(g)) : "memory", "cc" ); });
2705}
2706static inline __attribute__((always_inline)) void set_iopl_mask(unsigned mask)
2707{
2708 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.set_iopl_mask); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.set_iopl_mask) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.set_iopl_mask)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mask)) : "memory", "cc" ); });
2709}
2710static inline __attribute__((always_inline)) void slow_down_io(void)
2711{
2712 pv_cpu_ops.io_delay();
2713}
2714static inline __attribute__((always_inline)) void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
2715 unsigned long start_esp)
2716{
2717 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_apic_ops.startup_ipi_hook); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_apic_ops.startup_ipi_hook) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_apic_ops.startup_ipi_hook)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(phys_apicid)), "d" ((unsigned long)(start_eip)), "c" ((unsigned long)(start_esp)) : "memory", "cc" ); })
2718 ;
2719}
2720static inline __attribute__((always_inline)) void paravirt_activate_mm(struct mm_struct *prev,
2721 struct mm_struct *next)
2722{
2723 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.activate_mm); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.activate_mm) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.activate_mm)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(prev)), "d" ((unsigned long)(next)) : "memory", "cc" ); });
2724}
2725static inline __attribute__((always_inline)) void arch_dup_mmap(struct mm_struct *oldmm,
2726 struct mm_struct *mm)
2727{
2728 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.dup_mmap); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.dup_mmap) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.dup_mmap)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(oldmm)), "d" ((unsigned long)(mm)) : "memory", "cc" ); });
2729}
2730static inline __attribute__((always_inline)) void arch_exit_mmap(struct mm_struct *mm)
2731{
2732 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.exit_mmap); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.exit_mmap) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.exit_mmap)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)) : "memory", "cc" ); });
2733}
2734static inline __attribute__((always_inline)) void __flush_tlb(void)
2735{
2736 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.flush_tlb_user); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.flush_tlb_user) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.flush_tlb_user)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
2737}
2738static inline __attribute__((always_inline)) void __flush_tlb_global(void)
2739{
2740 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.flush_tlb_kernel); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.flush_tlb_kernel) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.flush_tlb_kernel)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
2741}
2742static inline __attribute__((always_inline)) void __flush_tlb_single(unsigned long addr)
2743{
2744 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.flush_tlb_single); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.flush_tlb_single) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.flush_tlb_single)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(addr)) : "memory", "cc" ); });
2745}
2746static inline __attribute__((always_inline)) void flush_tlb_others(const struct cpumask *cpumask,
2747 struct mm_struct *mm,
2748 unsigned long va)
2749{
2750 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.flush_tlb_others); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.flush_tlb_others) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.flush_tlb_others)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(cpumask)), "d" ((unsigned long)(mm)), "c" ((unsigned long)(va)) : "memory", "cc" ); });
2751}
2752static inline __attribute__((always_inline)) int paravirt_pgd_alloc(struct mm_struct *mm)
2753{
2754 return ({ int __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pgd_alloc); if (__builtin_constant_p(((sizeof(int) > sizeof(unsigned long)))) ? !!((sizeof(int) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 397, }; ______r = !!((sizeof(int) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_alloc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_alloc)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)) : "memory", "cc" ); __ret = (int)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_alloc) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_alloc)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)) : "memory", "cc" ); __ret = (int)__eax; } __ret; });
2755}
2756static inline __attribute__((always_inline)) void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
2757{
2758 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pgd_free); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_free) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_free)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(pgd)) : "memory", "cc" ); });
2759}
2760static inline __attribute__((always_inline)) void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
2761{
2762 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.alloc_pte); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.alloc_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.alloc_pte)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(pfn)) : "memory", "cc" ); });
2763}
2764static inline __attribute__((always_inline)) void paravirt_release_pte(unsigned long pfn)
2765{
2766 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.release_pte); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.release_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.release_pte)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pfn)) : "memory", "cc" ); });
2767}
2768static inline __attribute__((always_inline)) void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
2769{
2770 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.alloc_pmd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.alloc_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.alloc_pmd)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(pfn)) : "memory", "cc" ); });
2771}
2772static inline __attribute__((always_inline)) void paravirt_release_pmd(unsigned long pfn)
2773{
2774 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.release_pmd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.release_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.release_pmd)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pfn)) : "memory", "cc" ); });
2775}
2776static inline __attribute__((always_inline)) void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
2777{
2778 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.alloc_pud); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.alloc_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.alloc_pud)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(pfn)) : "memory", "cc" ); });
2779}
2780static inline __attribute__((always_inline)) void paravirt_release_pud(unsigned long pfn)
2781{
2782 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.release_pud); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.release_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.release_pud)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pfn)) : "memory", "cc" ); });
2783}
2784static inline __attribute__((always_inline)) void pte_update(struct mm_struct *mm, unsigned long addr,
2785 pte_t *ptep)
2786{
2787 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_update); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_update) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_update)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); });
2788}
2789static inline __attribute__((always_inline)) void pmd_update(struct mm_struct *mm, unsigned long addr,
2790 pmd_t *pmdp)
2791{
2792 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_update); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_update) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_update)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(pmdp)) : "memory", "cc" ); });
2793}
2794static inline __attribute__((always_inline)) void pte_update_defer(struct mm_struct *mm, unsigned long addr,
2795 pte_t *ptep)
2796{
2797 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_update_defer); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_update_defer) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_update_defer)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); });
2798}
2799static inline __attribute__((always_inline)) void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
2800 pmd_t *pmdp)
2801{
2802 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_update_defer); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_update_defer) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_update_defer)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(pmdp)) : "memory", "cc" ); });
2803}
2804static inline __attribute__((always_inline)) pte_t __pte(pteval_t val)
2805{
2806 pteval_t ret;
2807 if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 460, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2808 ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pte.func); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2809 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2810 , .line =
2811 463
2812 , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pte.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pte.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; })
2813 ;
2814 else
2815 ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pte.func); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2816 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2817 , .line =
2818 467
2819 , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pte.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pte.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pte.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; })
2820 ;
2821 return (pte_t) { .pte = ret };
2822}
2823static inline __attribute__((always_inline)) pteval_t pte_val(pte_t pte)
2824{
2825 pteval_t ret;
2826 if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 476, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2827 ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_val.func); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2828 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2829 , .line =
2830 478
2831 , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pte.pte)), "d" ((unsigned long)((u64)pte.pte >> 32)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pte.pte)), "d" ((unsigned long)((u64)pte.pte >> 32)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; })
2832 ;
2833 else
2834 ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_val.func); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2835 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2836 , .line =
2837 481
2838 , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pte.pte)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pte.pte)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; })
2839 ;
2840 return ret;
2841}
2842static inline __attribute__((always_inline)) pgd_t __pgd(pgdval_t val)
2843{
2844 pgdval_t ret;
2845 if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(long)))) ? !!((sizeof(pgdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 490, }; ______r = !!((sizeof(pgdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2846 ret = ({ pgdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pgd.func); if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pgdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2847 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2848 , .line =
2849 492
2850 , }; ______r = !!((sizeof(pgdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pgdval_t)__eax; } __ret; })
2851 ;
2852 else
2853 ret = ({ pgdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pgd.func); if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pgdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2854 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2855 , .line =
2856 495
2857 , }; ______r = !!((sizeof(pgdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pgd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pgd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pgdval_t)__eax; } __ret; })
2858 ;
2859 return (pgd_t) { ret };
2860}
2861static inline __attribute__((always_inline)) pgdval_t pgd_val(pgd_t pgd)
2862{
2863 pgdval_t ret;
2864 if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(long)))) ? !!((sizeof(pgdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 504, }; ______r = !!((sizeof(pgdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2865 ret = ({ pgdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pgd_val.func); if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pgdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2866 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2867 , .line =
2868 506
2869 , }; ______r = !!((sizeof(pgdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pgd.pgd)), "d" ((unsigned long)((u64)pgd.pgd >> 32)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pgd.pgd)), "d" ((unsigned long)((u64)pgd.pgd >> 32)) : "memory", "cc" ); __ret = (pgdval_t)__eax; } __ret; })
2870 ;
2871 else
2872 ret = ({ pgdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pgd_val.func); if (__builtin_constant_p(((sizeof(pgdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pgdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2873 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2874 , .line =
2875 509
2876 , }; ______r = !!((sizeof(pgdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pgd.pgd)) : "memory", "cc" ); __ret = (pgdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pgd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pgd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pgd.pgd)) : "memory", "cc" ); __ret = (pgdval_t)__eax; } __ret; })
2877 ;
2878 return ret;
2879}
2880static inline __attribute__((always_inline)) pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
2881 pte_t *ptep)
2882{
2883 pteval_t ret;
2884 ret = ({ pteval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.ptep_modify_prot_start); if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(unsigned long)))) ? !!((sizeof(pteval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2885 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2886 , .line =
2887 521
2888 , }; ______r = !!((sizeof(pteval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.ptep_modify_prot_start) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.ptep_modify_prot_start)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); __ret = (pteval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.ptep_modify_prot_start) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.ptep_modify_prot_start)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); __ret = (pteval_t)__eax; } __ret; })
2889 ;
2890 return (pte_t) { .pte = ret };
2891}
2892static inline __attribute__((always_inline)) void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
2893 pte_t *ptep, pte_t pte)
2894{
2895 if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 529, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2896 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
2897 else
2898 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.ptep_modify_prot_commit); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.ptep_modify_prot_commit) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.ptep_modify_prot_commit)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(mm)), "1" ((u32)(addr)), "2" ((u32)(ptep)), [_arg4] "mr" ((u32)(pte.pte)) : "memory", "cc" ); })
2899 ;
2900}
2901static inline __attribute__((always_inline)) void set_pte(pte_t *ptep, pte_t pte)
2902{
2903 if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 539, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2904 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pte); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pte)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ptep)), "d" ((unsigned long)(pte.pte)), "c" ((unsigned long)((u64)pte.pte >> 32)) : "memory", "cc" ); })
2905 ;
2906 else
2907 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pte); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pte) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pte)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ptep)), "d" ((unsigned long)(pte.pte)) : "memory", "cc" ); })
2908 ;
2909}
2910static inline __attribute__((always_inline)) void set_pte_at(struct mm_struct *mm, unsigned long addr,
2911 pte_t *ptep, pte_t pte)
2912{
2913 if (__builtin_constant_p(((sizeof(pteval_t) > sizeof(long)))) ? !!((sizeof(pteval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 550, }; ______r = !!((sizeof(pteval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2914 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
2915 else
2916 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pte_at); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pte_at) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pte_at)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(mm)), "1" ((u32)(addr)), "2" ((u32)(ptep)), [_arg4] "mr" ((u32)(pte.pte)) : "memory", "cc" ); });
2917}
2918static inline __attribute__((always_inline)) void set_pmd_at(struct mm_struct *mm, unsigned long addr,
2919 pmd_t *pmdp, pmd_t pmd)
2920{
2921 if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(long)))) ? !!((sizeof(pmdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 561, }; ______r = !!((sizeof(pmdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2922 pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
2923 else
2924 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pmd_at); asm volatile("push %[_arg4];" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "lea 4(%%esp),%%esp;" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pmd_at) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pmd_at)), [paravirt_clobber] "i" (((1 << 4) - 1)), "0" ((u32)(mm)), "1" ((u32)(addr)), "2" ((u32)(pmdp)), [_arg4] "mr" ((u32)(native_pmd_val(pmd))) : "memory", "cc" ); })
2925 ;
2926}
2927static inline __attribute__((always_inline)) void set_pmd(pmd_t *pmdp, pmd_t pmd)
2928{
2929 pmdval_t val = native_pmd_val(pmd);
2930 if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(long)))) ? !!((sizeof(pmdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 574, }; ______r = !!((sizeof(pmdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2931 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pmd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pmd)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pmdp)), "d" ((unsigned long)(val)), "c" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); });
2932 else
2933 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pmd); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pmd) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pmd)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pmdp)), "d" ((unsigned long)(val)) : "memory", "cc" ); });
2934}
2935static inline __attribute__((always_inline)) pmd_t __pmd(pmdval_t val)
2936{
2937 pmdval_t ret;
2938 if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(long)))) ? !!((sizeof(pmdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 585, }; ______r = !!((sizeof(pmdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2939 ret = ({ pmdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pmd.func); if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pmdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2940 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2941 , .line =
2942 587
2943 , }; ______r = !!((sizeof(pmdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)), "d" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); __ret = (pmdval_t)__eax; } __ret; })
2944 ;
2945 else
2946 ret = ({ pmdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.make_pmd.func); if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pmdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2947 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2948 , .line =
2949 590
2950 , }; ______r = !!((sizeof(pmdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.make_pmd.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.make_pmd.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(val)) : "memory", "cc" ); __ret = (pmdval_t)__eax; } __ret; })
2951 ;
2952 return (pmd_t) { ret };
2953}
2954static inline __attribute__((always_inline)) pmdval_t pmd_val(pmd_t pmd)
2955{
2956 pmdval_t ret;
2957 if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(long)))) ? !!((sizeof(pmdval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 599, }; ______r = !!((sizeof(pmdval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
2958 ret = ({ pmdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_val.func); if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pmdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2959 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2960 , .line =
2961 601
2962 , }; ______r = !!((sizeof(pmdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pmd.pmd)), "d" ((unsigned long)((u64)pmd.pmd >> 32)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pmd.pmd)), "d" ((unsigned long)((u64)pmd.pmd >> 32)) : "memory", "cc" ); __ret = (pmdval_t)__eax; } __ret; })
2963 ;
2964 else
2965 ret = ({ pmdval_t __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_val.func); if (__builtin_constant_p(((sizeof(pmdval_t) > sizeof(unsigned long)))) ? !!((sizeof(pmdval_t) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
2966 "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h"
2967 , .line =
2968 604
2969 , }; ______r = !!((sizeof(pmdval_t) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pmd.pmd)) : "memory", "cc" ); __ret = (pmdval_t)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_val.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_val.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(pmd.pmd)) : "memory", "cc" ); __ret = (pmdval_t)__eax; } __ret; })
2970 ;
2971 return ret;
2972}
/* Write a pud entry through the paravirt pv_mmu_ops.set_pud hook.
 * If pudval_t is wider than long, the 64-bit value is passed split across
 * %edx:%ecx; otherwise only the low word is passed in %edx. */
static inline __attribute__((always_inline)) void set_pud(pud_t *pudp, pud_t pud)
{
 pudval_t val = native_pud_val(pud);
 /* Width test is compile-time constant; the ({...}) arm is branch-profiling
  * instrumentation (_ftrace_branch section) expanded by the build config. */
 if (__builtin_constant_p(((sizeof(pudval_t) > sizeof(long)))) ? !!((sizeof(pudval_t) > sizeof(long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 613, }; ______r = !!((sizeof(pudval_t) > sizeof(long))); ______f.miss_hit[______r]++; ______r; }))
 /* Indirect call patched via the .parainstructions section at boot. */
 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pud); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pud)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pudp)), "d" ((unsigned long)(val)), "c" ((unsigned long)((u64)val >> 32)) : "memory", "cc" ); })
 ;
 else
 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pud); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pud) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pud)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pudp)), "d" ((unsigned long)(val)) : "memory", "cc" ); })
 ;
}
/* Atomically store a pte via pv_mmu_ops.set_pte_atomic; the 64-bit pte is
 * passed in %edx (low) and %ecx (high), the pointer in %eax. */
static inline __attribute__((always_inline)) void set_pte_atomic(pte_t *ptep, pte_t pte)
{
 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.set_pte_atomic); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.set_pte_atomic) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.set_pte_atomic)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(ptep)), "d" ((unsigned long)(pte.pte)), "c" ((unsigned long)(pte.pte >> 32)) : "memory", "cc" ); })
 ;
}
/* Clear a pte via pv_mmu_ops.pte_clear(mm, addr, ptep); arguments are
 * passed in %eax/%edx/%ecx per the paravirt register calling convention. */
static inline __attribute__((always_inline)) void pte_clear(struct mm_struct *mm, unsigned long addr,
       pte_t *ptep)
{
 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pte_clear); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pte_clear) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pte_clear)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(mm)), "d" ((unsigned long)(addr)), "c" ((unsigned long)(ptep)) : "memory", "cc" ); });
}
/* Clear a pmd entry via the patched pv_mmu_ops.pmd_clear call. */
static inline __attribute__((always_inline)) void pmd_clear(pmd_t *pmdp)
{
 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.pmd_clear); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.pmd_clear) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.pmd_clear)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(pmdp)) : "memory", "cc" ); });
}
/* Paravirt notification that a task context switch is beginning. */
static inline __attribute__((always_inline)) void arch_start_context_switch(struct task_struct *prev)
{
 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.start_context_switch); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.start_context_switch) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.start_context_switch)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(prev)) : "memory", "cc" ); });
}
/* Paravirt notification that a task context switch has completed. */
static inline __attribute__((always_inline)) void arch_end_context_switch(struct task_struct *next)
{
 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_cpu_ops.end_context_switch); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_cpu_ops.end_context_switch) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_cpu_ops.end_context_switch)), [paravirt_clobber] "i" (((1 << 4) - 1)), "a" ((unsigned long)(next)) : "memory", "cc" ); });
}
/* Enter lazy MMU update mode via pv_mmu_ops.lazy_mode.enter (no arguments). */
static inline __attribute__((always_inline)) void arch_enter_lazy_mmu_mode(void)
{
 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.lazy_mode.enter); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.lazy_mode.enter) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.lazy_mode.enter)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
}
/* Leave lazy MMU update mode via pv_mmu_ops.lazy_mode.leave (no arguments). */
static inline __attribute__((always_inline)) void arch_leave_lazy_mmu_mode(void)
{
 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_mmu_ops.lazy_mode.leave); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx), "=c" (__ecx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_mmu_ops.lazy_mode.leave) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_mmu_ops.lazy_mode.leave)), [paravirt_clobber] "i" (((1 << 4) - 1)) : "memory", "cc" ); });
}
3013void arch_flush_lazy_mmu_mode(void);
/* Map a fixmap slot: plain indirect call through pv_mmu_ops.set_fixmap
 * (not patched via .parainstructions like the other paravirt hooks here). */
static inline __attribute__((always_inline)) void __set_fixmap(unsigned idx,
    phys_addr_t phys, pgprot_t flags)
{
 pv_mmu_ops.set_fixmap(idx, phys, flags);
}
/* Return the current flags word via pv_irq_ops.save_fl. The
 * sizeof(unsigned long) > sizeof(unsigned long) test is trivially false,
 * so the 32-bit (%eax-only) arm is always taken. */
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) unsigned long arch_local_save_flags(void)
{
 return ({ unsigned long __ret; unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.save_fl.func); if (__builtin_constant_p(((sizeof(unsigned long) > sizeof(unsigned long)))) ? !!((sizeof(unsigned long) > sizeof(unsigned long))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/paravirt.h", .line = 853, }; ______r = !!((sizeof(unsigned long) > sizeof(unsigned long))); ______f.miss_hit[______r]++; ______r; })) { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.save_fl.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.save_fl.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))) : "memory", "cc" ); __ret = (unsigned long)((((u64)__edx) << 32) | __eax); } else { asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.save_fl.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.save_fl.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))) : "memory", "cc" ); __ret = (unsigned long)__eax; } __ret; });
}
/* Restore a previously saved flags word via pv_irq_ops.restore_fl. */
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void arch_local_irq_restore(unsigned long f)
{
 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.restore_fl.func); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.restore_fl.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.restore_fl.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))), "a" ((unsigned long)(f)) : "memory", "cc" ); });
}
/* Disable local interrupts via pv_irq_ops.irq_disable. */
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void arch_local_irq_disable(void)
{
 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.irq_disable.func); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.irq_disable.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.irq_disable.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))) : "memory", "cc" ); });
}
/* Enable local interrupts via pv_irq_ops.irq_enable. */
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void arch_local_irq_enable(void)
{
 ({ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; ((void)pv_irq_ops.irq_enable.func); asm volatile("" "771:\n\t" "call *%c[paravirt_opptr];" "\n" "772:\n" ".pushsection .parainstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " " 771b\n" " .byte " "%c[paravirt_typenum]" "\n" " .byte 772b-771b\n" " .short " "%c[paravirt_clobber]" "\n" ".popsection\n" "" : "=a" (__eax), "=d" (__edx) : [paravirt_typenum] "i" ((__builtin_offsetof(struct paravirt_patch_template,pv_irq_ops.irq_enable.func) / sizeof(void *))), [paravirt_opptr] "i" (&(pv_irq_ops.irq_enable.func)), [paravirt_clobber] "i" (((1 << 0) | (1 << 2))) : "memory", "cc" ); });
}
/* Atomically-in-effect save the current flags word, then disable local
 * interrupts; the returned value is meant for arch_local_irq_restore(). */
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) unsigned long arch_local_irq_save(void)
{
 unsigned long flags = arch_local_save_flags();
 arch_local_irq_disable();
 return flags;
}
3042extern void default_banner(void);
/* Return non-zero when the given saved flags word has interrupts disabled,
 * i.e. when the EFLAGS interrupt-enable bit (0x00000200) is clear. */
static inline __attribute__((always_inline)) int arch_irqs_disabled_flags(unsigned long flags)
{
 return (flags & 0x00000200) == 0;
}
/* Report whether local interrupts are currently disabled by sampling the
 * live flags word and testing its interrupt-enable bit. */
static inline __attribute__((always_inline)) int arch_irqs_disabled(void)
{
 return arch_irqs_disabled_flags(arch_local_save_flags());
}
3052 extern void trace_softirqs_on(unsigned long ip);
3053 extern void trace_softirqs_off(unsigned long ip);
3054 extern void trace_hardirqs_on(void);
3055 extern void trace_hardirqs_off(void);
3056 extern void stop_critical_timings(void);
3057 extern void start_critical_timings(void);
3058struct task_struct;
3059struct task_struct *__switch_to(struct task_struct *prev,
3060 struct task_struct *next);
3061struct tss_struct;
3062void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
3063 struct tss_struct *tss);
3064extern void show_regs_common(void);
3065extern void native_load_gs_index(unsigned);
/* Return the size of a segment: LSL loads the segment limit for the given
 * selector, and +1 converts the inclusive limit into a byte count. */
static inline __attribute__((always_inline)) unsigned long get_limit(unsigned long segment)
{
 unsigned long __limit;
 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
 return __limit + 1;
}
/* Clear the Task-Switched flag in CR0 (CLTS instruction). */
static inline __attribute__((always_inline)) void native_clts(void)
{
 asm volatile("clts");
}
3076static unsigned long __force_order;
/* Read CR0. The fake "=m"(__force_order) output serializes the CR
 * accessors against each other without a full memory clobber. */
static inline __attribute__((always_inline)) unsigned long native_read_cr0(void)
{
 unsigned long val;
 asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
 return val;
}
/* Write CR0; __force_order input keeps ordering with the CR reads. */
static inline __attribute__((always_inline)) void native_write_cr0(unsigned long val)
{
 asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}
/* Read CR2 (page-fault linear address register). */
static inline __attribute__((always_inline)) unsigned long native_read_cr2(void)
{
 unsigned long val;
 asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
 return val;
}
/* Write CR2. */
static inline __attribute__((always_inline)) void native_write_cr2(unsigned long val)
{
 asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}
/* Read CR3 (page-table base register). */
static inline __attribute__((always_inline)) unsigned long native_read_cr3(void)
{
 unsigned long val;
 asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
 return val;
}
/* Write CR3, switching the active page tables (flushes non-global TLB). */
static inline __attribute__((always_inline)) void native_write_cr3(unsigned long val)
{
 asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}
/* Read CR4; faults on CPUs without CR4 (see native_read_cr4_safe). */
static inline __attribute__((always_inline)) unsigned long native_read_cr4(void)
{
 unsigned long val;
 asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
 return val;
}
/* Read CR4 on CPUs that may not have it: an __ex_table fixup entry makes a
 * faulting read fall through to label 2, returning the preloaded 0. */
static inline __attribute__((always_inline)) unsigned long native_read_cr4_safe(void)
{
 unsigned long val;
 asm volatile("1: mov %%cr4, %0\n"
       "2:\n"
       " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "2b" "\n" " .previous\n"
       : "=r" (val), "=m" (__force_order) : "0" (0));
 return val;
}
/* Write CR4. */
static inline __attribute__((always_inline)) void native_write_cr4(unsigned long val)
{
 asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}
/* Write back and invalidate all caches (WBINVD). */
static inline __attribute__((always_inline)) void native_wbinvd(void)
{
 asm volatile("wbinvd": : :"memory");
}
/* Flush the cache line containing *__p ("+m" makes it both input and output
 * so the compiler neither reorders nor drops the access). */
static inline __attribute__((always_inline)) void clflush(volatile void *__p)
{
 asm volatile("clflush %0" : "+m" (*(volatile char *)__p));
}
3134void disable_hlt(void);
3135void enable_hlt(void);
3136void cpu_idle_wait(void);
3137extern unsigned long arch_align_stack(unsigned long sp);
3138extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
3139void default_idle(void);
3140void stop_this_cpu(void *dummy);
/* Serialize RDTSC: the .altinstructions records patch the 3-byte nop
 * (0x8d,0x76,0x00) into MFENCE on feature bit 3*32+17 or LFENCE on bit
 * 3*32+18, depending on what the CPU supports. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void rdtsc_barrier(void)
{
 asm volatile ("661:\n\t" ".byte " "0x8d,0x76,0x00" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(3*32+17)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "mfence" "\n664:\n" ".previous" : : : "memory");
 asm volatile ("661:\n\t" ".byte " "0x8d,0x76,0x00" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(3*32+18)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "lfence" "\n664:\n" ".previous" : : : "memory");
}
3146extern unsigned int __invalid_size_argument_for_IOC;
3147extern cpumask_var_t cpu_callin_mask;
3148extern cpumask_var_t cpu_callout_mask;
3149extern cpumask_var_t cpu_initialized_mask;
3150extern cpumask_var_t cpu_sibling_setup_mask;
3151extern void setup_cpu_local_masks(void);
/* One 64-bit MSR value, accessible whole (q) or as low/high 32-bit halves
 * (l/h) via the anonymous union/struct. */
struct msr {
 union {
  struct {
   u32 l;
   u32 h;
  };
  u64 q;
 };
};
/* Parameter block for cross-CPU MSR helpers: which MSR, the value read or
 * to write, an optional per-CPU results array, and the error code. */
struct msr_info {
 u32 msr_no;
 struct msr reg;
 struct msr *msrs;
 int err;
};
/* Register-array form used by the *_safe_regs MSR helpers (u32 regs[8]). */
struct msr_regs_info {
 u32 *regs;
 int err;
};
/* RDTSCP (opcode bytes 0x0f,0x01,0xf9): returns the 64-bit TSC and stores
 * the TSC_AUX value (typically the CPU number) through *aux. */
static inline __attribute__((always_inline)) unsigned long long native_read_tscp(unsigned int *aux)
{
 unsigned long low, high;
 asm volatile(".byte 0x0f,0x01,0xf9"
       : "=a" (low), "=d" (high), "=c" (*aux));
 return low | ((u64)high << 32);
}
/* RDMSR: read the 64-bit MSR selected by %ecx; "=A" gathers %edx:%eax.
 * Faults (#GP) on an invalid MSR — see native_read_msr_safe. */
static inline __attribute__((always_inline)) unsigned long long native_read_msr(unsigned int msr)
{
 unsigned long long val;
 asm volatile("rdmsr" : "=A" (val) : "c" (msr));
 return (val);
}
/* RDMSR with exception fixup: on success *err is xor'ed to 0; if the read
 * faults, the __ex_table entry redirects to the .fixup stub, which stores
 * -5 (-EIO) in *err and resumes after the instruction. */
static inline __attribute__((always_inline)) unsigned long long native_read_msr_safe(unsigned int msr,
          int *err)
{
 unsigned long long val;
 asm volatile("2: rdmsr ; xor %[err],%[err]\n"
       "1:\n\t"
       ".section .fixup,\"ax\"\n\t"
       "3: mov %[fault],%[err] ; jmp 1b\n\t"
       ".previous\n\t"
       " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "3b" "\n" " .previous\n"
       : [err] "=r" (*err), "=A" (val)
       : "c" (msr), [fault] "i" (-5));
 return (val);
}
/* WRMSR: write %edx:%eax (high:low) to the MSR selected by %ecx. */
static inline __attribute__((always_inline)) void native_write_msr(unsigned int msr,
        unsigned low, unsigned high)
{
 asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}
/* WRMSR with exception fixup: returns 0 on success, -5 (-EIO) if the write
 * faulted (the __ex_table entry diverts the #GP to the .fixup stub). */
__attribute__((no_instrument_function)) static inline __attribute__((always_inline)) int native_write_msr_safe(unsigned int msr,
  unsigned low, unsigned high)
{
 int err;
 asm volatile("2: wrmsr ; xor %[err],%[err]\n"
       "1:\n\t"
       ".section .fixup,\"ax\"\n\t"
       "3: mov %[fault],%[err] ; jmp 1b\n\t"
       ".previous\n\t"
       " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "3b" "\n" " .previous\n"
       : [err] "=a" (err)
       : "c" (msr), "0" (low), "d" (high),
         [fault] "i" (-5)
       : "memory");
 return err;
}
3219extern unsigned long long native_read_tsc(void);
3220extern int native_rdmsr_safe_regs(u32 regs[8]);
3221extern int native_wrmsr_safe_regs(u32 regs[8]);
/* RDTSC: read the 64-bit time-stamp counter ("=A" packs %edx:%eax). */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long long __native_read_tsc(void)
{
 unsigned long long val;
 asm volatile("rdtsc" : "=A" (val));
 return (val);
}
/* RDPMC: read performance-monitoring counter `counter` (selected via %ecx). */
static inline __attribute__((always_inline)) unsigned long long native_read_pmc(int counter)
{
 unsigned long long val;
 asm volatile("rdpmc" : "=A" (val) : "c" (counter));
 return (val);
}
3234struct msr *msrs_alloc(void);
3235void msrs_free(struct msr *msrs);
3236int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
3237int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
3238void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
3239void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
3240int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
3241int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
3242int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
3243int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
3244struct exec_domain;
3245struct pt_regs;
3246extern int register_exec_domain(struct exec_domain *);
3247extern int unregister_exec_domain(struct exec_domain *);
3248extern int __set_personality(unsigned int);
/* Personality modifier bits, OR-ed into the base personality values below. */
enum {
 UNAME26 = 0x0020000,
 ADDR_NO_RANDOMIZE = 0x0040000,
 FDPIC_FUNCPTRS = 0x0080000,
 MMAP_PAGE_ZERO = 0x0100000,
 ADDR_COMPAT_LAYOUT = 0x0200000,
 READ_IMPLIES_EXEC = 0x0400000,
 ADDR_LIMIT_32BIT = 0x0800000,
 SHORT_INODE = 0x1000000,
 WHOLE_SECONDS = 0x2000000,
 STICKY_TIMEOUTS = 0x4000000,
 ADDR_LIMIT_3GB = 0x8000000,
};
/* Execution-domain personalities: a low base identifier (masked by
 * PER_MASK) combined with the modifier flags from the enum above. */
enum {
 PER_LINUX = 0x0000,
 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
      WHOLE_SECONDS | SHORT_INODE,
 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
 PER_BSD = 0x0006,
 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
 PER_LINUX32 = 0x0008,
 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,
 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,
 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,
 PER_RISCOS = 0x000c,
 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
 PER_OSF4 = 0x000f,
 PER_HPUX = 0x0010,
 PER_MASK = 0x00ff,
};
3288typedef void (*handler_t)(int, struct pt_regs *);
/* Description of one execution domain (personality): the personality range
 * it covers, its signal/errno/socket translation maps, and linkage into the
 * registered-domain list. */
struct exec_domain {
 const char *name;
 handler_t handler;
 unsigned char pers_low;  /* inclusive personality range handled */
 unsigned char pers_high;
 unsigned long *signal_map;
 unsigned long *signal_invmap;
 struct map_segment *err_map;
 struct map_segment *socktype_map;
 struct map_segment *sockopt_map;
 struct map_segment *af_map;
 struct module *module;
 struct exec_domain *next; /* singly-linked list of registered domains */
};
3303extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
3304extern u64 div64_u64(u64 dividend, u64 divisor);
3305extern s64 div64_s64(s64 dividend, s64 divisor);
3306static inline __attribute__((always_inline)) u64 div_u64(u64 dividend, u32 divisor)
3307{
3308 u32 remainder;
3309 return div_u64_rem(dividend, divisor, &remainder);
3310}
3311static inline __attribute__((always_inline)) s64 div_s64(s64 dividend, s32 divisor)
3312{
3313 s32 remainder;
3314 return div_s64_rem(dividend, divisor, &remainder);
3315}
3316u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
3317static inline __attribute__((always_inline)) __attribute__((always_inline)) u32
3318__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
3319{
3320 u32 ret = 0;
3321 while (dividend >= divisor) {
3322 asm("" : "+rm"(dividend));
3323 dividend -= divisor;
3324 ret++;
3325 }
3326 *remainder = dividend;
3327 return ret;
3328}
/* Encode a (negative errno) value as a pointer for the ERR_PTR/IS_ERR
 * error-return convention. */
static inline __attribute__((always_inline)) void * __attribute__((warn_unused_result)) ERR_PTR(long error)
{
 return (void *)error;
}
/* Recover the errno value previously packed into a pointer by ERR_PTR(). */
static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) PTR_ERR(const void *ptr)
{
 return (long)ptr;
}
/* True when ptr encodes an error, i.e. lies in the top 4095 bytes of the
 * address space (ERR_PTR range). The ({...}) arm is likely()-annotated
 * branch-profiling instrumentation (_ftrace_annotated_branch section). */
static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) IS_ERR(const void *ptr)
{
 return (__builtin_constant_p(((unsigned long)ptr) >= (unsigned long)-4095) ? !!(((unsigned long)ptr) >= (unsigned long)-4095) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/err.h", .line = 34, }; ______r = __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
}
/* True when ptr is NULL or encodes an error value (same range test as
 * IS_ERR, with the same branch-profiling instrumentation). */
static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) IS_ERR_OR_NULL(const void *ptr)
{
 return !ptr || (__builtin_constant_p(((unsigned long)ptr) >= (unsigned long)-4095) ? !!(((unsigned long)ptr) >= (unsigned long)-4095) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/err.h", .line = 39, }; ______r = __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
}
/* Re-type an error pointer (e.g. from one struct type's ERR_PTR to
 * another's) without changing the encoded errno. */
static inline __attribute__((always_inline)) void * __attribute__((warn_unused_result)) ERR_CAST(const void *ptr)
{
 return (void *)ptr;
}
/* Collapse an ERR_PTR-style pointer to an int return code: the encoded
 * errno when IS_ERR(ptr), 0 otherwise. The if-condition carries the usual
 * branch-profiling instrumentation. */
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) PTR_RET(const void *ptr)
{
 if (__builtin_constant_p(((IS_ERR(ptr)))) ? !!((IS_ERR(ptr))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/err.h", .line = 57, }; ______r = !!((IS_ERR(ptr))); ______f.miss_hit[______r]++; ______r; }))
  return PTR_ERR(ptr);
 else
  return 0;
}
/* Return the address of the instruction following the asm (the local label
 * "1:"), i.e. roughly the current text address. */
static inline __attribute__((always_inline)) void *current_text_addr(void)
{
 void *pc;
 asm volatile("mov $1f, %0; 1:":"=r" (pc));
 return pc;
}
/* Per-CPU identification and feature data, cache-line aligned (1 << 6).
 * Filled in by cpu_detect()/identify_*_cpu() and exposed via cpu_info. */
struct cpuinfo_x86 {
 __u8 x86;   /* CPU family */
 __u8 x86_vendor;
 __u8 x86_model;
 __u8 x86_mask;  /* stepping */
 char wp_works_ok;
 char hlt_works_ok; /* read by hlt_works() below */
 char hard_math;
 char rfu;
 char fdiv_bug;
 char f00f_bug;
 char coma_bug;
 char pad0;
 __u8 x86_virt_bits;
 __u8 x86_phys_bits;
 __u8 x86_coreid_bits;
 __u32 extended_cpuid_level;
 int cpuid_level;
 __u32 x86_capability[10]; /* feature bit words */
 char x86_vendor_id[16];
 char x86_model_id[64];
 int x86_cache_size;
 int x86_cache_alignment;
 int x86_power;
 unsigned long loops_per_jiffy;
 u16 x86_max_cores;
 u16 apicid;
 u16 initial_apicid;
 u16 x86_clflush_size;
 u16 booted_cores;
 u16 phys_proc_id;
 u16 cpu_core_id;
 u8 compute_unit_id;
 u16 cpu_index;
} __attribute__((__aligned__((1 << (6)))));
3397extern struct cpuinfo_x86 boot_cpu_data;
3398extern struct cpuinfo_x86 new_cpu_data;
3399extern struct tss_struct doublefault_tss;
3400extern __u32 cpu_caps_cleared[10];
3401extern __u32 cpu_caps_set[10];
3402extern __attribute__((section(".data..percpu" ""))) __typeof__(struct cpuinfo_x86) cpu_info __attribute__((__aligned__((1 << (6)))));
3403extern const struct seq_operations cpuinfo_op;
/* Read cpu_info.hlt_works_ok for the given CPU: the expanded per_cpu()
 * macro adds __per_cpu_offset[cpu] to the address of the per-CPU cpu_info
 * (the empty asm hides the pointer from the optimizer). */
static inline __attribute__((always_inline)) int hlt_works(int cpu)
{
 return (*({ do { const void *__vpp_verify = (typeof((&(cpu_info))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_info))) *)(&(cpu_info)))); (typeof((typeof(*(&(cpu_info))) *)(&(cpu_info)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).hlt_works_ok;
}
3408extern void cpu_detect(struct cpuinfo_x86 *c);
3409extern struct pt_regs *idle_regs(struct pt_regs *);
3410extern void early_cpu_init(void);
3411extern void identify_boot_cpu(void);
3412extern void identify_secondary_cpu(struct cpuinfo_x86 *);
3413extern void print_cpu_info(struct cpuinfo_x86 *);
3414extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
3415extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
3416extern unsigned short num_cache_leaves;
3417extern void detect_extended_topology(struct cpuinfo_x86 *c);
3418extern void detect_ht(struct cpuinfo_x86 *c);
/* CPUID: *eax selects the leaf and *ecx the subleaf on entry ("0"/"2"
 * tie the inputs to the same registers); all four outputs are stored back. */
static inline __attribute__((always_inline)) void native_cpuid(unsigned int *eax, unsigned int *ebx,
    unsigned int *ecx, unsigned int *edx)
{
 asm volatile("cpuid"
     : "=a" (*eax),
       "=b" (*ebx),
       "=c" (*ecx),
       "=d" (*edx)
     : "0" (*eax), "2" (*ecx));
}
3429static inline __attribute__((always_inline)) void load_cr3(pgd_t *pgdir)
3430{
3431 write_cr3((((unsigned long)(pgdir)) - ((unsigned long)(0xC0000000UL))));
3432}
/* Hardware 32-bit TSS layout; packed so the fields match the exact format
 * the CPU reads (segment/stack pairs per privilege level, saved registers,
 * and the I/O bitmap offset at the end). */
struct x86_hw_tss {
 unsigned short  back_link, __blh;
 unsigned long  sp0;  /* ring-0 stack pointer used on kernel entry */
 unsigned short  ss0, __ss0h;
 unsigned long  sp1;
 unsigned short  ss1, __ss1h;
 unsigned long  sp2;
 unsigned short  ss2, __ss2h;
 unsigned long  __cr3;
 unsigned long  ip;
 unsigned long  flags;
 unsigned long  ax;
 unsigned long  cx;
 unsigned long  dx;
 unsigned long  bx;
 unsigned long  sp;
 unsigned long  bp;
 unsigned long  si;
 unsigned long  di;
 unsigned short  es, __esh;
 unsigned short  cs, __csh;
 unsigned short  ss, __ssh;
 unsigned short  ds, __dsh;
 unsigned short  fs, __fsh;
 unsigned short  gs, __gsh;
 unsigned short  ldt, __ldth;
 unsigned short  trace;
 unsigned short  io_bitmap_base; /* offset of io_bitmap from TSS start */
} __attribute__((packed));
/* Per-CPU TSS: the hardware TSS followed by the 64K-port I/O permission
 * bitmap (plus one terminator word) and a small scratch stack; cache-line
 * aligned. */
struct tss_struct {
 struct x86_hw_tss x86_tss;
 unsigned long  io_bitmap[((65536/8)/sizeof(long)) + 1];
 unsigned long  stack[64];
} __attribute__((__aligned__((1 << (6)))));
3467extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tss_struct) init_tss __attribute__((__aligned__((1 << (6)))));
/* Original interrupt-stack-table pointers (seven IST slots). */
struct orig_ist {
	unsigned long ist[7];
};
/*
 * Legacy FSAVE-format FPU state image: control/status/tag words,
 * instruction and operand pointers, the 80-bit ST registers packed
 * into st_space, plus a software status word.
 */
struct i387_fsave_struct {
	u32 cwd;		/* control word */
	u32 swd;		/* status word */
	u32 twd;		/* tag word */
	u32 fip;		/* FPU instruction pointer */
	u32 fcs;		/* FPU code segment */
	u32 foo;		/* FPU operand offset */
	u32 fos;		/* FPU operand segment */
	u32 st_space[20];	/* 8 x 10-byte ST registers */
	u32 status;		/* software-maintained status word */
};
/*
 * FXSAVE-format FPU/SSE state image (16-byte aligned as the
 * instruction requires).  The anonymous unions overlay the 64-bit
 * rip/rdp encoding with the 32-bit fip/fcs/foo/fos encoding.
 */
struct i387_fxsave_struct {
	u16 cwd;		/* control word */
	u16 swd;		/* status word */
	u16 twd;		/* tag word */
	u16 fop;		/* last opcode */
	union {
		struct {
			u64 rip;	/* 64-bit instruction pointer */
			u64 rdp;	/* 64-bit data pointer */
		};
		struct {
			u32 fip;	/* 32-bit instruction pointer */
			u32 fcs;
			u32 foo;	/* 32-bit operand pointer */
			u32 fos;
		};
	};
	u32 mxcsr;		/* SSE control/status */
	u32 mxcsr_mask;
	u32 st_space[32];	/* ST registers, 16 bytes each */
	u32 xmm_space[64];	/* XMM registers, 16 bytes each */
	u32 padding[12];
	union {
		u32 padding1[12];
		u32 sw_reserved[12];	/* software-usable reserved area */
	};
} __attribute__((aligned(16)));
/*
 * State for the software FPU emulator: mirrors the FSAVE image and
 * adds emulator bookkeeping fields.
 */
struct i387_soft_struct {
	u32 cwd;
	u32 swd;
	u32 twd;
	u32 fip;
	u32 fcs;
	u32 foo;
	u32 fos;
	u32 st_space[20];	/* 8 x 10-byte ST registers */
	u8 ftop;		/* top-of-stack index */
	u8 changed;
	u8 lookahead;
	u8 no_update;
	u8 rm;
	u8 alimit;
	struct math_emu_info *info;
	u32 entry_eip;
};
/* High 128 bits of the YMM registers, 16 bytes per register. */
struct ymmh_struct {
	u32 ymmh_space[64];
};
/* XSAVE header: xstate_bv feature bitmap plus reserved space. */
struct xsave_hdr_struct {
	u64 xstate_bv;		/* which state components are valid */
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));
/*
 * Full XSAVE area: legacy FXSAVE region, the XSAVE header, then the
 * AVX high-YMM state.  64-byte alignment is required by XSAVE/XRSTOR.
 */
struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
} __attribute__ ((packed, aligned (64)));
/* One allocation that can hold any of the supported FPU image formats. */
union thread_xstate {
	struct i387_fsave_struct fsave;
	struct i387_fxsave_struct fxsave;
	struct i387_soft_struct soft;
	struct xsave_struct xsave;
};
/* Per-task FPU context: points at a lazily-allocated thread_xstate. */
struct fpu {
	union thread_xstate *state;
};
/*
 * Stack-protector canary.  The pad places the canary member at a fixed
 * offset (20 bytes) within the per-cpu structure.
 */
struct stack_canary {
	char __pad[20];
	unsigned long canary;
};
3553extern __attribute__((section(".data..percpu" ""))) __typeof__(struct stack_canary) stack_canary __attribute__((__aligned__((1 << (6)))));
3554extern unsigned int xstate_size;
3555extern void free_thread_xstate(struct task_struct *);
3556extern struct kmem_cache *task_xstate_cachep;
3557struct perf_event;
/*
 * Architecture-specific per-task state: TLS descriptors, kernel stack
 * pointers, debug/fault bookkeeping, FPU context, vm86 state and the
 * I/O permission bitmap.
 */
struct thread_struct {
	struct desc_struct tls_array[3];	/* thread-local-storage GDT entries */
	unsigned long sp0;			/* ring-0 stack top */
	unsigned long sp;
	unsigned long sysenter_cs;
	unsigned long ip;
	unsigned long gs;
	struct perf_event *ptrace_bps[4];	/* hardware breakpoint events */
	unsigned long debugreg6;
	unsigned long ptrace_dr7;
	unsigned long cr2;			/* last fault address */
	unsigned long trap_no;
	unsigned long error_code;
	struct fpu fpu;
	/* vm86 mode support */
	struct vm86_struct *vm86_info;
	unsigned long screen_bitmap;
	unsigned long v86flags;
	unsigned long v86mask;
	unsigned long saved_sp0;
	unsigned int saved_fs;
	unsigned int saved_gs;
	/* I/O permissions */
	unsigned long *io_bitmap_ptr;
	unsigned long iopl;
	unsigned io_bitmap_max;		/* largest byte used in the bitmap */
};
/*
 * Read hardware debug register db<regno>.  Only 0-3 and 6-7 exist;
 * any other index hits the expanded BUG() below (ud2 + bug table
 * entry) and never returns.
 */
static inline __attribute__((always_inline)) unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;
	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" :"=r" (val));
		break;
	default:
		/* expanded BUG(): trap and record file/line in __bug_table */
		do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h"), "i" (499), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0);
	}
	return val;
}
/*
 * Write hardware debug register db<regno>.  Mirrors
 * native_get_debugreg(): indices other than 0-3/6-7 BUG().
 */
static inline __attribute__((always_inline)) void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0" ::"r" (value));
		break;
	case 1:
		asm("mov %0, %%db1" ::"r" (value));
		break;
	case 2:
		asm("mov %0, %%db2" ::"r" (value));
		break;
	case 3:
		asm("mov %0, %%db3" ::"r" (value));
		break;
	case 6:
		asm("mov %0, %%db6" ::"r" (value));
		break;
	case 7:
		asm("mov %0, %%db7" ::"r" (value));
		break;
	default:
		/* expanded BUG(): trap and record file/line in __bug_table */
		do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h"), "i" (526), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0);
	}
}
/*
 * Replace the IOPL bits (EFLAGS bits 12-13, mask 0x3000) with the
 * given mask by rewriting EFLAGS via pushf/popf.
 */
static inline __attribute__((always_inline)) void native_set_iopl_mask(unsigned mask)
{
	unsigned int reg;
	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"	/* clear current IOPL bits */
		      "orl %2, %0;"	/* set requested IOPL bits */
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~0x00003000), "r" (mask));
}
/*
 * Install the thread's ring-0 stack pointer into the per-cpu TSS and,
 * when the cached ss1 no longer matches, refresh the SYSENTER_CS MSR
 * (0x174).  The condition below is an ftrace-instrumented
 * unlikely(tss->x86_tss.ss1 != thread->sysenter_cs) expansion.
 */
static inline __attribute__((always_inline)) void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
	if (__builtin_constant_p((((__builtin_constant_p(tss->x86_tss.ss1 != thread->sysenter_cs) ? !!(tss->x86_tss.ss1 != thread->sysenter_cs) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 555, }; ______r = __builtin_expect(!!(tss->x86_tss.ss1 != thread->sysenter_cs), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(tss->x86_tss.ss1 != thread->sysenter_cs) ? !!(tss->x86_tss.ss1 != thread->sysenter_cs) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 555, }; ______r = __builtin_expect(!!(tss->x86_tss.ss1 != thread->sysenter_cs), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 555, }; ______r = !!(((__builtin_constant_p(tss->x86_tss.ss1 != thread->sysenter_cs) ? !!(tss->x86_tss.ss1 != thread->sysenter_cs) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 555, }; ______r = __builtin_expect(!!(tss->x86_tss.ss1 != thread->sysenter_cs), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		/* expanded wrmsr(MSR_IA32_SYSENTER_CS, ...) via paravirt */
		do { paravirt_write_msr(0x00000174, thread->sysenter_cs, 0); } while (0);
	}
}
/* swapgs is a 64-bit instruction; on this 32-bit build it is a no-op. */
static inline __attribute__((always_inline)) void native_swapgs(void)
{
}
3659extern unsigned long mmu_cr4_features;
3660static inline __attribute__((always_inline)) void set_in_cr4(unsigned long mask)
3661{
3662 unsigned long cr4;
3663 mmu_cr4_features |= mask;
3664 cr4 = read_cr4();
3665 cr4 |= mask;
3666 write_cr4(cr4);
3667}
3668static inline __attribute__((always_inline)) void clear_in_cr4(unsigned long mask)
3669{
3670 unsigned long cr4;
3671 mmu_cr4_features &= ~mask;
3672 cr4 = read_cr4();
3673 cr4 &= ~mask;
3674 write_cr4(cr4);
3675}
/* Address-limit cookie used by the uaccess machinery (get_fs/set_fs). */
typedef struct {
	unsigned long seg;
} mm_segment_t;
3679extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
3680extern void release_thread(struct task_struct *);
3681extern void prepare_to_copy(struct task_struct *tsk);
3682unsigned long get_wchan(struct task_struct *p);
/*
 * Query CPUID leaf 'op' with sub-leaf 0.  The leaf/sub-leaf are staged
 * through *eax/*ecx because __cpuid takes its inputs in place.
 */
static inline __attribute__((always_inline)) void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
/* Like cpuid(), but with an explicit sub-leaf ('count') in ecx. */
static inline __attribute__((always_inline)) void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
/* Convenience wrapper: CPUID leaf 'op', return only the EAX result. */
static inline __attribute__((always_inline)) unsigned int cpuid_eax(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);
	return regs[0];
}
/* Convenience wrapper: CPUID leaf 'op', return only the EBX result. */
static inline __attribute__((always_inline)) unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);
	return regs[1];
}
/* Convenience wrapper: CPUID leaf 'op', return only the ECX result. */
static inline __attribute__((always_inline)) unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);
	return regs[2];
}
/* Convenience wrapper: CPUID leaf 'op', return only the EDX result. */
static inline __attribute__((always_inline)) unsigned int cpuid_edx(unsigned int op)
{
	unsigned int regs[4];

	cpuid(op, &regs[0], &regs[1], &regs[2], &regs[3]);
	return regs[3];
}
/* PAUSE instruction ("rep; nop"): spin-wait hint; memory clobber stops
 * the compiler caching values across the busy-wait. */
static inline __attribute__((always_inline)) void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}
/* Generic spin-loop relax hook; on x86 this is just PAUSE. */
static inline __attribute__((always_inline)) void cpu_relax(void)
{
	rep_nop();
}
/* Serialize the instruction stream by executing CPUID (leaf 1), which
 * is a serializing instruction; all output registers are clobbered. */
static inline __attribute__((always_inline)) void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}
/* MONITOR instruction, emitted as raw opcode bytes (0f 01 c8): arm the
 * monitor hardware on the address in eax, with ecx/edx hints. */
static inline __attribute__((always_inline)) void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}
/* MWAIT instruction (raw opcode 0f 01 c9): wait for a write to the
 * monitored region; eax selects the C-state hint, ecx extensions. */
static inline __attribute__((always_inline)) void __mwait(unsigned long eax, unsigned long ecx)
{
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
/* Enable interrupts immediately before MWAIT ("sti; mwait") so a
 * pending interrupt can wake the wait; notifies lockdep first. */
static inline __attribute__((always_inline)) void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
3754extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
3755extern void select_idle_routine(const struct cpuinfo_x86 *c);
3756extern void init_amd_e400_c1e_mask(void);
3757extern unsigned long boot_option_idle_override;
3758extern bool amd_e400_c1e_detected;
/* Values for boot_option_idle_override (idle-loop selection). */
enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL, IDLE_FORCE_MWAIT};
3761extern void enable_sep_cpu(void);
3762extern int sysenter_setup(void);
3763extern void early_trap_init(void);
3764extern struct desc_ptr early_gdt_descr;
3765extern void cpu_set_gdt(int);
3766extern void switch_to_new_gdt(int);
3767extern void load_percpu_segment(int);
3768extern void cpu_init(void);
/* Read MSR_IA32_DEBUGCTLMSR (0x1d9) via the paravirt hook; the read
 * error code is discarded. */
static inline __attribute__((always_inline)) unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;
	do { int _err; debugctlmsr = paravirt_read_msr(0x000001d9, &_err); } while (0);
	return debugctlmsr;
}
/* Write MSR_IA32_DEBUGCTLMSR (0x1d9), splitting the value into the
 * low/high 32-bit halves the MSR interface expects. */
static inline __attribute__((always_inline)) void update_debugctlmsr(unsigned long debugctlmsr)
{
	do { paravirt_write_msr(0x000001d9, (u32)((u64)(debugctlmsr)), ((u64)(debugctlmsr))>>32); } while (0);
}
3779extern unsigned int machine_id;
3780extern unsigned int machine_submodel_id;
3781extern unsigned int BIOS_revision;
3782extern int bootloader_type;
3783extern int bootloader_version;
3784extern char ignore_fpu_irq;
/*
 * Read prefetch.  This is an expanded "alternative": a 4-byte nop
 * (8d 74 26 00) at build time that is patched to "prefetchnta" at boot
 * on CPUs with the matching feature bit (0*32+25, i.e. SSE).
 */
static inline __attribute__((always_inline)) void prefetch(const void *x)
{
	asm volatile ("661:\n\t" ".byte " "0x8d,0x74,0x26,0x00" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(0*32+25)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "prefetchnta (%1)" "\n664:\n" ".previous" : : "i" (0), "r" (x))
		;
}
/*
 * Write-intent prefetch.  Same alternatives mechanism as prefetch(),
 * patched to "prefetchw" on CPUs with feature bit 1*32+31 (3DNow!
 * prefetch).
 */
static inline __attribute__((always_inline)) void prefetchw(const void *x)
{
	asm volatile ("661:\n\t" ".byte " "0x8d,0x74,0x26,0x00" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(1*32+31)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "prefetchw (%1)" "\n664:\n" ".previous" : : "i" (0), "r" (x))
		;
}
/* Prefetch a lock word with write intent before taking it. */
static inline __attribute__((always_inline)) void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
3799extern unsigned long thread_saved_pc(struct task_struct *tsk);
3800extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
3801 unsigned long new_sp);
3802extern int get_tsc_mode(unsigned long adr);
3803extern int set_tsc_mode(unsigned int val);
3804extern int amd_get_nb_id(int cpu);
/* Snapshot of the APERF/MPERF frequency-feedback MSR pair. */
struct aperfmperf {
	u64 aperf, mperf;
};
/*
 * Snapshot the APERF (0xe8) and MPERF (0xe7) MSRs into *am.  The first
 * statement is an expanded WARN_ON_ONCE(!cpu_has(X86_FEATURE_APERFMPERF))
 * — feature bit 3*32+28 — wrapped in ftrace branch instrumentation; it
 * only warns, the MSR reads happen regardless.
 */
static inline __attribute__((always_inline)) void get_aperfmperf(struct aperfmperf *am)
{
	({ static bool __warned; int __ret_warn_once = !!(!(__builtin_constant_p((3*32+28)) && ( ((((3*32+28))>>5)==0 && (1UL<<(((3*32+28))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((3*32+28))>>5)==1 && (1UL<<(((3*32+28))&31) & (0|0))) || ((((3*32+28))>>5)==2 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==3 && (1UL<<(((3*32+28))&31) & (0))) || ((((3*32+28))>>5)==4 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==5 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==6 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==7 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==8 && (1UL<<(((3*32+28))&31) & 0)) || ((((3*32+28))>>5)==9 && (1UL<<(((3*32+28))&31) & 0)) ) ? 1 : (__builtin_constant_p(((3*32+28))) ? constant_test_bit(((3*32+28)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((3*32+28)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_once) ? !!(__ret_warn_once) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_once), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_once) ? 
!!(__ret_warn_once) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_once), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!(((__builtin_constant_p(__ret_warn_once) ? !!(__ret_warn_once) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_once), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) if (__builtin_constant_p(((({ int __ret_warn_on = !!(!__warned); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", 981); (__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); })))) ? !!((({ int __ret_warn_on = !!(!__warned); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", 981); (__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!((({ int __ret_warn_on = !!(!__warned); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", 981); (__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); }))); ______f.miss_hit[______r]++; ______r; })) __warned = true; (__builtin_constant_p(__ret_warn_once) ? !!(__ret_warn_once) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 981, }; ______r = __builtin_expect(!!(__ret_warn_once), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); });
	/* expanded rdmsrl() for MSR_IA32_APERF / MSR_IA32_MPERF */
	do { int _err; am->aperf = paravirt_read_msr(0x000000e8, &_err); } while (0);
	do { int _err; am->mperf = paravirt_read_msr(0x000000e7, &_err); } while (0);
}
/*
 * Compute the APERF/MPERF delta ratio between two snapshots.  mperf is
 * pre-shifted right by 10 so the quotient carries a 2^10 scale factor;
 * if the shifted mperf delta is zero the raw (truncated) aperf delta
 * is returned instead of dividing.  The if() is an ftrace-instrumented
 * branch expansion of a plain "if (mperf)".
 */
static inline __attribute__((always_inline))
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
				    struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;	/* fallback when mperf delta < 2^10 */
	mperf >>= 10;
	if (__builtin_constant_p(((mperf))) ? !!((mperf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/processor.h", .line = 998, }; ______r = !!((mperf)); ______f.miss_hit[______r]++; ______r; }))
		ratio = div64_u64(aperf, mperf);
	return ratio;
}
3826extern const int amd_erratum_383[];
3827extern const int amd_erratum_400[];
3828extern bool cpu_has_amd_erratum(const int *);
3829extern void mcount(void);
/* On x86 the recorded mcount call address needs no adjustment. */
static inline __attribute__((always_inline)) unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}
/* x86 needs no per-record dynamic-ftrace state; intentionally empty. */
struct dyn_arch_ftrace {
};
/* Read the counter.  The volatile cast forces a real load each call
 * (this is the ACCESS_ONCE idiom expanded); no memory barrier implied. */
static inline __attribute__((always_inline)) int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}
/* Plain store of i into the counter; not ordered against other CPUs. */
static inline __attribute__((always_inline)) void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
}
/* Atomically add i to *v.  The .smp_locks section records the lock
 * prefix location so UP kernels can patch it out at boot. */
static inline __attribute__((always_inline)) void atomic_add(int i, atomic_t *v)
{
	asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}
/* Atomically subtract i from *v (locked subl; see atomic_add). */
static inline __attribute__((always_inline)) void atomic_sub(int i, atomic_t *v)
{
	asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}
/* Atomically subtract i from *v; return nonzero iff the result is 0
 * (sete captures ZF from the locked subl). */
static inline __attribute__((always_inline)) int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;
	asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "subl %2,%0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}
/* Atomically increment *v (locked incl). */
static inline __attribute__((always_inline)) void atomic_inc(atomic_t *v)
{
	asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "incl %0"
		     : "+m" (v->counter));
}
/* Atomically decrement *v (locked decl). */
static inline __attribute__((always_inline)) void atomic_dec(atomic_t *v)
{
	asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "decl %0"
		     : "+m" (v->counter));
}
/* Atomically decrement *v; return true iff the new value is 0. */
static inline __attribute__((always_inline)) int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;
	asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "decl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}
/* Atomically increment *v; return true iff the new value is 0. */
static inline __attribute__((always_inline)) int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;
	asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "incl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}
/* Atomically add i to *v; return nonzero iff the result is negative
 * (sets captures SF from the locked addl). */
static inline __attribute__((always_inline)) int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;
	asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "addl %2,%0; sets %1"
		     : "+m" (v->counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}
/*
 * Atomically add i to *v and return the new value.  The locked xaddl
 * leaves the old counter value in i, so old (now in i) plus the saved
 * addend __i reconstructs the post-add result.
 */
static inline __attribute__((always_inline)) int atomic_add_return(int i, atomic_t *v)
{
	int __i;
	__i = i;
	asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "xaddl %0, %1"
		     : "+r" (i), "+m" (v->counter)
		     : : "memory");
	return i + __i;
}
/* Atomically subtract i from *v and return the new value. */
static inline __attribute__((always_inline)) int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}
/*
 * Compare-and-swap on the counter: if *v == old, set it to new; either
 * way, return the value *v held before the operation.  The body is the
 * expanded size-dispatching cmpxchg() macro; only the 4-byte case can
 * apply here since counter is an int.
 */
static inline __attribute__((always_inline)) int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return ({ __typeof__(*(((&v->counter)))) __ret; __typeof__(*(((&v->counter)))) __old = (((old))); __typeof__(*(((&v->counter)))) __new = (((new))); switch ((sizeof(*&v->counter))) { case 1: { volatile u8 *__ptr = (volatile u8 *)(((&v->counter))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)(((&v->counter))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)(((&v->counter))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; });
}
/*
 * Atomically store new into the counter and return the previous value.
 * Expanded size-dispatching xchg() macro; xchg with a memory operand is
 * implicitly locked, so no lock prefix is needed.
 */
static inline __attribute__((always_inline)) int atomic_xchg(atomic_t *v, int new)
{
	return ({ __typeof(*((&v->counter))) __x = ((new)); switch (sizeof(*&v->counter)) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&v->counter)); asm volatile("xchgb %0,%1" : "=q" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&v->counter)); asm volatile("xchgw %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&v->counter)); asm volatile("xchgl %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } default: __xchg_wrong_size(); } __x; });
}
/*
 * Add a to *v unless it currently equals u; returns nonzero iff the
 * add was performed.  Classic cmpxchg retry loop: both if()s below are
 * ftrace-expanded branches — the first is "if (unlikely(c == u))", the
 * second "if (likely(old == c))".
 */
static inline __attribute__((always_inline)) int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (__builtin_constant_p((((__builtin_constant_p(c == (u)) ? !!(c == (u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 237, }; ______r = __builtin_expect(!!(c == (u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(c == (u)) ? !!(c == (u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 237, }; ______r = __builtin_expect(!!(c == (u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 237, }; ______r = !!(((__builtin_constant_p(c == (u)) ? !!(c == (u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 237, }; ______r = __builtin_expect(!!(c == (u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (__builtin_constant_p((((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 240, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 240, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 240, }; ______r = !!(((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 240, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
			break;
		c = old;	/* lost the race: retry with the fresh value */
	}
	return c != (u);
}
/*
 * Atomically decrement *v only if the result stays non-negative.
 * Returns the decremented value; a negative return means *v was already
 * <= 0 and was left untouched.  Classic read/compute/cmpxchg retry loop.
 * NOTE(review): this is preprocessed output — the huge conditionals below
 * are the expanded ftrace branch-profiling annotations around the plain
 * tests "dec < 0" and "old == c" (likely()/unlikely() + profile_branch).
 */
static inline __attribute__((always_inline)) int atomic_dec_if_positive(atomic_t *v)
{
 int c, old, dec;
 c = atomic_read(v);
 for (;;) {
 dec = c - 1;
 /* expanded: if (unlikely(dec < 0)) -- would go negative, bail out without storing */
 if (__builtin_constant_p((((__builtin_constant_p(dec < 0) ? !!(dec < 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 262, }; ______r = __builtin_expect(!!(dec < 0), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(dec < 0) ? !!(dec < 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 262, }; ______r = __builtin_expect(!!(dec < 0), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 262, }; ______r = !!(((__builtin_constant_p(dec < 0) ? !!(dec < 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 262, }; ______r = __builtin_expect(!!(dec < 0), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
 break;
 old = atomic_cmpxchg((v), c, dec);
 /* expanded: if (likely(old == c)) -- cmpxchg hit, decrement is published */
 if (__builtin_constant_p((((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 265, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 265, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 265, }; ______r = !!(((__builtin_constant_p(old == c) ? !!(old == c) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/atomic.h", .line = 265, }; ______r = __builtin_expect(!!(old == c), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
 break;
 /* lost the race: somebody else changed *v; retry with the fresh value */
 c = old;
 }
 return dec;
}
/*
 * Atomically increment the 16-bit value *v ("lock; addw $1") and return
 * the stored value.  The .smp_locks section entry records the lock prefix
 * address (used by the kernel's SMP alternatives patching).
 * NOTE(review): the returned *v is a separate, non-atomic re-read.
 */
static inline __attribute__((always_inline)) short int atomic_inc_short(short int *v)
{
 asm(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "addw $1, %0" : "+m" (*v));
 return *v;
}
/* 64-bit atomic counter for 32-bit x86; 8-byte alignment is required by cmpxchg8b. */
typedef struct {
 u64 __attribute__((aligned(8))) counter;
} atomic64_t;
/* Atomic compare-and-exchange on the 64-bit counter; returns the old value. */
static inline __attribute__((always_inline)) long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
 return ((__typeof__(*(&v->counter)))__cmpxchg64((&v->counter), (unsigned long long)(o), (unsigned long long)(n)));
}
/*
 * Atomically store n into *v and return the previous value, via the
 * out-of-line atomic64_xchg_cx8 helper (new value passed split in
 * ebx:ecx, old value returned in edx:eax, v in esi).
 */
static inline __attribute__((always_inline)) long long atomic64_xchg(atomic64_t *v, long long n)
{
 long long o;
 unsigned high = (unsigned)(n >> 32);
 unsigned low = (unsigned)n;
 asm volatile("call atomic64_" "xchg" "_cx8"
 : "=A" (o), "+b" (low), "+c" (high)
 : "S" (v)
 : "memory"
 );
 return o;
}
/*
 * Atomically store i into *v through the atomic64_set_cx8 callout
 * (value split into ebx:ecx; eax/edx clobbered by the helper).
 */
static inline __attribute__((always_inline)) void atomic64_set(atomic64_t *v, long long i)
{
 unsigned high = (unsigned)(i >> 32);
 unsigned low = (unsigned)i;
 asm volatile("call atomic64_" "set" "_cx8"
 : "+b" (low), "+c" (high)
 : "S" (v)
 : "eax", "edx", "memory"
 );
}
/* Atomically read the full 64-bit value of *v (result in edx:eax). */
static inline __attribute__((always_inline)) long long atomic64_read(atomic64_t *v)
{
 long long r;
 asm volatile("call atomic64_" "read" "_cx8"
 : "=A" (r), "+c" (v)
 : : "memory"
 );
 return r;
 }
/* Atomically add i to *v; the callout rewrites i (edx:eax) with the new value, which is returned. */
static inline __attribute__((always_inline)) long long atomic64_add_return(long long i, atomic64_t *v)
{
 asm volatile("call atomic64_" "add_return" "_cx8"
 : "+A" (i), "+c" (v)
 : : "memory"
 );
 return i;
}
/* Atomically subtract i from *v and return the new value (same calling scheme as add_return). */
static inline __attribute__((always_inline)) long long atomic64_sub_return(long long i, atomic64_t *v)
{
 asm volatile("call atomic64_" "sub_return" "_cx8"
 : "+A" (i), "+c" (v)
 : : "memory"
 );
 return i;
}
/* Atomically increment *v and return the new value. */
static inline __attribute__((always_inline)) long long atomic64_inc_return(atomic64_t *v)
{
 long long a;
 asm volatile("call atomic64_" "inc_return" "_cx8"
 : "=A" (a)
 : "S" (v)
 : "memory", "ecx"
 );
 return a;
}
/* Atomically decrement *v and return the new value. */
static inline __attribute__((always_inline)) long long atomic64_dec_return(atomic64_t *v)
{
 long long a;
 asm volatile("call atomic64_" "dec_return" "_cx8"
 : "=A" (a)
 : "S" (v)
 : "memory", "ecx"
 );
 return a;
}
/*
 * Atomically add i to *v.  Uses the same add_return callout as
 * atomic64_add_return, so the (usually ignored) return is presumably the
 * new value left in edx:eax by the helper — behaviour matches add_return.
 */
static inline __attribute__((always_inline)) long long atomic64_add(long long i, atomic64_t *v)
{
 asm volatile("call atomic64_" "add_return" "_cx8"
 : "+A" (i), "+c" (v)
 : : "memory"
 );
 return i;
}
/* Atomically subtract i from *v; mirror of atomic64_add via the sub_return callout. */
static inline __attribute__((always_inline)) long long atomic64_sub(long long i, atomic64_t *v)
{
 asm volatile("call atomic64_" "sub_return" "_cx8"
 : "+A" (i), "+c" (v)
 : : "memory"
 );
 return i;
}
4043static inline __attribute__((always_inline)) int atomic64_sub_and_test(long long i, atomic64_t *v)
4044{
4045 return atomic64_sub_return(i, v) == 0;
4046}
/* Atomically increment *v; the inc_return callout's result is discarded (eax/ecx/edx clobbered). */
static inline __attribute__((always_inline)) void atomic64_inc(atomic64_t *v)
{
 asm volatile("call atomic64_" "inc_return" "_cx8"
 : : "S" (v)
 : "memory", "eax", "ecx", "edx"
 );
}
/* Atomically decrement *v; result of the dec_return callout is discarded. */
static inline __attribute__((always_inline)) void atomic64_dec(atomic64_t *v)
{
 asm volatile("call atomic64_" "dec_return" "_cx8"
 : : "S" (v)
 : "memory", "eax", "ecx", "edx"
 );
}
4061static inline __attribute__((always_inline)) int atomic64_dec_and_test(atomic64_t *v)
4062{
4063 return atomic64_dec_return(v) == 0;
4064}
4065static inline __attribute__((always_inline)) int atomic64_inc_and_test(atomic64_t *v)
4066{
4067 return atomic64_inc_return(v) == 0;
4068}
4069static inline __attribute__((always_inline)) int atomic64_add_negative(long long i, atomic64_t *v)
4070{
4071 return atomic64_add_return(i, v) < 0;
4072}
/*
 * Atomically add a to *v unless *v == u.  The out-of-line helper takes
 * a in edx:eax, v in ecx and the forbidden value u split in esi:edi;
 * the int result comes back through the (truncated) "+A" operand —
 * presumably nonzero iff the add was performed.
 */
static inline __attribute__((always_inline)) int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
 unsigned low = (unsigned)u;
 unsigned high = (unsigned)(u >> 32);
 asm volatile("call atomic64_" "add_unless" "_cx8" "\n\t"
 : "+A" (a), "+c" (v), "+S" (low), "+D" (high)
 : : "memory");
 return (int)a;
}
/* Atomically increment *v unless it is zero; result flag returned in eax. */
static inline __attribute__((always_inline)) int atomic64_inc_not_zero(atomic64_t *v)
{
 int r;
 asm volatile("call atomic64_" "inc_not_zero" "_cx8"
 : "=a" (r)
 : "S" (v)
 : "ecx", "edx", "memory"
 );
 return r;
}
/* 64-bit analogue of atomic_dec_if_positive: decrement unless the result would be negative. */
static inline __attribute__((always_inline)) long long atomic64_dec_if_positive(atomic64_t *v)
{
 long long r;
 asm volatile("call atomic64_" "dec_if_positive" "_cx8"
 : "=A" (r)
 : "S" (v)
 : "ecx", "memory"
 );
 return r;
}
4102typedef atomic_t atomic_long_t;
4103static inline __attribute__((always_inline)) long atomic_long_read(atomic_long_t *l)
4104{
4105 atomic_t *v = (atomic_t *)l;
4106 return (long)atomic_read(v);
4107}
4108static inline __attribute__((always_inline)) void atomic_long_set(atomic_long_t *l, long i)
4109{
4110 atomic_t *v = (atomic_t *)l;
4111 atomic_set(v, i);
4112}
4113static inline __attribute__((always_inline)) void atomic_long_inc(atomic_long_t *l)
4114{
4115 atomic_t *v = (atomic_t *)l;
4116 atomic_inc(v);
4117}
4118static inline __attribute__((always_inline)) void atomic_long_dec(atomic_long_t *l)
4119{
4120 atomic_t *v = (atomic_t *)l;
4121 atomic_dec(v);
4122}
4123static inline __attribute__((always_inline)) void atomic_long_add(long i, atomic_long_t *l)
4124{
4125 atomic_t *v = (atomic_t *)l;
4126 atomic_add(i, v);
4127}
4128static inline __attribute__((always_inline)) void atomic_long_sub(long i, atomic_long_t *l)
4129{
4130 atomic_t *v = (atomic_t *)l;
4131 atomic_sub(i, v);
4132}
4133static inline __attribute__((always_inline)) int atomic_long_sub_and_test(long i, atomic_long_t *l)
4134{
4135 atomic_t *v = (atomic_t *)l;
4136 return atomic_sub_and_test(i, v);
4137}
4138static inline __attribute__((always_inline)) int atomic_long_dec_and_test(atomic_long_t *l)
4139{
4140 atomic_t *v = (atomic_t *)l;
4141 return atomic_dec_and_test(v);
4142}
4143static inline __attribute__((always_inline)) int atomic_long_inc_and_test(atomic_long_t *l)
4144{
4145 atomic_t *v = (atomic_t *)l;
4146 return atomic_inc_and_test(v);
4147}
4148static inline __attribute__((always_inline)) int atomic_long_add_negative(long i, atomic_long_t *l)
4149{
4150 atomic_t *v = (atomic_t *)l;
4151 return atomic_add_negative(i, v);
4152}
4153static inline __attribute__((always_inline)) long atomic_long_add_return(long i, atomic_long_t *l)
4154{
4155 atomic_t *v = (atomic_t *)l;
4156 return (long)atomic_add_return(i, v);
4157}
4158static inline __attribute__((always_inline)) long atomic_long_sub_return(long i, atomic_long_t *l)
4159{
4160 atomic_t *v = (atomic_t *)l;
4161 return (long)atomic_sub_return(i, v);
4162}
4163static inline __attribute__((always_inline)) long atomic_long_inc_return(atomic_long_t *l)
4164{
4165 atomic_t *v = (atomic_t *)l;
4166 return (long)(atomic_add_return(1, v));
4167}
4168static inline __attribute__((always_inline)) long atomic_long_dec_return(atomic_long_t *l)
4169{
4170 atomic_t *v = (atomic_t *)l;
4171 return (long)(atomic_sub_return(1, v));
4172}
4173static inline __attribute__((always_inline)) long atomic_long_add_unless(atomic_long_t *l, long a, long u)
4174{
4175 atomic_t *v = (atomic_t *)l;
4176 return (long)atomic_add_unless(v, a, u);
4177}
/*
 * Per-task low-level bookkeeping, located at the base of the kernel stack
 * (see current_thread_info below, which masks the stack pointer to find it).
 */
struct thread_info {
 struct task_struct *task;
 struct exec_domain *exec_domain;
 __u32 flags;   /* TIF_* flag bits, manipulated by the helpers below */
 __u32 status;  /* TS_* thread-synchronous state bits */
 __u32 cpu;
 int preempt_count;
 mm_segment_t addr_limit;
 struct restart_block restart_block;
 void *sysenter_return;
 unsigned long previous_esp;
 __u8 supervisor_stack[0]; /* marks the start of the actual stack area */
 int uaccess_err;
};
/* Global register variable: esp is always the current kernel stack pointer. */
register unsigned long current_stack_pointer asm("esp") __attribute__((__used__));
/*
 * Find the current thread_info by rounding esp down to the (2 * 4 KiB)
 * stack boundary — thread_info lives at the bottom of the stack.
 */
static inline __attribute__((always_inline)) struct thread_info *current_thread_info(void)
{
 return (struct thread_info *)
 (current_stack_pointer & ~((((1UL) << 12) << 1) - 1));
}
/*
 * Flag that the saved sigmask must be restored on return to userspace,
 * and make the signal-delivery path run.  0x0008 is presumably
 * TS_RESTORE_SIGMASK and bit 2 TIF_SIGPENDING — TODO confirm against the
 * kernel headers this was preprocessed from.
 */
static inline __attribute__((always_inline)) void set_restore_sigmask(void)
{
 struct thread_info *ti = current_thread_info();
 ti->status |= 0x0008;
 set_bit(2, (unsigned long *)&ti->flags);
}
4204extern void arch_task_cache_init(void);
4205extern void free_thread_info(struct thread_info *ti);
4206extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
/* Atomically set TIF bit `flag` in ti->flags. */
static inline __attribute__((always_inline)) void set_ti_thread_flag(struct thread_info *ti, int flag)
{
 set_bit(flag, (unsigned long *)&ti->flags);
}
/* Atomically clear TIF bit `flag` in ti->flags. */
static inline __attribute__((always_inline)) void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
 clear_bit(flag, (unsigned long *)&ti->flags);
}
/* Atomically set bit `flag` and return its previous value. */
static inline __attribute__((always_inline)) int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
 return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}
/* Atomically clear bit `flag` and return its previous value. */
static inline __attribute__((always_inline)) int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
 return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}
/*
 * Non-destructive test of bit `flag`; the expanded test_bit() macro picks
 * the constant- or variable-bit variant at compile time.
 */
static inline __attribute__((always_inline)) int test_ti_thread_flag(struct thread_info *ti, int flag)
{
 return (__builtin_constant_p((flag)) ? constant_test_bit((flag), ((unsigned long *)&ti->flags)) : variable_test_bit((flag), ((unsigned long *)&ti->flags)));
}
/* Initialise an empty circular doubly-linked list (head points at itself). */
static inline __attribute__((always_inline)) void INIT_LIST_HEAD(struct list_head *list)
{
 list->next = list;
 list->prev = list;
}
/* Core insert: splice `new` between the known-adjacent nodes prev and next. */
static inline __attribute__((always_inline)) void __list_add(struct list_head *new,
 struct list_head *prev,
 struct list_head *next)
{
 next->prev = new;
 new->next = next;
 new->prev = prev;
 prev->next = new;
}
/* Insert `new` right after `head` (stack-like ordering). */
static inline __attribute__((always_inline)) void list_add(struct list_head *new, struct list_head *head)
{
 __list_add(new, head, head->next);
}
/* Insert `new` right before `head`, i.e. at the tail (queue-like ordering). */
static inline __attribute__((always_inline)) void list_add_tail(struct list_head *new, struct list_head *head)
{
 __list_add(new, head->prev, head);
}
/* Core delete: unlink whatever sits between prev and next. */
static inline __attribute__((always_inline)) void __list_del(struct list_head * prev, struct list_head * next)
{
 next->prev = prev;
 prev->next = next;
}
/* Unlink entry, leaving its own next/prev pointers untouched. */
static inline __attribute__((always_inline)) void __list_del_entry(struct list_head *entry)
{
 __list_del(entry->prev, entry->next);
}
/*
 * Unlink entry and poison its pointers with the LIST_POISON values so a
 * use-after-delete faults noisily instead of corrupting a live list.
 */
static inline __attribute__((always_inline)) void list_del(struct list_head *entry)
{
 __list_del(entry->prev, entry->next);
 entry->next = ((void *) 0x00100100 + (0x0UL));
 entry->prev = ((void *) 0x00200200 + (0x0UL));
}
/* Replace `old` with `new` in its list; `old`'s pointers are left stale. */
static inline __attribute__((always_inline)) void list_replace(struct list_head *old,
 struct list_head *new)
{
 new->next = old->next;
 new->next->prev = new;
 new->prev = old->prev;
 new->prev->next = new;
}
/* list_replace, then reinitialise `old` as an empty list. */
static inline __attribute__((always_inline)) void list_replace_init(struct list_head *old,
 struct list_head *new)
{
 list_replace(old, new);
 INIT_LIST_HEAD(old);
}
/* Unlink entry and reinitialise it so it can be re-added safely. */
static inline __attribute__((always_inline)) void list_del_init(struct list_head *entry)
{
 __list_del_entry(entry);
 INIT_LIST_HEAD(entry);
}
/* Move `list` from wherever it is to just after `head`. */
static inline __attribute__((always_inline)) void list_move(struct list_head *list, struct list_head *head)
{
 __list_del_entry(list);
 list_add(list, head);
}
/* Move `list` from wherever it is to the tail of `head`. */
static inline __attribute__((always_inline)) void list_move_tail(struct list_head *list,
 struct list_head *head)
{
 __list_del_entry(list);
 list_add_tail(list, head);
}
4294static inline __attribute__((always_inline)) int list_is_last(const struct list_head *list,
4295 const struct list_head *head)
4296{
4297 return list->next == head;
4298}
4299static inline __attribute__((always_inline)) int list_empty(const struct list_head *head)
4300{
4301 return head->next == head;
4302}
4303static inline __attribute__((always_inline)) int list_empty_careful(const struct list_head *head)
4304{
4305 struct list_head *next = head->next;
4306 return (next == head) && (next == head->prev);
4307}
/*
 * Rotate the list: move the first entry to the tail (no-op when empty).
 * The expanded conditional is the branch-profiling wrapper around the
 * plain test "!list_empty(head)".
 */
static inline __attribute__((always_inline)) void list_rotate_left(struct list_head *head)
{
 struct list_head *first;
 if (__builtin_constant_p(((!list_empty(head)))) ? !!((!list_empty(head))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 218, }; ______r = !!((!list_empty(head))); ______f.miss_hit[______r]++; ______r; })) {
 first = head->next;
 list_move_tail(first, head);
 }
}
4316static inline __attribute__((always_inline)) int list_is_singular(const struct list_head *head)
4317{
4318 return !list_empty(head) && (head->next == head->prev);
4319}
/*
 * Core cut: move the entries from head->next up to and including `entry`
 * onto `list` (overwriting list's old links), leaving the remainder on head.
 */
static inline __attribute__((always_inline)) void __list_cut_position(struct list_head *list,
 struct list_head *head, struct list_head *entry)
{
 struct list_head *new_first = entry->next;
 list->next = head->next;
 list->next->prev = list;
 list->prev = entry;
 entry->next = list;
 head->next = new_first;
 new_first->prev = head;
}
/*
 * Cut `head` in two at `entry`: the initial run [head->next .. entry]
 * ends up on `list`, the rest stays on `head`.  `entry` must be on head
 * (or be head itself, in which case `list` just becomes empty).  The
 * expanded conditionals are branch-profiling wrappers around the plain
 * guards shown inside each !!(...)
 */
static inline __attribute__((always_inline)) void list_cut_position(struct list_head *list,
 struct list_head *head, struct list_head *entry)
{
 if (__builtin_constant_p(((list_empty(head)))) ? !!((list_empty(head))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 262, }; ______r = !!((list_empty(head))); ______f.miss_hit[______r]++; ______r; }))
 return;
 if (__builtin_constant_p(((list_is_singular(head) && (head->next != entry && head != entry)))) ? !!((list_is_singular(head) && (head->next != entry && head != entry))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/linux/list.h"
 , .line =
 265
 , }; ______r = !!((list_is_singular(head) && (head->next != entry && head != entry))); ______f.miss_hit[______r]++; ______r; }))
 return;
 if (__builtin_constant_p(((entry == head))) ? !!((entry == head)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 267, }; ______r = !!((entry == head)); ______f.miss_hit[______r]++; ______r; }))
 INIT_LIST_HEAD(list);
 else
 __list_cut_position(list, head, entry);
}
/*
 * Core splice: insert all entries of `list` (which must be non-empty;
 * callers check) between the adjacent nodes prev and next.  `list`'s own
 * head is left stale — callers reinitialise it if needed.
 */
static inline __attribute__((always_inline)) void __list_splice(const struct list_head *list,
 struct list_head *prev,
 struct list_head *next)
{
 struct list_head *first = list->next;
 struct list_head *last = list->prev;
 first->prev = prev;
 prev->next = first;
 last->next = next;
 next->prev = last;
}
/*
 * Splice `list` at the front of `head`.  The expanded conditionals in all
 * four variants are branch-profiling wrappers around "!list_empty(list)".
 */
static inline __attribute__((always_inline)) void list_splice(const struct list_head *list,
 struct list_head *head)
{
 if (__builtin_constant_p(((!list_empty(list)))) ? !!((!list_empty(list))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 295, }; ______r = !!((!list_empty(list))); ______f.miss_hit[______r]++; ______r; }))
 __list_splice(list, head, head->next);
}
/* Splice `list` at the tail of `head`. */
static inline __attribute__((always_inline)) void list_splice_tail(struct list_head *list,
 struct list_head *head)
{
 if (__builtin_constant_p(((!list_empty(list)))) ? !!((!list_empty(list))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 307, }; ______r = !!((!list_empty(list))); ______f.miss_hit[______r]++; ______r; }))
 __list_splice(list, head->prev, head);
}
/* Splice at the front, then reinitialise the (now stale) donor head. */
static inline __attribute__((always_inline)) void list_splice_init(struct list_head *list,
 struct list_head *head)
{
 if (__builtin_constant_p(((!list_empty(list)))) ? !!((!list_empty(list))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 321, }; ______r = !!((!list_empty(list))); ______f.miss_hit[______r]++; ______r; })) {
 __list_splice(list, head, head->next);
 INIT_LIST_HEAD(list);
 }
}
/* Splice at the tail, then reinitialise the donor head. */
static inline __attribute__((always_inline)) void list_splice_tail_init(struct list_head *list,
 struct list_head *head)
{
 if (__builtin_constant_p(((!list_empty(list)))) ? !!((!list_empty(list))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 338, }; ______r = !!((!list_empty(list))); ______f.miss_hit[______r]++; ______r; })) {
 __list_splice(list, head->prev, head);
 INIT_LIST_HEAD(list);
 }
}
4386static inline __attribute__((always_inline)) void INIT_HLIST_NODE(struct hlist_node *h)
4387{
4388 h->next = ((void *)0);
4389 h->pprev = ((void *)0);
4390}
4391static inline __attribute__((always_inline)) int hlist_unhashed(const struct hlist_node *h)
4392{
4393 return !h->pprev;
4394}
4395static inline __attribute__((always_inline)) int hlist_empty(const struct hlist_head *h)
4396{
4397 return !h->first;
4398}
/*
 * Core hlist unlink: route *pprev past n, and (profiled branch around the
 * plain "if (next)") fix the successor's back-pointer.
 */
static inline __attribute__((always_inline)) void __hlist_del(struct hlist_node *n)
{
 struct hlist_node *next = n->next;
 struct hlist_node **pprev = n->pprev;
 *pprev = next;
 if (__builtin_constant_p(((next))) ? !!((next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 591, }; ______r = !!((next)); ______f.miss_hit[______r]++; ______r; }))
 next->pprev = pprev;
}
/* Unlink n and poison its pointers (LIST_POISON values) to trap stale use. */
static inline __attribute__((always_inline)) void hlist_del(struct hlist_node *n)
{
 __hlist_del(n);
 n->next = ((void *) 0x00100100 + (0x0UL));
 n->pprev = ((void *) 0x00200200 + (0x0UL));
}
/* Unlink n (if hashed) and reinitialise it for reuse. */
static inline __attribute__((always_inline)) void hlist_del_init(struct hlist_node *n)
{
 if (__builtin_constant_p(((!hlist_unhashed(n)))) ? !!((!hlist_unhashed(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 604, }; ______r = !!((!hlist_unhashed(n))); ______f.miss_hit[______r]++; ______r; })) {
 __hlist_del(n);
 INIT_HLIST_NODE(n);
 }
}
/* Insert n at the front of bucket h; profiled branch guards the "old first exists" fixup. */
static inline __attribute__((always_inline)) void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
 struct hlist_node *first = h->first;
 n->next = first;
 if (__builtin_constant_p(((first))) ? !!((first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 614, }; ______r = !!((first)); ______f.miss_hit[______r]++; ______r; }))
 first->pprev = &n->next;
 h->first = n;
 n->pprev = &h->first;
}
/* Insert n immediately before `next` (which must already be hashed). */
static inline __attribute__((always_inline)) void hlist_add_before(struct hlist_node *n,
 struct hlist_node *next)
{
 n->pprev = next->pprev;
 n->next = next;
 next->pprev = &n->next;
 *(n->pprev) = n;
}
/* Insert `next` immediately after n; profiled branch fixes the new successor's pprev. */
static inline __attribute__((always_inline)) void hlist_add_after(struct hlist_node *n,
 struct hlist_node *next)
{
 next->next = n->next;
 n->next = next;
 next->pprev = &n->next;
 if (__builtin_constant_p(((next->next))) ? !!((next->next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 637, }; ______r = !!((next->next)); ______f.miss_hit[______r]++; ______r; }))
 next->next->pprev = &next->next;
}
/* Make a node look hashed (pprev points at its own next) without putting it on any list. */
static inline __attribute__((always_inline)) void hlist_add_fake(struct hlist_node *n)
{
 n->pprev = &n->next;
}
/*
 * Move an entire bucket from `old` to `new`; `old` is emptied.  The
 * profiled branch guards the back-pointer fixup for a non-empty chain.
 */
static inline __attribute__((always_inline)) void hlist_move_list(struct hlist_head *old,
 struct hlist_head *new)
{
 new->first = old->first;
 if (__builtin_constant_p(((new->first))) ? !!((new->first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list.h", .line = 655, }; ______r = !!((new->first)); ______f.miss_hit[______r]++; ______r; }))
 new->first->pprev = &new->first;
 old->first = ((void *)0);
}
4458 extern void add_preempt_count(int val);
4459 extern void sub_preempt_count(int val);
4460 __attribute__((regparm(0))) void preempt_schedule(void);
struct preempt_notifier;
/* Callbacks fired when the registering task is scheduled in / out. */
struct preempt_ops {
 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
 void (*sched_out)(struct preempt_notifier *notifier,
 struct task_struct *next);
};
/* Registration handle: hlist link plus the callback table. */
struct preempt_notifier {
 struct hlist_node link;
 struct preempt_ops *ops;
};
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
/* Prepare a notifier for registration: detached link, callbacks attached. */
static inline __attribute__((always_inline)) void preempt_notifier_init(struct preempt_notifier *notifier,
 struct preempt_ops *ops)
{
 INIT_HLIST_NODE(&notifier->link);
 notifier->ops = ops;
}
4479struct task_struct;
4480struct lockdep_map;
4481extern int prove_locking;
4482extern int lock_stat;
4483struct task_struct;
4484extern int debug_locks;
4485extern int debug_locks_silent;
/*
 * Atomically clear the global debug_locks flag and return its previous
 * value (non-zero means lock debugging was still on — the caller "won"
 * the right to report).  The body is the fully expanded xchg() macro,
 * size-dispatched to the 4-byte xchgl case for an int.
 */
static inline __attribute__((always_inline)) int __debug_locks_off(void)
{
 return ({ __typeof(*((&debug_locks))) __x = ((0)); switch (sizeof(*&debug_locks)) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&debug_locks)); asm volatile("xchgb %0,%1" : "=q" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&debug_locks)); asm volatile("xchgw %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&debug_locks)); asm volatile("xchgl %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } default: __xchg_wrong_size(); } __x; });
}
4490extern int debug_locks_off(void);
4491struct task_struct;
4492extern void debug_show_all_locks(void);
4493extern void debug_show_held_locks(struct task_struct *task);
4494extern void debug_check_no_locks_freed(const void *from, unsigned long len);
4495extern void debug_check_no_locks_held(struct task_struct *task);
4496struct task_struct;
4497struct pt_regs;
4498struct task_struct;
/* A captured call-stack: caller supplies `entries` storage and `max_entries`; `skip` drops innermost frames. */
struct stack_trace {
 unsigned int nr_entries, max_entries;
 unsigned long *entries;
 int skip;
};
4504extern void save_stack_trace(struct stack_trace *trace);
4505extern void save_stack_trace_regs(struct stack_trace *trace,
4506 struct pt_regs *regs);
4507extern void save_stack_trace_tsk(struct task_struct *tsk,
4508 struct stack_trace *trace);
4509extern void print_stack_trace(struct stack_trace *trace, int spaces);
4510extern void save_stack_trace_user(struct stack_trace *trace);
/* One byte per subclass; only the key's address matters, never its content. */
struct lockdep_subclass_key {
 char __one_byte;
} __attribute__ ((__packed__));
/* Static key identifying a lock class; one subclass key per possible subclass (8 here). */
struct lock_class_key {
 struct lockdep_subclass_key subkeys[8UL];
};
extern struct lock_class_key __lockdep_no_validate__;
/* Lockdep's per-class record: hash/list links, usage state and dependency edges. */
struct lock_class {
 struct list_head hash_entry;
 struct list_head lock_entry;
 struct lockdep_subclass_key *key;
 unsigned int subclass;
 unsigned int dep_gen_id;
 unsigned long usage_mask;
 struct stack_trace usage_traces[(1+3*4)];
 struct list_head locks_after, locks_before; /* forward/backward dependency lists */
 unsigned int version;
 unsigned long ops;
 const char *name;
 int name_version;
};
/* Embedded in every lockdep-tracked lock: key plus a small class cache. */
struct lockdep_map {
 struct lock_class_key *key;
 struct lock_class *class_cache[2];
 const char *name;
};
/* One edge in the dependency graph, with the stack trace that created it. */
struct lock_list {
 struct list_head entry;
 struct lock_class *class;
 struct stack_trace trace;
 int distance;
 struct lock_list *parent;
};
/* A validated chain of held locks, identified by its 64-bit chain_key. */
struct lock_chain {
 u8 irq_context;
 u8 depth;
 u16 base;
 struct list_head entry;
 u64 chain_key;
};
/* Per-task record of one currently-held lock (bitfields keep it compact). */
struct held_lock {
 u64 prev_chain_key;
 unsigned long acquire_ip;
 struct lockdep_map *instance;
 struct lockdep_map *nest_lock;
 unsigned int class_idx:13;
 unsigned int irq_context:2;
 unsigned int trylock:1;
 unsigned int read:2;
 unsigned int check:2;
 unsigned int hardirqs_off:1;
 unsigned int references:11;
};
4564extern void lockdep_init(void);
4565extern void lockdep_info(void);
4566extern void lockdep_reset(void);
4567extern void lockdep_reset_lock(struct lockdep_map *lock);
4568extern void lockdep_free_key_range(void *start, unsigned long size);
4569extern void lockdep_sys_exit(void);
4570extern void lockdep_off(void);
4571extern void lockdep_on(void);
4572extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
4573 struct lock_class_key *key, int subclass);
4574static inline __attribute__((always_inline)) int lockdep_match_key(struct lockdep_map *lock,
4575 struct lock_class_key *key)
4576{
4577 return lock->key == key;
4578}
4579extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
4580 int trylock, int read, int check,
4581 struct lockdep_map *nest_lock, unsigned long ip);
4582extern void lock_release(struct lockdep_map *lock, int nested,
4583 unsigned long ip);
4584extern int lock_is_held(struct lockdep_map *lock);
4585extern void lock_set_class(struct lockdep_map *lock, const char *name,
4586 struct lock_class_key *key, unsigned int subclass,
4587 unsigned long ip);
/* Re-classify a held lock under a different subclass, keeping its name and key. */
static inline __attribute__((always_inline)) void lock_set_subclass(struct lockdep_map *lock,
 unsigned int subclass, unsigned long ip)
{
 lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
4593extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
4594extern void lockdep_clear_current_reclaim_state(void);
4595extern void lockdep_trace_alloc(gfp_t mask);
4596extern void print_irqtrace_events(struct task_struct *curr);
4597extern void ftrace_nmi_enter(void);
4598extern void ftrace_nmi_exit(void);
4599extern void cpu_idle(void);
/* Signature of a cross-CPU function-call payload. */
typedef void (*smp_call_func_t)(void *info);
/* Per-request descriptor queued to a remote CPU for smp_call_function*(). */
struct call_single_data {
 struct list_head list;
 smp_call_func_t func;
 void *info;   /* opaque argument passed to func */
 u16 flags;
 u16 priv;
};
4608extern unsigned int total_cpus;
4609int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
4610 int wait);
/* Intel MultiProcessor spec floating pointer structure ("_MP_" signature). */
struct mpf_intel {
 char signature[4];
 unsigned int physptr; /* physical address of the MP configuration table */
 unsigned char length;
 unsigned char specification;
 unsigned char checksum;
 unsigned char feature1;
 unsigned char feature2;
 unsigned char feature3;
 unsigned char feature4;
 unsigned char feature5;
};
/* MP configuration table header ("PCMP"). */
struct mpc_table {
 char signature[4];
 unsigned short length;
 char spec;
 char checksum;
 char oem[8];
 char productid[12];
 unsigned int oemptr;
 unsigned short oemsize;
 unsigned short oemcount;
 unsigned int lapic; /* local APIC base address */
 unsigned int reserved;
};
/* MP table entry: one processor. */
struct mpc_cpu {
 unsigned char type;
 unsigned char apicid;
 unsigned char apicver;
 unsigned char cpuflag;
 unsigned int cpufeature;
 unsigned int featureflag;
 unsigned int reserved[2];
};
/* MP table entry: one bus. */
struct mpc_bus {
 unsigned char type;
 unsigned char busid;
 unsigned char bustype[6];
};
/* MP table entry: one I/O APIC. */
struct mpc_ioapic {
 unsigned char type;
 unsigned char apicid;
 unsigned char apicver;
 unsigned char flags;
 unsigned int apicaddr;
};
/* MP table entry: one I/O interrupt routing (bus IRQ -> APIC pin). */
struct mpc_intsrc {
 unsigned char type;
 unsigned char irqtype;
 unsigned short irqflag;
 unsigned char srcbus;
 unsigned char srcbusirq;
 unsigned char dstapic;
 unsigned char dstirq;
};
/* Interrupt delivery types used by mpc_intsrc/mpc_lintsrc.irqtype. */
enum mp_irq_source_types {
 mp_INT = 0,
 mp_NMI = 1,
 mp_SMI = 2,
 mp_ExtINT = 3
};
/* MP table entry: local interrupt routing (to a local APIC LINT pin). */
struct mpc_lintsrc {
 unsigned char type;
 unsigned char irqtype;
 unsigned short irqflag;
 unsigned char srcbusid;
 unsigned char srcbusirq;
 unsigned char destapic;
 unsigned char destapiclint;
};
/* Header of the OEM extension table referenced by mpc_table.oemptr. */
struct mpc_oemtable {
 char signature[4];
 unsigned short length;
 char rev;
 char checksum;
 char mpc[8];
};
/* Recognised bus types for MP-table parsing. */
enum mp_bustype {
 MP_BUS_ISA = 1,
 MP_BUS_EISA,
 MP_BUS_PCI,
 MP_BUS_MCA,
};
/* Video/console state handed over from the boot loader (zero page). */
struct screen_info {
 __u8 orig_x; /* initial cursor column */
 __u8 orig_y; /* initial cursor row */
 __u16 ext_mem_k;
 __u16 orig_video_page;
 __u8 orig_video_mode;
 __u8 orig_video_cols;
 __u8 flags;
 __u8 unused2;
 __u16 orig_video_ega_bx;
 __u16 unused3;
 __u8 orig_video_lines;
 __u8 orig_video_isVGA;
 __u16 orig_video_points;
 __u16 lfb_width; /* linear framebuffer geometry follows */
 __u16 lfb_height;
 __u16 lfb_depth;
 __u32 lfb_base;
 __u32 lfb_size;
 __u16 cl_magic, cl_offset;
 __u16 lfb_linelength;
 __u8 red_size; /* RGBA channel layout of the framebuffer */
 __u8 red_pos;
 __u8 green_size;
 __u8 green_pos;
 __u8 blue_size;
 __u8 blue_pos;
 __u8 rsvd_size;
 __u8 rsvd_pos;
 __u16 vesapm_seg;
 __u16 vesapm_off;
 __u16 pages;
 __u16 vesa_attributes;
 __u32 capabilities;
 __u8 _reserved[6];
} __attribute__((packed)); /* packed: layout fixed by the boot protocol */
extern struct screen_info screen_info;
/* APM BIOS event codes. */
typedef unsigned short apm_event_t;
typedef unsigned short apm_eventinfo_t;
/* APM table from the BIOS, as passed in by the boot loader. */
struct apm_bios_info {
 __u16 version;
 __u16 cseg; /* 32-bit code segment */
 __u32 offset;
 __u16 cseg_16; /* 16-bit code segment */
 __u16 dseg;
 __u16 flags;
 __u16 cseg_len;
 __u16 cseg_16_len;
 __u16 dseg_len;
};
/* Runtime APM state plus quirk flags for broken BIOSes. */
struct apm_info {
 struct apm_bios_info bios;
 unsigned short connection_version;
 int get_power_status_broken;
 int get_power_status_swabinminutes;
 int allow_ints;
 int forbid_idle;
 int realmode_power_off;
 int disabled;
};
extern struct apm_info apm_info;
/* BIOS Enhanced Disk Drive (EDD) device parameters (INT 13h AH=48h). */
struct edd_device_params {
 __u16 length;
 __u16 info_flags;
 __u32 num_default_cylinders;
 __u32 num_default_heads;
 __u32 sectors_per_track;
 __u64 number_of_sectors;
 __u16 bytes_per_sector;
 __u32 dpte_ptr;
 __u16 key;
 __u8 device_path_info_length;
 __u8 reserved2;
 __u16 reserved3;
 __u8 host_bus_type[4];
 __u8 interface_type[8];
 /* Bus-specific addressing; which member is valid depends on host_bus_type. */
 union {
 struct {
 __u16 base_address;
 __u16 reserved1;
 __u32 reserved2;
 } __attribute__ ((packed)) isa;
 struct {
 __u8 bus;
 __u8 slot;
 __u8 function;
 __u8 channel;
 __u32 reserved;
 } __attribute__ ((packed)) pci;
 struct {
 __u64 reserved;
 } __attribute__ ((packed)) ibnd;
 struct {
 __u64 reserved;
 } __attribute__ ((packed)) xprs;
 struct {
 __u64 reserved;
 } __attribute__ ((packed)) htpt;
 struct {
 __u64 reserved;
 } __attribute__ ((packed)) unknown;
 } interface_path;
 /* Device-specific addressing; valid member depends on interface_type. */
 union {
 struct {
 __u8 device;
 __u8 reserved1;
 __u16 reserved2;
 __u32 reserved3;
 __u64 reserved4;
 } __attribute__ ((packed)) ata;
 struct {
 __u8 device;
 __u8 lun;
 __u8 reserved1;
 __u8 reserved2;
 __u32 reserved3;
 __u64 reserved4;
 } __attribute__ ((packed)) atapi;
 struct {
 __u16 id;
 __u64 lun;
 __u16 reserved1;
 __u32 reserved2;
 } __attribute__ ((packed)) scsi;
 struct {
 __u64 serial_number;
 __u64 reserved;
 } __attribute__ ((packed)) usb;
 struct {
 __u64 eui;
 __u64 reserved;
 } __attribute__ ((packed)) i1394;
 struct {
 __u64 wwid;
 __u64 lun;
 } __attribute__ ((packed)) fibre;
 struct {
 __u64 identity_tag;
 __u64 reserved;
 } __attribute__ ((packed)) i2o;
 struct {
 __u32 array_number;
 __u32 reserved1;
 __u64 reserved2;
 } __attribute__ ((packed)) raid;
 struct {
 __u8 device;
 __u8 reserved1;
 __u16 reserved2;
 __u32 reserved3;
 __u64 reserved4;
 } __attribute__ ((packed)) sata;
 struct {
 __u64 reserved1;
 __u64 reserved2;
 } __attribute__ ((packed)) unknown;
 } device_path;
 __u8 reserved4;
 __u8 checksum;
} __attribute__ ((packed)); /* packed: BIOS-defined on-disk layout */
/* Per-drive EDD info collected by the boot code. */
struct edd_info {
 __u8 device;
 __u8 version;
 __u16 interface_support;
 __u16 legacy_max_cylinder;
 __u8 legacy_max_head;
 __u8 legacy_sectors_per_track;
 struct edd_device_params params;
} __attribute__ ((packed));
/* All EDD data gathered at boot: MBR signatures plus per-drive info. */
struct edd {
 unsigned int mbr_signature[16];
 struct edd_info edd_info[6];
 unsigned char mbr_signature_nr;
 unsigned char edd_info_nr;
};
extern struct edd edd;
/* One BIOS e820 memory map entry: a physical range and its type. */
struct e820entry {
 __u64 addr;
 __u64 size;
 __u32 type;
} __attribute__((packed)); /* packed: matches the BIOS call's layout */
/* Full e820 map: entry count plus fixed-size entry array. */
struct e820map {
 __u32 nr_map;
 struct e820entry map[128];
};
extern struct e820map e820;
extern struct e820map e820_saved;
extern unsigned long pci_mem_start;
/* Query/mutate the e820 map; see arch/x86/kernel/e820.c for semantics. */
extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
extern void e820_add_region(u64 start, u64 size, int type);
extern void e820_print_map(char *who);
extern int
sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map);
extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
 unsigned new_type);
extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
 int checktype);
extern void update_e820(void);
extern void e820_setup_gap(void);
extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
 unsigned long start_addr, unsigned long long end_addr);
struct setup_data;
extern void parse_e820_ext(struct setup_data *data);
extern void e820_mark_nosave_regions(unsigned long limit_pfn);
/* No-op stub: early memtest support is compiled out in this configuration. */
static inline __attribute__((always_inline)) void early_memtest(unsigned long start, unsigned long end)
{
}
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
void memblock_x86_fill(void);
void memblock_find_dma_reserve(void);
extern void finish_e820_parsing(void);
extern void e820_reserve_resources(void);
extern void e820_reserve_resources_late(void);
extern void setup_memory_map(void);
extern char *default_machine_specific_memory_setup(void);
4912static inline __attribute__((always_inline)) bool is_ISA_range(u64 s, u64 e)
4913{
4914 return s >= 0xa0000 && e <= 0x100000;
4915}
/* Node in the hierarchical I/O port / memory resource tree. */
struct resource {
 resource_size_t start; /* inclusive range [start, end] */
 resource_size_t end;
 const char *name;
 unsigned long flags;
 struct resource *parent, *sibling, *child; /* tree linkage */
};
/* Singly linked list of resources, as used by PCI code. */
struct resource_list {
 struct resource_list *next;
 struct resource *res;
 struct pci_dev *dev;
};
/* Roots of the two global resource trees. */
extern struct resource ioport_resource;
extern struct resource iomem_resource;
/* Insert/remove resources in the tree; *_conflict variants return the clash. */
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
void release_child_resources(struct resource *new);
extern void reserve_region_with_split(struct resource *root,
 resource_size_t start, resource_size_t end,
 const char *name);
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern void arch_remove_reservations(struct resource *avail);
/* Find and claim a free range satisfying size/alignment constraints. */
extern int allocate_resource(struct resource *root, struct resource *new,
 resource_size_t size, resource_size_t min,
 resource_size_t max, resource_size_t align,
 resource_size_t (*alignf)(void *,
 const struct resource *,
 resource_size_t,
 resource_size_t),
 void *alignf_data);
int adjust_resource(struct resource *res, resource_size_t start,
 resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
4952static inline __attribute__((always_inline)) resource_size_t resource_size(const struct resource *res)
4953{
4954 return res->end - res->start + 1;
4955}
4956static inline __attribute__((always_inline)) unsigned long resource_type(const struct resource *res)
4957{
4958 return res->flags & 0x00001f00;
4959}
/* Low-level region claim/check/release under a given resource root. */
extern struct resource * __request_region(struct resource *,
 resource_size_t start,
 resource_size_t n,
 const char *name, int flags);
extern int __check_region(struct resource *, resource_size_t, resource_size_t);
extern void __release_region(struct resource *, resource_size_t,
 resource_size_t);
/* Deprecated: racy check of I/O port availability; use request_region(). */
static inline __attribute__((always_inline)) int __attribute__((deprecated)) check_region(resource_size_t s,
 resource_size_t n)
{
 return __check_region(&ioport_resource, s, n);
}
struct device;
/* Device-managed region helpers: released automatically on driver detach. */
extern struct resource * __devm_request_region(struct device *dev,
 struct resource *parent, resource_size_t start,
 resource_size_t n, const char *name);
extern void __devm_release_region(struct device *dev, struct resource *parent,
 resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
extern int iomem_is_exclusive(u64 addr);
/* Invoke func on every System RAM chunk inside the given pfn range. */
extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 void *arg, int (*func)(unsigned long, unsigned long, void *));
/* Intel SpeedStep (IST) BIOS interface data from the boot loader. */
struct ist_info {
 __u32 signature;
 __u32 command;
 __u32 event;
 __u32 perf_level;
};
extern struct ist_info ist_info;
/* Raw 128-byte EDID block of the boot display. */
struct edid_info {
 unsigned char dummy[128];
};
extern struct edid_info edid_info;
/* Linked list node for extended boot data (setup_data chain). */
struct setup_data {
 __u64 next; /* physical address of the next node, 0 terminates */
 __u32 type;
 __u32 len;
 __u8 data[0]; /* payload follows the header */
};
/* The x86 real-mode kernel header (boot protocol, offset 0x1f1). */
struct setup_header {
 __u8 setup_sects;
 __u16 root_flags;
 __u32 syssize;
 __u16 ram_size;
 __u16 vid_mode;
 __u16 root_dev;
 __u16 boot_flag;
 __u16 jump;
 __u32 header; /* "HdrS" magic */
 __u16 version;
 __u32 realmode_swtch;
 __u16 start_sys;
 __u16 kernel_version;
 __u8 type_of_loader;
 __u8 loadflags;
 __u16 setup_move_size;
 __u32 code32_start;
 __u32 ramdisk_image;
 __u32 ramdisk_size;
 __u32 bootsect_kludge;
 __u16 heap_end_ptr;
 __u8 ext_loader_ver;
 __u8 ext_loader_type;
 __u32 cmd_line_ptr;
 __u32 initrd_addr_max;
 __u32 kernel_alignment;
 __u8 relocatable_kernel;
 __u8 _pad2[3];
 __u32 cmdline_size;
 __u32 hardware_subarch;
 __u64 hardware_subarch_data;
 __u32 payload_offset;
 __u32 payload_length;
 __u64 setup_data; /* physical address of the setup_data chain */
} __attribute__((packed)); /* packed: layout fixed by the boot protocol */
struct sys_desc_table {
 __u16 length;
 __u8 table[14];
};
/* OLPC Open Firmware calling information. */
struct olpc_ofw_header {
 __u32 ofw_magic;
 __u32 ofw_version;
 __u32 cif_handler;
 __u32 irq_desc_table;
} __attribute__((packed));
/* EFI loader handoff: system table and memory map locations. */
struct efi_info {
 __u32 efi_loader_signature;
 __u32 efi_systab;
 __u32 efi_memdesc_size;
 __u32 efi_memdesc_version;
 __u32 efi_memmap;
 __u32 efi_memmap_size;
 __u32 efi_systab_hi; /* high 32 bits for 64-bit addresses */
 __u32 efi_memmap_hi;
};
/* The "zero page": everything the boot loader passes to the kernel. */
struct boot_params {
 struct screen_info screen_info;
 struct apm_bios_info apm_bios_info;
 __u8 _pad2[4];
 __u64 tboot_addr;
 struct ist_info ist_info;
 __u8 _pad3[16];
 __u8 hd0_info[16];
 __u8 hd1_info[16];
 struct sys_desc_table sys_desc_table;
 struct olpc_ofw_header olpc_ofw_header;
 __u8 _pad4[128];
 struct edid_info edid_info;
 struct efi_info efi_info;
 __u32 alt_mem_k;
 __u32 scratch;
 __u8 e820_entries;
 __u8 eddbuf_entries;
 __u8 edd_mbr_sig_buf_entries;
 __u8 _pad6[6];
 struct setup_header hdr;
 __u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
 __u32 edd_mbr_sig_buffer[16];
 struct e820entry e820_map[128];
 __u8 _pad8[48];
 struct edd_info eddbuf[6];
 __u8 _pad9[276];
} __attribute__ ((packed));
/* Platform subarchitectures selectable via hdr.hardware_subarch. */
enum {
 X86_SUBARCH_PC = 0,
 X86_SUBARCH_LGUEST,
 X86_SUBARCH_XEN,
 X86_SUBARCH_MRST,
 X86_SUBARCH_CE4100,
 X86_NR_SUBARCHS,
};
struct mpc_bus;
struct mpc_cpu;
struct mpc_table;
/* Platform hooks for parsing the MP configuration table. */
struct x86_init_mpparse {
 void (*mpc_record)(unsigned int mode);
 void (*setup_ioapic_ids)(void);
 int (*mpc_apic_id)(struct mpc_cpu *m);
 void (*smp_read_mpc_oem)(struct mpc_table *mpc);
 void (*mpc_oem_pci_bus)(struct mpc_bus *m);
 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
 void (*find_smp_config)(void);
 void (*get_smp_config)(unsigned int early);
};
/* Platform hooks for early resource/memory-map setup. */
struct x86_init_resources {
 void (*probe_roms)(void);
 void (*reserve_resources)(void);
 char *(*memory_setup)(void);
};
/* Platform hooks for interrupt controller initialization. */
struct x86_init_irqs {
 void (*pre_vector_init)(void);
 void (*intr_init)(void);
 void (*trap_init)(void);
};
/* OEM-specific setup and banner hooks. */
struct x86_init_oem {
 void (*arch_setup)(void);
 void (*banner)(void);
};
/* Hook to reserve memory for early page tables. */
struct x86_init_mapping {
 void (*pagetable_reserve)(u64 start, u64 end);
};
/* Hooks bracketing initial page table construction. */
struct x86_init_paging {
 void (*pagetable_setup_start)(pgd_t *base);
 void (*pagetable_setup_done)(pgd_t *base);
};
/* Timer/clock initialization hooks. */
struct x86_init_timers {
 void (*setup_percpu_clockev)(void);
 void (*tsc_pre_init)(void);
 void (*timer_init)(void);
 void (*wallclock_init)(void);
};
/* IOMMU initialization hook. */
struct x86_init_iommu {
 int (*iommu_init)(void);
};
/* PCI initialization hooks. */
struct x86_init_pci {
 int (*arch_init)(void);
 int (*init)(void);
 void (*init_irq)(void);
 void (*fixup_irqs)(void);
};
/* All boot-time platform ops, grouped; overridden per subarch. */
struct x86_init_ops {
 struct x86_init_resources resources;
 struct x86_init_mpparse mpparse;
 struct x86_init_irqs irqs;
 struct x86_init_oem oem;
 struct x86_init_mapping mapping;
 struct x86_init_paging paging;
 struct x86_init_timers timers;
 struct x86_init_iommu iommu;
 struct x86_init_pci pci;
};
/* Ops run when a secondary CPU comes up. */
struct x86_cpuinit_ops {
 void (*setup_percpu_clockev)(void);
};
/* Runtime platform ops (clock, wallclock, IOMMU shutdown, ...). */
struct x86_platform_ops {
 unsigned long (*calibrate_tsc)(void);
 unsigned long (*get_wallclock)(void);
 int (*set_wallclock)(unsigned long nowtime);
 void (*iommu_shutdown)(void);
 bool (*is_untracked_pat_range)(u64 start, u64 end);
 void (*nmi_init)(void);
 int (*i8042_detect)(void);
};
struct pci_dev;
/* MSI setup/teardown ops, overridable by paravirt. */
struct x86_msi_ops {
 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
 void (*teardown_msi_irq)(unsigned int irq);
 void (*teardown_msi_irqs)(struct pci_dev *dev);
};
extern struct x86_init_ops x86_init;
extern struct x86_cpuinit_ops x86_cpuinit;
extern struct x86_platform_ops x86_platform;
extern struct x86_msi_ops x86_msi;
/* Default do-nothing implementations for the hooks above. */
extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
/*
 * Memory-mapped register layout of the local APIC. Each register occupies
 * a 16-byte slot (4 ints); only the first int of each slot is meaningful.
 * NOTE(review): field meanings follow the Intel SDM local APIC chapter —
 * layout below is as preprocessed, kept byte-identical.
 */
struct local_apic {
 struct { unsigned int __reserved[4]; } __reserved_01;
 struct { unsigned int __reserved[4]; } __reserved_02;
 /* APIC ID register. */
 struct {
 unsigned int __reserved_1 : 24,
 phys_apic_id : 4,
 __reserved_2 : 4;
 unsigned int __reserved[3];
 } id;
 /* Version register (read-only). */
 const
 struct {
 unsigned int version : 8,
 __reserved_1 : 8,
 max_lvt : 8,
 __reserved_2 : 8;
 unsigned int __reserved[3];
 } version;
 struct { unsigned int __reserved[4]; } __reserved_03;
 struct { unsigned int __reserved[4]; } __reserved_04;
 struct { unsigned int __reserved[4]; } __reserved_05;
 struct { unsigned int __reserved[4]; } __reserved_06;
 /* Task priority register. */
 struct {
 unsigned int priority : 8,
 __reserved_1 : 24;
 unsigned int __reserved_2[3];
 } tpr;
 /* Arbitration priority register (read-only). */
 const
 struct {
 unsigned int priority : 8,
 __reserved_1 : 24;
 unsigned int __reserved_2[3];
 } apr;
 /* Processor priority register (read-only). */
 const
 struct {
 unsigned int priority : 8,
 __reserved_1 : 24;
 unsigned int __reserved_2[3];
 } ppr;
 /* End-of-interrupt register (write-only in practice). */
 struct {
 unsigned int eoi;
 unsigned int __reserved[3];
 } eoi;
 struct { unsigned int __reserved[4]; } __reserved_07;
 /* Logical destination register. */
 struct {
 unsigned int __reserved_1 : 24,
 logical_dest : 8;
 unsigned int __reserved_2[3];
 } ldr;
 /* Destination format register. */
 struct {
 unsigned int __reserved_1 : 28,
 model : 4;
 unsigned int __reserved_2[3];
 } dfr;
 /* Spurious interrupt vector register. */
 struct {
 unsigned int spurious_vector : 8,
 apic_enabled : 1,
 focus_cpu : 1,
 __reserved_2 : 22;
 unsigned int __reserved_3[3];
 } svr;
 /* In-service, trigger-mode and interrupt-request bitmaps (256 bits each). */
 struct {
 unsigned int bitfield;
 unsigned int __reserved[3];
 } isr [8];
 struct {
 unsigned int bitfield;
 unsigned int __reserved[3];
 } tmr [8];
 struct {
 unsigned int bitfield;
 unsigned int __reserved[3];
 } irr [8];
 /* Error status register, viewable as bits or as a whole word. */
 union {
 struct {
 unsigned int send_cs_error : 1,
 receive_cs_error : 1,
 send_accept_error : 1,
 receive_accept_error : 1,
 __reserved_1 : 1,
 send_illegal_vector : 1,
 receive_illegal_vector : 1,
 illegal_register_address : 1,
 __reserved_2 : 24;
 unsigned int __reserved_3[3];
 } error_bits;
 struct {
 unsigned int errors;
 unsigned int __reserved_3[3];
 } all_errors;
 } esr;
 struct { unsigned int __reserved[4]; } __reserved_08;
 struct { unsigned int __reserved[4]; } __reserved_09;
 struct { unsigned int __reserved[4]; } __reserved_10;
 struct { unsigned int __reserved[4]; } __reserved_11;
 struct { unsigned int __reserved[4]; } __reserved_12;
 struct { unsigned int __reserved[4]; } __reserved_13;
 struct { unsigned int __reserved[4]; } __reserved_14;
 /* Interrupt command register, low word (ICR). */
 struct {
 unsigned int vector : 8,
 delivery_mode : 3,
 destination_mode : 1,
 delivery_status : 1,
 __reserved_1 : 1,
 level : 1,
 trigger : 1,
 __reserved_2 : 2,
 shorthand : 2,
 __reserved_3 : 12;
 unsigned int __reserved_4[3];
 } icr1;
 /* Interrupt command register, high word: destination field. */
 struct {
 union {
 unsigned int __reserved_1 : 24,
 phys_dest : 4,
 __reserved_2 : 4;
 unsigned int __reserved_3 : 24,
 logical_dest : 8;
 } dest;
 unsigned int __reserved_4[3];
 } icr2;
 /* Local vector table entries: timer, thermal, perf counter, LINT0/1, error. */
 struct {
 unsigned int vector : 8,
 __reserved_1 : 4,
 delivery_status : 1,
 __reserved_2 : 3,
 mask : 1,
 timer_mode : 1,
 __reserved_3 : 14;
 unsigned int __reserved_4[3];
 } lvt_timer;
 struct {
 unsigned int vector : 8,
 delivery_mode : 3,
 __reserved_1 : 1,
 delivery_status : 1,
 __reserved_2 : 3,
 mask : 1,
 __reserved_3 : 15;
 unsigned int __reserved_4[3];
 } lvt_thermal;
 struct {
 unsigned int vector : 8,
 delivery_mode : 3,
 __reserved_1 : 1,
 delivery_status : 1,
 __reserved_2 : 3,
 mask : 1,
 __reserved_3 : 15;
 unsigned int __reserved_4[3];
 } lvt_pc;
 struct {
 unsigned int vector : 8,
 delivery_mode : 3,
 __reserved_1 : 1,
 delivery_status : 1,
 polarity : 1,
 remote_irr : 1,
 trigger : 1,
 mask : 1,
 __reserved_2 : 15;
 unsigned int __reserved_3[3];
 } lvt_lint0;
 struct {
 unsigned int vector : 8,
 delivery_mode : 3,
 __reserved_1 : 1,
 delivery_status : 1,
 polarity : 1,
 remote_irr : 1,
 trigger : 1,
 mask : 1,
 __reserved_2 : 15;
 unsigned int __reserved_3[3];
 } lvt_lint1;
 struct {
 unsigned int vector : 8,
 __reserved_1 : 4,
 delivery_status : 1,
 __reserved_2 : 3,
 mask : 1,
 __reserved_3 : 15;
 unsigned int __reserved_4[3];
 } lvt_error;
 /* APIC timer initial count and (read-only) current count. */
 struct {
 unsigned int initial_count;
 unsigned int __reserved_2[3];
 } timer_icr;
 const
 struct {
 unsigned int curr_count;
 unsigned int __reserved_2[3];
 } timer_ccr;
 struct { unsigned int __reserved[4]; } __reserved_16;
 struct { unsigned int __reserved[4]; } __reserved_17;
 struct { unsigned int __reserved[4]; } __reserved_18;
 struct { unsigned int __reserved[4]; } __reserved_19;
 /* Timer divide configuration register. */
 struct {
 unsigned int divisor : 4,
 __reserved_1 : 28;
 unsigned int __reserved_2[3];
 } timer_dcr;
 struct { unsigned int __reserved[4]; } __reserved_20;
} __attribute__ ((packed));
/* Delivery modes programmable into an I/O APIC redirection entry. */
enum ioapic_irq_destination_types {
 dest_Fixed = 0,
 dest_LowestPrio = 1,
 dest_SMI = 2,
 dest__reserved_1 = 3,
 dest_NMI = 4,
 dest_INIT = 5,
 dest__reserved_2 = 6,
 dest_ExtINT = 7
};
/* Global MP-parse state filled in while reading the MP/ACPI tables. */
extern int apic_version[];
extern int pic_mode;
extern unsigned int def_to_bigsmp;
extern unsigned long mp_bus_not_pci[(((260) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
extern unsigned int boot_cpu_physical_apicid;
extern unsigned int max_physical_apicid;
extern int mpc_default_type;
extern unsigned long mp_lapic_addr;
extern int smp_found_config;
/* Full (late) SMP configuration parse via the platform hook. */
static inline __attribute__((always_inline)) void get_smp_config(void)
{
 x86_init.mpparse.get_smp_config(0);
}
/* Early SMP configuration parse (early=1) via the platform hook. */
static inline __attribute__((always_inline)) void early_get_smp_config(void)
{
 x86_init.mpparse.get_smp_config(1);
}
/* Locate the MP configuration table via the platform hook. */
static inline __attribute__((always_inline)) void find_smp_config(void)
{
 x86_init.mpparse.find_smp_config();
}
extern void early_reserve_e820_mpc_new(void);
extern int enable_update_mptable;
/* Default implementations of the mpparse platform hooks. */
extern int default_mpc_apic_id(struct mpc_cpu *m);
extern void default_smp_read_mpc_oem(struct mpc_table *mpc);
extern void default_mpc_oem_bus_info(struct mpc_bus *m, char *str);
extern void default_find_smp_config(void);
extern void default_get_smp_config(unsigned int early);
/* Register a CPU found in the MP/ACPI tables (init-time only). */
void __attribute__ ((__section__(".cpuinit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) generic_processor_info(int apicid, int version);
extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
 u32 gsi);
extern void mp_config_acpi_legacy_irqs(void);
struct device;
extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
 int active_high_low);
/* Bitmap of physical APIC ids (256 bits). */
struct physid_mask {
 unsigned long mask[(((256) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
};
typedef struct physid_mask physid_mask_t;
5429static inline __attribute__((always_inline)) unsigned long physids_coerce(physid_mask_t *map)
5430{
5431 return map->mask[0];
5432}
/* Expand a single word of APIC ids into a cleared 256-bit physid mask. */
static inline __attribute__((always_inline)) void physids_promote(unsigned long physids, physid_mask_t *map)
{
 bitmap_zero((*map).mask, 256);
 map->mask[0] = physids;
}
/* Build a physid mask containing exactly one physical APIC id. */
static inline __attribute__((always_inline)) void physid_set_mask_of_physid(int physid, physid_mask_t *map)
{
 bitmap_zero((*map).mask, 256);
 set_bit(physid, (*map).mask);
}
extern physid_mask_t phys_cpu_present_map;
extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
extern int default_acpi_madt_oem_check(char *, char *);
/* Softirq (bottom-half) disable/enable primitives. */
extern void local_bh_disable(void);
extern void _local_bh_enable(void);
extern void local_bh_enable(void);
extern void local_bh_enable_ip(unsigned long ip);
/* x86 ticket spinlock: low byte = owner ticket, next byte = next ticket. */
typedef struct arch_spinlock {
 unsigned int slock;
} arch_spinlock_t;
/* x86 rwlock: reader count biased by 0x01000000 (the write bias). */
typedef struct {
 unsigned int lock;
} arch_rwlock_t;
/* Raw spinlock with debug (magic/owner) and lockdep fields compiled in. */
typedef struct raw_spinlock {
 arch_spinlock_t raw_lock;
 unsigned int magic, owner_cpu;
 void *owner;
 struct lockdep_map dep_map;
} raw_spinlock_t;
/* spinlock_t wraps raw_spinlock; the union re-exposes dep_map at its offset. */
typedef struct spinlock {
 union {
 struct raw_spinlock rlock;
 struct {
 u8 __padding[(__builtin_offsetof(struct raw_spinlock,dep_map))];
 struct lockdep_map dep_map;
 };
 };
} spinlock_t;
/* Reader/writer lock with the same debug/lockdep instrumentation. */
typedef struct {
 arch_rwlock_t raw_lock;
 unsigned int magic, owner_cpu;
 void *owner;
 struct lockdep_map dep_map;
} rwlock_t;
/*
 * Take a ticket: atomically xadd 0x0100 to grab the next ticket number,
 * then spin until the owner byte matches our ticket. The .smp_locks
 * section entry lets the kernel patch the lock prefix away on UP.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void __ticket_spin_lock(arch_spinlock_t *lock)
{
 short inc = 0x0100;
 asm volatile (
 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "xaddw %w0, %1\n"
 "1:\t"
 "cmpb %h0, %b0\n\t"
 "je 2f\n\t"
 "rep ; nop\n\t" /* pause: be polite to the sibling hyperthread */
 "movb %1, %b0\n\t"
 "jmp 1b\n"
 "2:"
 : "+Q" (inc), "+m" (lock->slock)
 :
 : "memory", "cc");
}
/*
 * Try to take the lock: only attempt the cmpxchg when head == tail
 * (lock currently free). Returns nonzero on success.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) int __ticket_spin_trylock(arch_spinlock_t *lock)
{
 int tmp, new;
 asm volatile("movzwl %2, %0\n\t"
 "cmpb %h0,%b0\n\t"
 "leal 0x100(%" "k" "0), %1\n\t"
 "jne 1f\n\t"
 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgw %w1,%2\n\t"
 "1:"
 "sete %b1\n\t"
 "movzbl %b1,%0\n\t"
 : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
 :
 : "memory", "cc");
 return tmp;
}
/*
 * Release: bump the owner byte so the next ticket holder proceeds.
 * A plain incb suffices; x86 stores are release-ordered.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void __ticket_spin_unlock(arch_spinlock_t *lock)
{
 asm volatile( "incb %0"
 : "+m" (lock->slock)
 :
 : "memory", "cc");
}
5516static inline __attribute__((always_inline)) int __ticket_spin_is_locked(arch_spinlock_t *lock)
5517{
5518 int tmp = (*(volatile typeof(lock->slock) *)&(lock->slock));
5519 return !!(((tmp >> 8) ^ tmp) & ((1 << 8) - 1));
5520}
5521static inline __attribute__((always_inline)) int __ticket_spin_is_contended(arch_spinlock_t *lock)
5522{
5523 int tmp = (*(volatile typeof(lock->slock) *)&(lock->slock));
5524 return (((tmp >> 8) - tmp) & ((1 << 8) - 1)) > 1;
5525}
/* Generic arch_spin_* API, mapped onto the ticket-lock primitives above. */
static inline __attribute__((always_inline)) int arch_spin_is_locked(arch_spinlock_t *lock)
{
 return __ticket_spin_is_locked(lock);
}
static inline __attribute__((always_inline)) int arch_spin_is_contended(arch_spinlock_t *lock)
{
 return __ticket_spin_is_contended(lock);
}
static inline __attribute__((always_inline)) __attribute__((always_inline)) void arch_spin_lock(arch_spinlock_t *lock)
{
 __ticket_spin_lock(lock);
}
static inline __attribute__((always_inline)) __attribute__((always_inline)) int arch_spin_trylock(arch_spinlock_t *lock)
{
 return __ticket_spin_trylock(lock);
}
static inline __attribute__((always_inline)) __attribute__((always_inline)) void arch_spin_unlock(arch_spinlock_t *lock)
{
 __ticket_spin_unlock(lock);
}
/* flags variant ignores the saved IRQ flags on x86. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) void arch_spin_lock_flags(arch_spinlock_t *lock,
 unsigned long flags)
{
 arch_spin_lock(lock);
}
/* Busy-wait until the lock is observed free (does not acquire it). */
static inline __attribute__((always_inline)) void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
 while (arch_spin_is_locked(lock))
 cpu_relax();
}
5556static inline __attribute__((always_inline)) int arch_read_can_lock(arch_rwlock_t *lock)
5557{
5558 return (int)(lock)->lock > 0;
5559}
5560static inline __attribute__((always_inline)) int arch_write_can_lock(arch_rwlock_t *lock)
5561{
5562 return (lock)->lock == 0x01000000;
5563}
/* Reader lock: decrement; if it went negative a writer holds it — go slow. */
static inline __attribute__((always_inline)) void arch_read_lock(arch_rwlock_t *rw)
{
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " subl $1,(%0)\n\t"
 "jns 1f\n"
 "call __read_lock_failed\n\t"
 "1:\n"
 ::"a" (rw) : "memory");
}
/* Writer lock: subtract the full bias; nonzero result means contention. */
static inline __attribute__((always_inline)) void arch_write_lock(arch_rwlock_t *rw)
{
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " subl %1,(%0)\n\t"
 "jz 1f\n"
 "call __write_lock_failed\n\t"
 "1:\n"
 ::"a" (rw), "i" (0x01000000) : "memory");
}
/* Reader trylock: grab a slot, back out on failure. The big conditional is
 * the preprocessed ftrace branch-profiling wrapper around the plain test. */
static inline __attribute__((always_inline)) int arch_read_trylock(arch_rwlock_t *lock)
{
 atomic_t *count = (atomic_t *)lock;
 if (__builtin_constant_p((((atomic_sub_return(1, count)) >= 0))) ? !!(((atomic_sub_return(1, count)) >= 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/spinlock.h", .line = 271, }; ______r = !!(((atomic_sub_return(1, count)) >= 0)); ______f.miss_hit[______r]++; ______r; }))
 return 1;
 atomic_inc(count);
 return 0;
}
/* Writer trylock: take the whole bias, restore it on failure. */
static inline __attribute__((always_inline)) int arch_write_trylock(arch_rwlock_t *lock)
{
 atomic_t *count = (atomic_t *)lock;
 if (__builtin_constant_p(((atomic_sub_and_test(0x01000000, count)))) ? !!((atomic_sub_and_test(0x01000000, count))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/spinlock.h", .line = 281, }; ______r = !!((atomic_sub_and_test(0x01000000, count))); ______f.miss_hit[______r]++; ______r; }))
 return 1;
 atomic_add(0x01000000, count);
 return 0;
}
/* Reader unlock: give the slot back. */
static inline __attribute__((always_inline)) void arch_read_unlock(arch_rwlock_t *rw)
{
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "incl %0" :"+m" (rw->lock) : : "memory");
}
/* Writer unlock: restore the write bias. */
static inline __attribute__((always_inline)) void arch_write_unlock(arch_rwlock_t *rw)
{
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "addl %1, %0"
 : "+m" (rw->lock) : "i" (0x01000000) : "memory");
}
/* No extra barrier needed after a lock on x86 (loads are acquire-ordered). */
static inline __attribute__((always_inline)) void smp_mb__after_lock(void) { }
/* Debug-aware mid-layer lock operations (lockdep + debug checks). */
 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 struct lock_class_key *key);
 extern void do_raw_spin_lock(raw_spinlock_t *lock) ;
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) ;
 extern void __rwlock_init(rwlock_t *lock, const char *name,
 struct lock_class_key *key);
 extern void do_raw_read_lock(rwlock_t *lock) ;
 extern int do_raw_read_trylock(rwlock_t *lock);
 extern void do_raw_read_unlock(rwlock_t *lock) ;
 extern void do_raw_write_lock(rwlock_t *lock) ;
 extern int do_raw_write_trylock(rwlock_t *lock);
 extern void do_raw_write_unlock(rwlock_t *lock) ;
/* True if addr lies inside the .spinlock.text section. */
int in_lock_functions(unsigned long addr);
/* Out-of-line _raw_spin_* entry points, placed in .spinlock.text. */
void __attribute__((section(".spinlock.text"))) _raw_spin_lock(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
 ;
void __attribute__((section(".spinlock.text")))
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
 ;
void __attribute__((section(".spinlock.text"))) _raw_spin_lock_bh(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_lock_irq(raw_spinlock_t *lock)
 ;
unsigned long __attribute__((section(".spinlock.text"))) _raw_spin_lock_irqsave(raw_spinlock_t *lock)
 ;
unsigned long __attribute__((section(".spinlock.text")))
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
 ;
int __attribute__((section(".spinlock.text"))) _raw_spin_trylock(raw_spinlock_t *lock);
int __attribute__((section(".spinlock.text"))) _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __attribute__((section(".spinlock.text"))) _raw_spin_unlock(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_unlock_bh(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_unlock_irq(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text")))
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 ;
/*
 * Non-blocking raw spinlock acquire (preprocessed kernel source: the long
 * do{...}while(0) one-liners are expanded preempt macros carrying ftrace
 * branch-profiling instrumentation, the ______f/______r blobs).
 * Disables preemption, attempts the arch-level trylock; on success records
 * the acquisition with lockdep and returns 1, otherwise re-enables
 * preemption (calling preempt_schedule() if thread-info flag 3 is set --
 * presumably TIF_NEED_RESCHED, confirm against arch headers) and returns 0.
 */
static inline __attribute__((always_inline)) int __raw_spin_trylock(raw_spinlock_t *lock)
{
 /* expanded preempt_disable(): bump preempt count + compiler barrier */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 /* branch-profiled test of the arch trylock result */
 if (__builtin_constant_p(((do_raw_spin_trylock(lock)))) ? !!((do_raw_spin_trylock(lock))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 89, }; ______r = !!((do_raw_spin_trylock(lock))); ______f.miss_hit[______r]++; ______r; })) {
 lock_acquire(&lock->dep_map, 0, 1, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 return 1;
 }
 /* expanded preempt_enable(): barrier, drop count, reschedule check */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 93, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 93, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 93, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 93, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
 return 0;
}
/*
 * spin_lock_irqsave core: save and disable local IRQs, disable preemption,
 * record the acquisition with lockdep, then take the arch lock; returns the
 * saved flags for the matching unlock_irqrestore.
 * (Preprocessed source -- the do{}while(0) blobs are expanded macros.)
 */
static inline __attribute__((always_inline)) unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
 unsigned long flags;
 /* expanded local_irq_save(flags): save+disable IRQs, note it for lockdep */
 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_spin_lock(lock);
 return flags;
}
/*
 * spin_lock_irq core: disable local IRQs (without saving flags), disable
 * preemption, record the acquisition with lockdep, take the arch lock.
 * (Preprocessed source -- the do{}while(0) blobs are expanded macros.)
 */
static inline __attribute__((always_inline)) void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
 /* expanded local_irq_disable() */
 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_spin_lock(lock);
}
/*
 * spin_lock_bh core: disable bottom halves (softirqs), then preemption,
 * record the acquisition with lockdep, take the arch lock.
 */
static inline __attribute__((always_inline)) void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
 local_bh_disable();
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_spin_lock(lock);
}
/*
 * Plain spin_lock core: disable preemption, record the acquisition with
 * lockdep, take the arch lock. IRQ state is left untouched.
 */
static inline __attribute__((always_inline)) void __raw_spin_lock(raw_spinlock_t *lock)
{
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_spin_lock(lock);
}
/*
 * Plain spin_unlock core: tell lockdep, release the arch lock, then
 * re-enable preemption (the expanded blob drops the preempt count and
 * calls preempt_schedule() if thread-info flag 3 is set -- presumably
 * TIF_NEED_RESCHED, confirm against arch headers).
 */
static inline __attribute__((always_inline)) void __raw_spin_unlock(raw_spinlock_t *lock)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_spin_unlock(lock);
 /* expanded preempt_enable() with ftrace branch profiling */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 153, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 153, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 153, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 153, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * spin_unlock_irqrestore core: tell lockdep, release the arch lock,
 * restore the saved IRQ flags (tracing hardirqs off/on depending on
 * whether the restored state has IRQs disabled), then re-enable
 * preemption with a reschedule check.
 */
static inline __attribute__((always_inline)) void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
 unsigned long flags)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_spin_unlock(lock);
 /* expanded local_irq_restore(flags) with branch profiling */
 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 161, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
 /* expanded preempt_enable() */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 162, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 162, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 162, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 162, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * spin_unlock_irq core: tell lockdep, release the arch lock,
 * unconditionally re-enable local IRQs, then re-enable preemption with a
 * reschedule check.
 */
static inline __attribute__((always_inline)) void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_spin_unlock(lock);
 /* expanded local_irq_enable() */
 do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);
 /* expanded preempt_enable() */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 170, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 170, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 170, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 170, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * spin_unlock_bh core: tell lockdep, release the arch lock, drop the
 * preempt count (without the reschedule check -- local_bh_enable_ip()
 * performs the re-enable of bottom halves).
 */
static inline __attribute__((always_inline)) void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_spin_unlock(lock);
 /* barrier + sub_preempt_count(1) */
 do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0);
 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
/*
 * Non-blocking spin_lock_bh: disable bottom halves and preemption, then
 * attempt the arch-level trylock. On success record the acquisition with
 * lockdep and return 1; on failure undo the preempt/bh disables and
 * return 0.
 */
static inline __attribute__((always_inline)) int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
 local_bh_disable();
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 /* branch-profiled test of the arch trylock result */
 if (__builtin_constant_p(((do_raw_spin_trylock(lock)))) ? !!((do_raw_spin_trylock(lock))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock_api_smp.h", .line = 185, }; ______r = !!((do_raw_spin_trylock(lock))); ______f.miss_hit[______r]++; ______r; })) {
 lock_acquire(&lock->dep_map, 0, 1, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 return 1;
 }
 /* failed: drop preempt count and re-enable bottom halves */
 do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0);
 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 return 0;
}
5721void __attribute__((section(".spinlock.text"))) _raw_read_lock(rwlock_t *lock) ;
5722void __attribute__((section(".spinlock.text"))) _raw_write_lock(rwlock_t *lock) ;
5723void __attribute__((section(".spinlock.text"))) _raw_read_lock_bh(rwlock_t *lock) ;
5724void __attribute__((section(".spinlock.text"))) _raw_write_lock_bh(rwlock_t *lock) ;
5725void __attribute__((section(".spinlock.text"))) _raw_read_lock_irq(rwlock_t *lock) ;
5726void __attribute__((section(".spinlock.text"))) _raw_write_lock_irq(rwlock_t *lock) ;
5727unsigned long __attribute__((section(".spinlock.text"))) _raw_read_lock_irqsave(rwlock_t *lock)
5728 ;
5729unsigned long __attribute__((section(".spinlock.text"))) _raw_write_lock_irqsave(rwlock_t *lock)
5730 ;
5731int __attribute__((section(".spinlock.text"))) _raw_read_trylock(rwlock_t *lock);
5732int __attribute__((section(".spinlock.text"))) _raw_write_trylock(rwlock_t *lock);
5733void __attribute__((section(".spinlock.text"))) _raw_read_unlock(rwlock_t *lock) ;
5734void __attribute__((section(".spinlock.text"))) _raw_write_unlock(rwlock_t *lock) ;
5735void __attribute__((section(".spinlock.text"))) _raw_read_unlock_bh(rwlock_t *lock) ;
5736void __attribute__((section(".spinlock.text"))) _raw_write_unlock_bh(rwlock_t *lock) ;
5737void __attribute__((section(".spinlock.text"))) _raw_read_unlock_irq(rwlock_t *lock) ;
5738void __attribute__((section(".spinlock.text"))) _raw_write_unlock_irq(rwlock_t *lock) ;
5739void __attribute__((section(".spinlock.text")))
5740_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
5741 ;
5742void __attribute__((section(".spinlock.text")))
5743_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
5744 ;
/*
 * Non-blocking reader acquire of an rwlock: disable preemption, attempt
 * the arch-level read trylock; on success record a read acquisition
 * (lockdep "read" argument 2) and return 1, otherwise re-enable
 * preemption with a reschedule check and return 0.
 * (Preprocessed source -- the do{}while(0) blobs are expanded macros
 * with ftrace branch profiling.)
 */
static inline __attribute__((always_inline)) int __raw_read_trylock(rwlock_t *lock)
{
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 if (__builtin_constant_p(((do_raw_read_trylock(lock)))) ? !!((do_raw_read_trylock(lock))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 120, }; ______r = !!((do_raw_read_trylock(lock))); ______f.miss_hit[______r]++; ______r; })) {
 lock_acquire(&lock->dep_map, 0, 1, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 return 1;
 }
 /* expanded preempt_enable() */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 124, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 124, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 124, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 124, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
 return 0;
}
/*
 * Non-blocking writer acquire of an rwlock: disable preemption, attempt
 * the arch-level write trylock; on success record an exclusive
 * acquisition with lockdep and return 1, otherwise re-enable preemption
 * with a reschedule check and return 0.
 */
static inline __attribute__((always_inline)) int __raw_write_trylock(rwlock_t *lock)
{
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 if (__builtin_constant_p(((do_raw_write_trylock(lock)))) ? !!((do_raw_write_trylock(lock))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 131, }; ______r = !!((do_raw_write_trylock(lock))); ______f.miss_hit[______r]++; ______r; })) {
 lock_acquire(&lock->dep_map, 0, 1, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 return 1;
 }
 /* expanded preempt_enable() */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 135, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 135, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 135, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 135, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
 return 0;
}
/*
 * Plain read_lock core: disable preemption, record a read acquisition
 * with lockdep, take the arch read lock.
 */
static inline __attribute__((always_inline)) void __raw_read_lock(rwlock_t *lock)
{
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_read_lock(lock);
}
/*
 * read_lock_irqsave core: save and disable local IRQs, disable
 * preemption, record a read acquisition with lockdep, take the arch read
 * lock; returns the saved flags for the matching unlock_irqrestore.
 */
static inline __attribute__((always_inline)) unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
{
 unsigned long flags;
 /* expanded local_irq_save(flags) */
 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 (do_raw_read_lock)((lock))
 ;
 return flags;
}
/*
 * read_lock_irq core: disable local IRQs (without saving flags), disable
 * preemption, record a read acquisition with lockdep, take the arch read
 * lock.
 */
static inline __attribute__((always_inline)) void __raw_read_lock_irq(rwlock_t *lock)
{
 /* expanded local_irq_disable() */
 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_read_lock(lock);
}
/*
 * read_lock_bh core: disable bottom halves, then preemption, record a
 * read acquisition with lockdep, take the arch read lock.
 */
static inline __attribute__((always_inline)) void __raw_read_lock_bh(rwlock_t *lock)
{
 local_bh_disable();
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 2, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_read_lock(lock);
}
/*
 * write_lock_irqsave core: save and disable local IRQs, disable
 * preemption, record an exclusive acquisition with lockdep, take the arch
 * write lock; returns the saved flags for the matching unlock_irqrestore.
 */
static inline __attribute__((always_inline)) unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
{
 unsigned long flags;
 /* expanded local_irq_save(flags) */
 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 (do_raw_write_lock)((lock))
 ;
 return flags;
}
/*
 * write_lock_irq core: disable local IRQs (without saving flags), disable
 * preemption, record an exclusive acquisition with lockdep, take the arch
 * write lock.
 */
static inline __attribute__((always_inline)) void __raw_write_lock_irq(rwlock_t *lock)
{
 /* expanded local_irq_disable() */
 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_write_lock(lock);
}
/*
 * write_lock_bh core: disable bottom halves, then preemption, record an
 * exclusive acquisition with lockdep, take the arch write lock.
 */
static inline __attribute__((always_inline)) void __raw_write_lock_bh(rwlock_t *lock)
{
 local_bh_disable();
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_write_lock(lock);
}
/*
 * Plain write_lock core: disable preemption, record an exclusive
 * acquisition with lockdep, take the arch write lock.
 */
static inline __attribute__((always_inline)) void __raw_write_lock(rwlock_t *lock)
{
 /* expanded preempt_disable() */
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 lock_acquire(&lock->dep_map, 0, 0, 0, 2, ((void *)0), (unsigned long)__builtin_return_address(0));
 do_raw_write_lock(lock);
}
/*
 * Plain write_unlock core: tell lockdep, release the arch write lock,
 * then re-enable preemption (expanded blob: barrier, drop preempt count,
 * call preempt_schedule() if thread-info flag 3 is set).
 */
static inline __attribute__((always_inline)) void __raw_write_unlock(rwlock_t *lock)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_write_unlock(lock);
 /* expanded preempt_enable() */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 222, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 222, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 222, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 222, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * Plain read_unlock core: tell lockdep, release the arch read lock, then
 * re-enable preemption with a reschedule check.
 */
static inline __attribute__((always_inline)) void __raw_read_unlock(rwlock_t *lock)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_read_unlock(lock);
 /* expanded preempt_enable() */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 229, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 229, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 229, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 229, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * read_unlock_irqrestore core: tell lockdep, release the arch read lock,
 * restore the saved IRQ flags (tracing hardirqs off/on depending on the
 * restored state), then re-enable preemption with a reschedule check.
 */
static inline __attribute__((always_inline)) void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_read_unlock(lock);
 /* expanded local_irq_restore(flags) */
 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 237, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
 /* expanded preempt_enable() */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 238, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 238, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 238, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 238, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/*
 * read_unlock_irq core: tell lockdep, release the arch read lock,
 * unconditionally re-enable local IRQs, then re-enable preemption with a
 * reschedule check.
 */
static inline __attribute__((always_inline)) void __raw_read_unlock_irq(rwlock_t *lock)
{
 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
 do_raw_read_unlock(lock);
 /* expanded local_irq_enable() */
 do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);
 /* expanded preempt_enable() */
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 246, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 246, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 246, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 246, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
5852static inline __attribute__((always_inline)) void __raw_read_unlock_bh(rwlock_t *lock)
5853{
5854 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5855 do_raw_read_unlock(lock);
5856 do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0);
5857 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
5858}
5859static inline __attribute__((always_inline)) void __raw_write_unlock_irqrestore(rwlock_t *lock,
5860 unsigned long flags)
5861{
5862 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5863 do_raw_write_unlock(lock);
5864 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 262, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
5865 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 263, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 263, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 263, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 263, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
5866}
5867static inline __attribute__((always_inline)) void __raw_write_unlock_irq(rwlock_t *lock)
5868{
5869 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5870 do_raw_write_unlock(lock);
5871 do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);
5872 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 271, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 271, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 271, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rwlock_api_smp.h", .line = 271, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
5873}
5874static inline __attribute__((always_inline)) void __raw_write_unlock_bh(rwlock_t *lock)
5875{
5876 lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
5877 do_raw_write_unlock(lock);
5878 do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0);
5879 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
5880}
5881static inline __attribute__((always_inline)) raw_spinlock_t *spinlock_check(spinlock_t *lock)
5882{
5883 return &lock->rlock;
5884}
5885static inline __attribute__((always_inline)) void spin_lock(spinlock_t *lock)
5886{
5887 _raw_spin_lock(&lock->rlock);
5888}
5889static inline __attribute__((always_inline)) void spin_lock_bh(spinlock_t *lock)
5890{
5891 _raw_spin_lock_bh(&lock->rlock);
5892}
5893static inline __attribute__((always_inline)) int spin_trylock(spinlock_t *lock)
5894{
5895 return (_raw_spin_trylock(&lock->rlock));
5896}
5897static inline __attribute__((always_inline)) void spin_lock_irq(spinlock_t *lock)
5898{
5899 _raw_spin_lock_irq(&lock->rlock);
5900}
5901static inline __attribute__((always_inline)) void spin_unlock(spinlock_t *lock)
5902{
5903 _raw_spin_unlock(&lock->rlock);
5904}
5905static inline __attribute__((always_inline)) void spin_unlock_bh(spinlock_t *lock)
5906{
5907 _raw_spin_unlock_bh(&lock->rlock);
5908}
5909static inline __attribute__((always_inline)) void spin_unlock_irq(spinlock_t *lock)
5910{
5911 _raw_spin_unlock_irq(&lock->rlock);
5912}
5913static inline __attribute__((always_inline)) void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
5914{
5915 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _raw_spin_unlock_irqrestore(&lock->rlock, flags); } while (0);
5916}
5917static inline __attribute__((always_inline)) int spin_trylock_bh(spinlock_t *lock)
5918{
5919 return (_raw_spin_trylock_bh(&lock->rlock));
5920}
5921static inline __attribute__((always_inline)) int spin_trylock_irq(spinlock_t *lock)
5922{
5923 return ({ do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0); (_raw_spin_trylock(&lock->rlock)) ? 1 : ({ do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0); 0; }); });
5924}
5925static inline __attribute__((always_inline)) void spin_unlock_wait(spinlock_t *lock)
5926{
5927 arch_spin_unlock_wait(&(&lock->rlock)->raw_lock);
5928}
5929static inline __attribute__((always_inline)) int spin_is_locked(spinlock_t *lock)
5930{
5931 return arch_spin_is_locked(&(&lock->rlock)->raw_lock);
5932}
5933static inline __attribute__((always_inline)) int spin_is_contended(spinlock_t *lock)
5934{
5935 return arch_spin_is_contended(&(&lock->rlock)->raw_lock);
5936}
5937static inline __attribute__((always_inline)) int spin_can_lock(spinlock_t *lock)
5938{
5939 return (!arch_spin_is_locked(&(&lock->rlock)->raw_lock));
5940}
5941static inline __attribute__((always_inline)) void assert_spin_locked(spinlock_t *lock)
5942{
5943 do { if (__builtin_constant_p((((__builtin_constant_p(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) ? !!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock.h", .line = 380, }; ______r = __builtin_expect(!!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) ? !!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock.h", .line = 380, }; ______r = __builtin_expect(!!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock.h", .line = 380, }; ______r = !!(((__builtin_constant_p(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) ? 
!!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/spinlock.h", .line = 380, }; ______r = __builtin_expect(!!(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/spinlock.h"), "i" (380), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
5944}
5945extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
5946typedef struct {
5947 unsigned sequence;
5948 spinlock_t lock;
5949} seqlock_t;
5950static inline __attribute__((always_inline)) void write_seqlock(seqlock_t *sl)
5951{
5952 spin_lock(&sl->lock);
5953 ++sl->sequence;
5954 __asm__ __volatile__("": : :"memory");
5955}
5956static inline __attribute__((always_inline)) void write_sequnlock(seqlock_t *sl)
5957{
5958 __asm__ __volatile__("": : :"memory");
5959 sl->sequence++;
5960 spin_unlock(&sl->lock);
5961}
5962static inline __attribute__((always_inline)) int write_tryseqlock(seqlock_t *sl)
5963{
5964 int ret = spin_trylock(&sl->lock);
5965 if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 76, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; })) {
5966 ++sl->sequence;
5967 __asm__ __volatile__("": : :"memory");
5968 }
5969 return ret;
5970}
5971static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned read_seqbegin(const seqlock_t *sl)
5972{
5973 unsigned ret;
5974repeat:
5975 ret = (*(volatile typeof(sl->sequence) *)&(sl->sequence));
5976 if (__builtin_constant_p((((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 90, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 90, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 90, }; ______r = !!(((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 90, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
5977 cpu_relax();
5978 goto repeat;
5979 }
5980 __asm__ __volatile__("": : :"memory");
5981 return ret;
5982}
5983static inline __attribute__((always_inline)) __attribute__((always_inline)) int read_seqretry(const seqlock_t *sl, unsigned start)
5984{
5985 __asm__ __volatile__("": : :"memory");
5986 return (__builtin_constant_p(sl->sequence != start) ? !!(sl->sequence != start) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 108, }; ______r = __builtin_expect(!!(sl->sequence != start), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
5987}
5988typedef struct seqcount {
5989 unsigned sequence;
5990} seqcount_t;
5991static inline __attribute__((always_inline)) unsigned __read_seqcount_begin(const seqcount_t *s)
5992{
5993 unsigned ret;
5994repeat:
5995 ret = s->sequence;
5996 if (__builtin_constant_p((((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 145, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 145, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 145, }; ______r = !!(((__builtin_constant_p(ret & 1) ? !!(ret & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 145, }; ______r = __builtin_expect(!!(ret & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
5997 cpu_relax();
5998 goto repeat;
5999 }
6000 return ret;
6001}
6002static inline __attribute__((always_inline)) unsigned read_seqcount_begin(const seqcount_t *s)
6003{
6004 unsigned ret = __read_seqcount_begin(s);
6005 __asm__ __volatile__("": : :"memory");
6006 return ret;
6007}
6008static inline __attribute__((always_inline)) int __read_seqcount_retry(const seqcount_t *s, unsigned start)
6009{
6010 return (__builtin_constant_p(s->sequence != start) ? !!(s->sequence != start) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seqlock.h", .line = 184, }; ______r = __builtin_expect(!!(s->sequence != start), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
6011}
6012static inline __attribute__((always_inline)) int read_seqcount_retry(const seqcount_t *s, unsigned start)
6013{
6014 __asm__ __volatile__("": : :"memory");
6015 return __read_seqcount_retry(s, start);
6016}
6017static inline __attribute__((always_inline)) void write_seqcount_begin(seqcount_t *s)
6018{
6019 s->sequence++;
6020 __asm__ __volatile__("": : :"memory");
6021}
6022static inline __attribute__((always_inline)) void write_seqcount_end(seqcount_t *s)
6023{
6024 __asm__ __volatile__("": : :"memory");
6025 s->sequence++;
6026}
6027static inline __attribute__((always_inline)) void write_seqcount_barrier(seqcount_t *s)
6028{
6029 __asm__ __volatile__("": : :"memory");
6030 s->sequence+=2;
6031}
6032struct timespec {
6033 __kernel_time_t tv_sec;
6034 long tv_nsec;
6035};
6036struct timeval {
6037 __kernel_time_t tv_sec;
6038 __kernel_suseconds_t tv_usec;
6039};
6040struct timezone {
6041 int tz_minuteswest;
6042 int tz_dsttime;
6043};
6044extern struct timezone sys_tz;
6045static inline __attribute__((always_inline)) int timespec_equal(const struct timespec *a,
6046 const struct timespec *b)
6047{
6048 return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
6049}
6050static inline __attribute__((always_inline)) int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
6051{
6052 if (__builtin_constant_p(((lhs->tv_sec < rhs->tv_sec))) ? !!((lhs->tv_sec < rhs->tv_sec)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/time.h", .line = 58, }; ______r = !!((lhs->tv_sec < rhs->tv_sec)); ______f.miss_hit[______r]++; ______r; }))
6053 return -1;
6054 if (__builtin_constant_p(((lhs->tv_sec > rhs->tv_sec))) ? !!((lhs->tv_sec > rhs->tv_sec)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/time.h", .line = 60, }; ______r = !!((lhs->tv_sec > rhs->tv_sec)); ______f.miss_hit[______r]++; ______r; }))
6055 return 1;
6056 return lhs->tv_nsec - rhs->tv_nsec;
6057}
6058static inline __attribute__((always_inline)) int timeval_compare(const struct timeval *lhs, const struct timeval *rhs)
6059{
6060 if (__builtin_constant_p(((lhs->tv_sec < rhs->tv_sec))) ? !!((lhs->tv_sec < rhs->tv_sec)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/time.h", .line = 67, }; ______r = !!((lhs->tv_sec < rhs->tv_sec)); ______f.miss_hit[______r]++; ______r; }))
6061 return -1;
6062 if (__builtin_constant_p(((lhs->tv_sec > rhs->tv_sec))) ? !!((lhs->tv_sec > rhs->tv_sec)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/time.h", .line = 69, }; ______r = !!((lhs->tv_sec > rhs->tv_sec)); ______f.miss_hit[______r]++; ______r; }))
6063 return 1;
6064 return lhs->tv_usec - rhs->tv_usec;
6065}
6066extern unsigned long mktime(const unsigned int year, const unsigned int mon,
6067 const unsigned int day, const unsigned int hour,
6068 const unsigned int min, const unsigned int sec);
6069extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
6070extern struct timespec timespec_add_safe(const struct timespec lhs,
6071 const struct timespec rhs);
6072static inline __attribute__((always_inline)) struct timespec timespec_add(struct timespec lhs,
6073 struct timespec rhs)
6074{
6075 struct timespec ts_delta;
6076 set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec,
6077 lhs.tv_nsec + rhs.tv_nsec);
6078 return ts_delta;
6079}
6080static inline __attribute__((always_inline)) struct timespec timespec_sub(struct timespec lhs,
6081 struct timespec rhs)
6082{
6083 struct timespec ts_delta;
6084 set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
6085 lhs.tv_nsec - rhs.tv_nsec);
6086 return ts_delta;
6087}
6088extern void read_persistent_clock(struct timespec *ts);
6089extern void read_boot_clock(struct timespec *ts);
6090extern int update_persistent_clock(struct timespec now);
6091extern int no_sync_cmos_clock __attribute__((__section__(".data..read_mostly")));
6092void timekeeping_init(void);
6093extern int timekeeping_suspended;
6094unsigned long get_seconds(void);
6095struct timespec current_kernel_time(void);
6096struct timespec __current_kernel_time(void);
6097struct timespec get_monotonic_coarse(void);
6098void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
6099 struct timespec *wtom, struct timespec *sleep);
6100void timekeeping_inject_sleeptime(struct timespec *delta);
6101static inline __attribute__((always_inline)) u32 arch_gettimeoffset(void) { return 0; }
6102extern void do_gettimeofday(struct timeval *tv);
6103extern int do_settimeofday(const struct timespec *tv);
6104extern int do_sys_settimeofday(const struct timespec *tv,
6105 const struct timezone *tz);
6106extern long do_utimes(int dfd, const char *filename, struct timespec *times, int flags);
6107struct itimerval;
6108extern int do_setitimer(int which, struct itimerval *value,
6109 struct itimerval *ovalue);
6110extern unsigned int alarm_setitimer(unsigned int seconds);
6111extern int do_getitimer(int which, struct itimerval *value);
6112extern void getnstimeofday(struct timespec *tv);
6113extern void getrawmonotonic(struct timespec *ts);
6114extern void getnstime_raw_and_real(struct timespec *ts_raw,
6115 struct timespec *ts_real);
6116extern void getboottime(struct timespec *ts);
6117extern void monotonic_to_bootbased(struct timespec *ts);
6118extern void get_monotonic_boottime(struct timespec *ts);
6119extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
6120extern int timekeeping_valid_for_hres(void);
6121extern u64 timekeeping_max_deferment(void);
6122extern void timekeeping_leap_insert(int leapsecond);
6123extern int timekeeping_inject_offset(struct timespec *ts);
6124struct tms;
6125extern void do_sys_times(struct tms *);
6126struct tm {
6127 int tm_sec;
6128 int tm_min;
6129 int tm_hour;
6130 int tm_mday;
6131 int tm_mon;
6132 long tm_year;
6133 int tm_wday;
6134 int tm_yday;
6135};
6136void time_to_tm(time_t totalsecs, int offset, struct tm *result);
6137static inline __attribute__((always_inline)) s64 timespec_to_ns(const struct timespec *ts)
6138{
6139 return ((s64) ts->tv_sec * 1000000000L) + ts->tv_nsec;
6140}
6141static inline __attribute__((always_inline)) s64 timeval_to_ns(const struct timeval *tv)
6142{
6143 return ((s64) tv->tv_sec * 1000000000L) +
6144 tv->tv_usec * 1000L;
6145}
6146extern struct timespec ns_to_timespec(const s64 nsec);
6147extern struct timeval ns_to_timeval(const s64 nsec);
6148static inline __attribute__((always_inline)) __attribute__((always_inline)) void timespec_add_ns(struct timespec *a, u64 ns)
6149{
6150 a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, 1000000000L, &ns);
6151 a->tv_nsec = ns;
6152}
6153struct itimerspec {
6154 struct timespec it_interval;
6155 struct timespec it_value;
6156};
6157struct itimerval {
6158 struct timeval it_interval;
6159 struct timeval it_value;
6160};
6161struct timex {
6162 unsigned int modes;
6163 long offset;
6164 long freq;
6165 long maxerror;
6166 long esterror;
6167 int status;
6168 long constant;
6169 long precision;
6170 long tolerance;
6171 struct timeval time;
6172 long tick;
6173 long ppsfreq;
6174 long jitter;
6175 int shift;
6176 long stabil;
6177 long jitcnt;
6178 long calcnt;
6179 long errcnt;
6180 long stbcnt;
6181 int tai;
6182 int :32; int :32; int :32; int :32;
6183 int :32; int :32; int :32; int :32;
6184 int :32; int :32; int :32;
6185};
6186typedef unsigned long long cycles_t;
6187extern unsigned int cpu_khz;
6188extern unsigned int tsc_khz;
6189extern void disable_TSC(void);
6190static inline __attribute__((always_inline)) cycles_t get_cycles(void)
6191{
6192 unsigned long long ret = 0;
6193 (ret = paravirt_read_tsc());
6194 return ret;
6195}
6196static inline __attribute__((always_inline)) __attribute__((always_inline)) cycles_t vget_cycles(void)
6197{
6198 return (cycles_t)__native_read_tsc();
6199}
6200extern void tsc_init(void);
6201extern void mark_tsc_unstable(char *reason);
6202extern int unsynchronized_tsc(void);
6203extern int check_tsc_unstable(void);
6204extern unsigned long native_calibrate_tsc(void);
6205extern void check_tsc_sync_source(int cpu);
6206extern void check_tsc_sync_target(void);
6207extern int notsc_setup(char *);
6208extern void save_sched_clock_state(void);
6209extern void restore_sched_clock_state(void);
6210extern unsigned long tick_usec;
6211extern unsigned long tick_nsec;
6212extern int time_status;
6213extern void ntp_init(void);
6214extern void ntp_clear(void);
6215static inline __attribute__((always_inline)) int ntp_synced(void)
6216{
6217 return !(time_status & 0x0040);
6218}
6219extern u64 tick_length;
6220extern void second_overflow(void);
6221extern void update_ntp_one_tick(void);
6222extern int do_adjtimex(struct timex *);
6223extern void hardpps(const struct timespec *, const struct timespec *);
6224int read_current_timer(unsigned long *timer_val);
6225extern u64 __attribute__((section(".data"))) jiffies_64;
6226extern unsigned long volatile __attribute__((section(".data"))) jiffies;
6227u64 get_jiffies_64(void);
6228extern unsigned long preset_lpj;
6229extern unsigned int jiffies_to_msecs(const unsigned long j);
6230extern unsigned int jiffies_to_usecs(const unsigned long j);
6231extern unsigned long msecs_to_jiffies(const unsigned int m);
6232extern unsigned long usecs_to_jiffies(const unsigned int u);
6233extern unsigned long timespec_to_jiffies(const struct timespec *value);
6234extern void jiffies_to_timespec(const unsigned long jiffies,
6235 struct timespec *value);
6236extern unsigned long timeval_to_jiffies(const struct timeval *value);
6237extern void jiffies_to_timeval(const unsigned long jiffies,
6238 struct timeval *value);
6239extern clock_t jiffies_to_clock_t(long x);
6240extern unsigned long clock_t_to_jiffies(unsigned long x);
6241extern u64 jiffies_64_to_clock_t(u64 x);
6242extern u64 nsec_to_clock_t(u64 x);
6243extern u64 nsecs_to_jiffies64(u64 n);
6244extern unsigned long nsecs_to_jiffies(u64 n);
6245union ktime {
6246 s64 tv64;
6247};
6248typedef union ktime ktime_t;
6249static inline __attribute__((always_inline)) ktime_t ktime_set(const long secs, const unsigned long nsecs)
6250{
6251 return (ktime_t) { .tv64 = (s64)secs * 1000000000L + (s64)nsecs };
6252}
6253static inline __attribute__((always_inline)) ktime_t timespec_to_ktime(struct timespec ts)
6254{
6255 return ktime_set(ts.tv_sec, ts.tv_nsec);
6256}
6257static inline __attribute__((always_inline)) ktime_t timeval_to_ktime(struct timeval tv)
6258{
6259 return ktime_set(tv.tv_sec, tv.tv_usec * 1000L);
6260}
6261static inline __attribute__((always_inline)) int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
6262{
6263 return cmp1.tv64 == cmp2.tv64;
6264}
6265static inline __attribute__((always_inline)) s64 ktime_to_us(const ktime_t kt)
6266{
6267 struct timeval tv = ns_to_timeval((kt).tv64);
6268 return (s64) tv.tv_sec * 1000000L + tv.tv_usec;
6269}
6270static inline __attribute__((always_inline)) s64 ktime_to_ms(const ktime_t kt)
6271{
6272 struct timeval tv = ns_to_timeval((kt).tv64);
6273 return (s64) tv.tv_sec * 1000L + tv.tv_usec / 1000L;
6274}
6275static inline __attribute__((always_inline)) s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
6276{
6277 return ktime_to_us(({ (ktime_t){ .tv64 = (later).tv64 - (earlier).tv64 }; }));
6278}
6279static inline __attribute__((always_inline)) ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
6280{
6281 return ({ (ktime_t){ .tv64 = (kt).tv64 + (usec * 1000) }; });
6282}
6283static inline __attribute__((always_inline)) ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
6284{
6285 return ({ (ktime_t){ .tv64 = (kt).tv64 - (usec * 1000) }; });
6286}
6287extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
6288extern void ktime_get_ts(struct timespec *ts);
6289static inline __attribute__((always_inline)) ktime_t ns_to_ktime(u64 ns)
6290{
6291 static const ktime_t ktime_zero = { .tv64 = 0 };
6292 return ({ (ktime_t){ .tv64 = (ktime_zero).tv64 + (ns) }; });
6293}
6294enum debug_obj_state {
6295 ODEBUG_STATE_NONE,
6296 ODEBUG_STATE_INIT,
6297 ODEBUG_STATE_INACTIVE,
6298 ODEBUG_STATE_ACTIVE,
6299 ODEBUG_STATE_DESTROYED,
6300 ODEBUG_STATE_NOTAVAILABLE,
6301 ODEBUG_STATE_MAX,
6302};
6303struct debug_obj_descr;
6304struct debug_obj {
6305 struct hlist_node node;
6306 enum debug_obj_state state;
6307 unsigned int astate;
6308 void *object;
6309 struct debug_obj_descr *descr;
6310};
6311struct debug_obj_descr {
6312 const char *name;
6313 void *(*debug_hint) (void *addr);
6314 int (*fixup_init) (void *addr, enum debug_obj_state state);
6315 int (*fixup_activate) (void *addr, enum debug_obj_state state);
6316 int (*fixup_destroy) (void *addr, enum debug_obj_state state);
6317 int (*fixup_free) (void *addr, enum debug_obj_state state);
6318};
6319static inline __attribute__((always_inline)) void
6320debug_object_init (void *addr, struct debug_obj_descr *descr) { }
6321static inline __attribute__((always_inline)) void
6322debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
6323static inline __attribute__((always_inline)) void
6324debug_object_activate (void *addr, struct debug_obj_descr *descr) { }
6325static inline __attribute__((always_inline)) void
6326debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
6327static inline __attribute__((always_inline)) void
6328debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
6329static inline __attribute__((always_inline)) void
6330debug_object_free (void *addr, struct debug_obj_descr *descr) { }
6331static inline __attribute__((always_inline)) void debug_objects_early_init(void) { }
6332static inline __attribute__((always_inline)) void debug_objects_mem_init(void) { }
6333static inline __attribute__((always_inline)) void
6334debug_check_no_obj_freed(const void *address, unsigned long size) { }
6335struct tvec_base;
6336struct timer_list {
6337 struct list_head entry;
6338 unsigned long expires;
6339 struct tvec_base *base;
6340 void (*function)(unsigned long);
6341 unsigned long data;
6342 int slack;
6343 int start_pid;
6344 void *start_site;
6345 char start_comm[16];
6346 struct lockdep_map lockdep_map;
6347};
6348extern struct tvec_base boot_tvec_bases;
6349void init_timer_key(struct timer_list *timer,
6350 const char *name,
6351 struct lock_class_key *key);
6352void init_timer_deferrable_key(struct timer_list *timer,
6353 const char *name,
6354 struct lock_class_key *key);
/* With debugobjects off, destroying an on-stack timer needs no teardown. */
static inline __attribute__((always_inline)) void destroy_timer_on_stack(struct timer_list *timer) { }
/* On-stack initialization degenerates to the ordinary init in this config. */
static inline __attribute__((always_inline)) void init_timer_on_stack_key(struct timer_list *timer,
         const char *name,
         struct lock_class_key *key)
{
 init_timer_key(timer, name, key);
}
/* Initialize a timer and set its callback and argument in one call. */
static inline __attribute__((always_inline)) void setup_timer_key(struct timer_list * timer,
    const char *name,
    struct lock_class_key *key,
    void (*function)(unsigned long),
    unsigned long data)
{
 timer->function = function;
 timer->data = data;
 init_timer_key(timer, name, key);
}
/* Same as setup_timer_key() but for timers that live on the stack. */
static inline __attribute__((always_inline)) void setup_timer_on_stack_key(struct timer_list *timer,
     const char *name,
     struct lock_class_key *key,
     void (*function)(unsigned long),
     unsigned long data)
{
 timer->function = function;
 timer->data = data;
 init_timer_on_stack_key(timer, name, key);
}
extern void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
      const char *name,
      struct lock_class_key *key,
      void (*function)(unsigned long),
      unsigned long data);
6387static inline __attribute__((always_inline)) int timer_pending(const struct timer_list * timer)
6388{
6389 return timer->entry.next != ((void *)0);
6390}
/* Core timer API, implemented in kernel/timer.c. */
extern void add_timer_on(struct timer_list *timer, int cpu);
extern int del_timer(struct timer_list * timer);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
extern void set_timer_slack(struct timer_list *time, int slack_hz);
extern unsigned long get_next_timer_interrupt(unsigned long now);
/* Timer statistics collection support. */
extern int timer_stats_active;
extern void init_timer_stats(void);
extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
         void *timerf, char *comm,
         unsigned int timer_flag);
extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
            void *addr);
/*
 * timer_stats_timer_set_start_info - record the caller's return address as
 * the timer's start site for timer statistics.  The condition below is the
 * macro expansion of `if (unlikely(!timer_stats_active)) return;` with
 * ftrace branch profiling instrumentation; when stats are inactive the
 * bookkeeping call is skipped.
 */
static inline __attribute__((always_inline)) void timer_stats_timer_set_start_info(struct timer_list *timer)
{
 if (__builtin_constant_p((((__builtin_constant_p(!timer_stats_active) ? !!(!timer_stats_active) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/timer.h", .line = 252, }; ______r = __builtin_expect(!!(!timer_stats_active), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(!timer_stats_active) ? !!(!timer_stats_active) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/timer.h", .line = 252, }; ______r = __builtin_expect(!!(!timer_stats_active), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/timer.h", .line = 252, }; ______r = !!(((__builtin_constant_p(!timer_stats_active) ? !!(!timer_stats_active) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/timer.h", .line = 252, }; ______r = __builtin_expect(!!(!timer_stats_active), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
  return;
 __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
}
/* Forget the recorded start site for this timer's statistics entry. */
static inline __attribute__((always_inline)) void timer_stats_timer_clear_start_info(struct timer_list *timer)
{
 timer->start_site = ((void *)0);
}
extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
 extern int del_timer_sync(struct timer_list *timer);
extern void init_timers(void);
extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
/* Round jiffies values toward whole-second boundaries to batch wakeups. */
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
unsigned long round_jiffies_relative(unsigned long j);
unsigned long __round_jiffies_up(unsigned long j, int cpu);
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
unsigned long round_jiffies_up(unsigned long j);
unsigned long round_jiffies_up_relative(unsigned long j);
struct workqueue_struct;
struct work_struct;
/* Work item callback signature. */
typedef void (*work_func_t)(struct work_struct *work);
/* Flag bits packed into work_struct.data alongside the owning cwq pointer. */
enum {
 WORK_STRUCT_PENDING_BIT = 0,
 WORK_STRUCT_DELAYED_BIT = 1,
 WORK_STRUCT_CWQ_BIT = 2,
 WORK_STRUCT_LINKED_BIT = 3,
 WORK_STRUCT_COLOR_SHIFT = 4,
 WORK_STRUCT_COLOR_BITS = 4,
 WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
 WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
 WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT,
 WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
 WORK_STRUCT_STATIC = 0,
 WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
 WORK_NO_COLOR = WORK_NR_COLORS,
 WORK_CPU_UNBOUND = 8,
 WORK_CPU_NONE = 8 + 1,
 WORK_CPU_LAST = WORK_CPU_NONE,
 WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
      WORK_STRUCT_COLOR_BITS,
 WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
 WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
 WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
 WORK_BUSY_PENDING = 1 << 0,
 WORK_BUSY_RUNNING = 1 << 1,
};
struct work_struct {
 atomic_long_t data;      /* flag bits (low) + cwq pointer (high) */
 struct list_head entry;
 work_func_t func;
 struct lockdep_map lockdep_map;
};
/* A work item that fires after a timer delay. */
struct delayed_work {
 struct work_struct work;
 struct timer_list timer;
};
/* container_of(): recover the delayed_work from its embedded work_struct. */
static inline __attribute__((always_inline)) struct delayed_work *to_delayed_work(struct work_struct *work)
{
 return ({ const typeof( ((struct delayed_work *)0)->work ) *__mptr = (work); (struct delayed_work *)( (char *)__mptr - __builtin_offsetof(struct delayed_work,work) );});
}
struct execute_work {
 struct work_struct work;
};
/* Debugobjects-off stubs for work items. */
static inline __attribute__((always_inline)) void __init_work(struct work_struct *work, int onstack) { }
static inline __attribute__((always_inline)) void destroy_work_on_stack(struct work_struct *work) { }
static inline __attribute__((always_inline)) unsigned int work_static(struct work_struct *work) { return 0; }
/* Workqueue creation flags and limits. */
enum {
 WQ_NON_REENTRANT = 1 << 0,
 WQ_UNBOUND = 1 << 1,
 WQ_FREEZABLE = 1 << 2,
 WQ_MEM_RECLAIM = 1 << 3,
 WQ_HIGHPRI = 1 << 4,
 WQ_CPU_INTENSIVE = 1 << 5,
 WQ_DYING = 1 << 6,
 WQ_RESCUER = 1 << 7,
 WQ_MAX_ACTIVE = 512,
 WQ_MAX_UNBOUND_PER_CPU = 4,
 WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
/* System-wide workqueues created at boot. */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
        struct lock_class_key *key, const char *lock_name);
/*
 * alloc_ordered_workqueue - create an unbound workqueue with max_active == 1,
 * so queued items execute strictly one at a time.  The statement expression
 * is the expansion of the alloc_workqueue() macro: it supplies a static
 * lockdep class key and derives the lockdep name from 'name' when that is a
 * compile-time constant (the if() carries ftrace branch instrumentation).
 */
static inline __attribute__((always_inline)) struct workqueue_struct *
alloc_ordered_workqueue(const char *name, unsigned int flags)
{
 return ({ static struct lock_class_key __key; const char *__lock_name; if (__builtin_constant_p(((__builtin_constant_p(name)))) ? !!((__builtin_constant_p(name))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/workqueue.h", .line = 337, }; ______r = !!((__builtin_constant_p(name))); ______f.miss_hit[______r]++; ______r; })) __lock_name = (name); else __lock_name = "name"; __alloc_workqueue_key((name), (WQ_UNBOUND | flags), (1), &__key, __lock_name); });
}
/* Workqueue API, implemented in kernel/workqueue.c. */
extern void destroy_workqueue(struct workqueue_struct *wq);
extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
   struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
   struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
   struct delayed_work *work, unsigned long delay);
extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
/* schedule_* variants queue on the system workqueue. */
extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
     unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);
/* Flush/cancel primitives; the *_sync forms wait for completion. */
extern bool flush_work(struct work_struct *work);
extern bool flush_work_sync(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool flush_delayed_work_sync(struct delayed_work *work);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
         int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);
/*
 * cancel_delayed_work - try to stop a delayed work item before its timer
 * fires.  Returns true when the timer was deleted; in that case the PENDING
 * bit in work->work.data is cleared so the item can be requeued.  The if()
 * condition is the ftrace-branch-profiled expansion of plain `if (ret)`.
 */
static inline __attribute__((always_inline)) bool cancel_delayed_work(struct delayed_work *work)
{
 bool ret;
 ret = del_timer_sync(&work->timer);
 if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/workqueue.h", .line = 395, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; }))
  clear_bit(WORK_STRUCT_PENDING_BIT, ((unsigned long *)(&(&work->work)->data)));
 return ret;
}
/*
 * __cancel_delayed_work - like cancel_delayed_work() but uses del_timer()
 * instead of del_timer_sync(), so it does not wait for a concurrently
 * running timer handler; usable from contexts where sleeping is not allowed.
 */
static inline __attribute__((always_inline)) bool __cancel_delayed_work(struct delayed_work *work)
{
 bool ret;
 ret = del_timer(&work->timer);
 if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/workqueue.h", .line = 410, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; }))
  clear_bit(WORK_STRUCT_PENDING_BIT, ((unsigned long *)(&(&work->work)->data)));
 return ret;
}
/* Deprecated aliases kept for old callers; both forward to
 * cancel_delayed_work_sync().  The wq argument of the first is unused. */
static inline __attribute__((always_inline)) __attribute__((deprecated))
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
     struct delayed_work *work)
{
 cancel_delayed_work_sync(work);
}
static inline __attribute__((always_inline)) __attribute__((deprecated))
void cancel_rearming_delayed_work(struct delayed_work *work)
{
 cancel_delayed_work_sync(work);
}
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
/* Workqueue freezer interface used around suspend/hibernate. */
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
/* Wait-queue entry and its wake callback signature. */
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
struct __wait_queue {
 unsigned int flags;   /* e.g. 0x01 marks an exclusive waiter */
 void *private;        /* usually the waiting task_struct */
 wait_queue_func_t func;
 struct list_head task_list;
};
/* Key identifying the (word, bit) a bit-waiter is sleeping on. */
struct wait_bit_key {
 void *flags;
 int bit_nr;
};
struct wait_bit_queue {
 struct wait_bit_key key;
 wait_queue_t wait;
};
/* Head of a wait queue: spinlock-protected list of waiters. */
struct __wait_queue_head {
 spinlock_t lock;
 struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
struct task_struct;
extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
6588static inline __attribute__((always_inline)) void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
6589{
6590 q->flags = 0;
6591 q->private = p;
6592 q->func = default_wake_function;
6593}
6594static inline __attribute__((always_inline)) void init_waitqueue_func_entry(wait_queue_t *q,
6595 wait_queue_func_t func)
6596{
6597 q->flags = 0;
6598 q->private = ((void *)0);
6599 q->func = func;
6600}
/* True when at least one waiter is queued. */
static inline __attribute__((always_inline)) int waitqueue_active(wait_queue_head_t *q)
{
 return !list_empty(&q->task_list);
}
/* Locked variants that take q->lock internally. */
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
/* __-prefixed helpers assume the caller already holds q->lock. */
static inline __attribute__((always_inline)) void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
 list_add(&new->task_list, &head->task_list);
}
/* Exclusive waiters (flag 0x01) are woken one at a time. */
static inline __attribute__((always_inline)) void __add_wait_queue_exclusive(wait_queue_head_t *q,
           wait_queue_t *wait)
{
 wait->flags |= 0x01;
 __add_wait_queue(q, wait);
}
static inline __attribute__((always_inline)) void __add_wait_queue_tail(wait_queue_head_t *head,
      wait_queue_t *new)
{
 list_add_tail(&new->task_list, &head->task_list);
}
static inline __attribute__((always_inline)) void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
         wait_queue_t *wait)
{
 wait->flags |= 0x01;
 __add_wait_queue_tail(q, wait);
}
static inline __attribute__((always_inline)) void __remove_wait_queue(wait_queue_head_t *head,
       wait_queue_t *old)
{
 list_del(&old->task_list);
}
/* Wake-up primitives; 'nr' limits how many exclusive waiters are woken. */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
   void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
/* Bit-wait infrastructure: sleep until a bit in a word clears/sets. */
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
/* Legacy sleep interfaces. */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
          signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
        signed long timeout);
/* prepare/finish pairs bracket a wait loop on a queue. */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
     unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/*
 * wait_on_bit - sleep (via 'action', under 'mode') until the given bit in
 * *word clears.  The fast path returns 0 immediately when the bit is already
 * clear; the condition is the ftrace-instrumented expansion of
 * `if (!test_bit(bit, word))`.
 */
static inline __attribute__((always_inline)) int wait_on_bit(void *word, int bit,
    int (*action)(void *), unsigned mode)
{
 if (__builtin_constant_p(((!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word)))))) ? !!((!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/wait.h", .line = 637, }; ______r = !!((!(__builtin_constant_p((bit)) ? constant_test_bit((bit), (word)) : variable_test_bit((bit), (word))))); ______f.miss_hit[______r]++; ______r; }))
  return 0;
 return out_of_line_wait_on_bit(word, bit, action, mode);
}
/*
 * wait_on_bit_lock - acquire a bit lock: atomically try test_and_set_bit();
 * on success return 0, otherwise sleep via the out-of-line slow path until
 * the bit can be taken.  The if() carries ftrace branch instrumentation.
 */
static inline __attribute__((always_inline)) int wait_on_bit_lock(void *word, int bit,
    int (*action)(void *), unsigned mode)
{
 if (__builtin_constant_p(((!test_and_set_bit(bit, word)))) ? !!((!test_and_set_bit(bit, word))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/wait.h", .line = 661, }; ______r = !!((!test_and_set_bit(bit, word))); ______f.miss_hit[______r]++; ______r; }))
  return 0;
 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
/* One-shot/counting synchronization: waiters block until 'done' > 0. */
struct completion {
 unsigned int done;
 wait_queue_head_t wait;
};
/* Reset the counter and (re)initialize the wait queue; the do/while is the
 * expanded init_waitqueue_head() macro providing a static lockdep key. */
static inline __attribute__((always_inline)) void init_completion(struct completion *x)
{
 x->done = 0;
 do { static struct lock_class_key __key; __init_waitqueue_head((&x->wait), &__key); } while (0);
}
/* Blocking waiters, with interruptible/killable/timeout variants. */
extern void wait_for_completion(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
extern unsigned long wait_for_completion_timeout(struct completion *x,
         unsigned long timeout);
extern long wait_for_completion_interruptible_timeout(
   struct completion *x, unsigned long timeout);
extern long wait_for_completion_killable_timeout(
   struct completion *x, unsigned long timeout);
extern bool try_wait_for_completion(struct completion *x);
extern bool completion_done(struct completion *x);
/* complete() releases one waiter; complete_all() releases everyone. */
extern void complete(struct completion *);
extern void complete_all(struct completion *);
/* Platform hooks for idle and power-off. */
extern void (*pm_idle)(void);
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);
struct device;
extern const char power_group_name[];
/* Opaque PM event carried through suspend/resume callbacks. */
typedef struct pm_message {
 int event;
} pm_message_t;
/*
 * Device power-management callbacks.  The *_noirq variants run with
 * interrupts disabled; runtime_* serve runtime PM rather than system sleep.
 */
struct dev_pm_ops {
 int (*prepare)(struct device *dev);
 void (*complete)(struct device *dev);
 int (*suspend)(struct device *dev);
 int (*resume)(struct device *dev);
 int (*freeze)(struct device *dev);
 int (*thaw)(struct device *dev);
 int (*poweroff)(struct device *dev);
 int (*restore)(struct device *dev);
 int (*suspend_noirq)(struct device *dev);
 int (*resume_noirq)(struct device *dev);
 int (*freeze_noirq)(struct device *dev);
 int (*thaw_noirq)(struct device *dev);
 int (*poweroff_noirq)(struct device *dev);
 int (*restore_noirq)(struct device *dev);
 int (*runtime_suspend)(struct device *dev);
 int (*runtime_resume)(struct device *dev);
 int (*runtime_idle)(struct device *dev);
};
extern struct dev_pm_ops generic_subsys_pm_ops;
/* Runtime-PM state machine: current status ... */
enum rpm_status {
 RPM_ACTIVE = 0,
 RPM_RESUMING,
 RPM_SUSPENDED,
 RPM_SUSPENDING,
};
/* ... and the pending asynchronous request, if any. */
enum rpm_request {
 RPM_REQ_NONE = 0,
 RPM_REQ_IDLE,
 RPM_REQ_SUSPEND,
 RPM_REQ_AUTOSUSPEND,
 RPM_REQ_RESUME,
};
struct wakeup_source;
/*
 * Per-device PM bookkeeping embedded in struct device: system-sleep state
 * on top, runtime-PM counters, flags and accounting below.
 */
struct dev_pm_info {
 pm_message_t power_state;
 unsigned int can_wakeup:1;
 unsigned int async_suspend:1;
 bool is_prepared:1;
 bool is_suspended:1;
 spinlock_t lock;
 struct list_head entry;
 struct completion completion;
 struct wakeup_source *wakeup;
 struct timer_list suspend_timer;   /* drives autosuspend */
 unsigned long timer_expires;
 struct work_struct work;           /* async runtime-PM request */
 wait_queue_head_t wait_queue;
 atomic_t usage_count;
 atomic_t child_count;
 unsigned int disable_depth:3;
 unsigned int ignore_children:1;
 unsigned int idle_notification:1;
 unsigned int request_pending:1;
 unsigned int deferred_resume:1;
 unsigned int run_wake:1;
 unsigned int runtime_auto:1;
 unsigned int no_callbacks:1;
 unsigned int irq_safe:1;
 unsigned int use_autosuspend:1;
 unsigned int timer_autosuspends:1;
 enum rpm_request request;
 enum rpm_status runtime_status;
 int runtime_error;
 int autosuspend_delay;
 unsigned long last_busy;
 unsigned long active_jiffies;
 unsigned long suspended_jiffies;
 unsigned long accounting_timestamp;
 void *subsys_data;
};
extern void update_pm_runtime_accounting(struct device *dev);
/* A set of PM operations shared by every device in a power domain. */
struct dev_power_domain {
 struct dev_pm_ops ops;
};
/* System-sleep core entry points (drivers/base/power/main.c). */
extern void device_pm_lock(void);
extern void dpm_resume_noirq(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
extern void dpm_resume(pm_message_t state);
extern void dpm_complete(pm_message_t state);
extern void device_pm_unlock(void);
extern int dpm_suspend_noirq(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);
extern void __suspend_report_result(const char *function, void *fn, int ret);
extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
/* Generic helpers subsystems can plug into dev_pm_ops. */
extern int pm_generic_prepare(struct device *dev);
extern int pm_generic_suspend(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
extern int pm_generic_restore(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
/* Ordering constraint for dpm_list insertion. */
enum dpm_order {
 DPM_ORDER_NONE,
 DPM_ORDER_DEV_AFTER_PARENT,
 DPM_ORDER_PARENT_BEFORE_DEV,
 DPM_ORDER_DEV_LAST,
};
/*
 * Node mask: one bit per NUMA node.  This build has (1 << 0) == 1 possible
 * node, so every mask is a single word and most operations are trivial.
 * The helpers below are thin wrappers over the generic bitmap API.
 */
typedef struct { unsigned long bits[((((1 << 0)) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
static inline __attribute__((always_inline)) void __node_set(int node, volatile nodemask_t *dstp)
{
 set_bit(node, dstp->bits);
}
static inline __attribute__((always_inline)) void __node_clear(int node, volatile nodemask_t *dstp)
{
 clear_bit(node, dstp->bits);
}
static inline __attribute__((always_inline)) void __nodes_setall(nodemask_t *dstp, int nbits)
{
 bitmap_fill(dstp->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_clear(nodemask_t *dstp, int nbits)
{
 bitmap_zero(dstp->bits, nbits);
}
static inline __attribute__((always_inline)) int __node_test_and_set(int node, nodemask_t *addr)
{
 return test_and_set_bit(node, addr->bits);
}
/* Bitwise set operations on whole masks. */
static inline __attribute__((always_inline)) void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
     const nodemask_t *src2p, int nbits)
{
 bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
     const nodemask_t *src2p, int nbits)
{
 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
     const nodemask_t *src2p, int nbits)
{
 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
     const nodemask_t *src2p, int nbits)
{
 bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_complement(nodemask_t *dstp,
     const nodemask_t *srcp, int nbits)
{
 bitmap_complement(dstp->bits, srcp->bits, nbits);
}
/* Mask predicates. */
static inline __attribute__((always_inline)) int __nodes_equal(const nodemask_t *src1p,
     const nodemask_t *src2p, int nbits)
{
 return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodes_intersects(const nodemask_t *src1p,
     const nodemask_t *src2p, int nbits)
{
 return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodes_subset(const nodemask_t *src1p,
     const nodemask_t *src2p, int nbits)
{
 return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodes_empty(const nodemask_t *srcp, int nbits)
{
 return bitmap_empty(srcp->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodes_full(const nodemask_t *srcp, int nbits)
{
 return bitmap_full(srcp->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodes_weight(const nodemask_t *srcp, int nbits)
{
 return bitmap_weight(srcp->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_shift_right(nodemask_t *dstp,
     const nodemask_t *srcp, int n, int nbits)
{
 bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}
static inline __attribute__((always_inline)) void __nodes_shift_left(nodemask_t *dstp,
     const nodemask_t *srcp, int n, int nbits)
{
 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
/* Iteration: result is clamped to the node count (the expanded min() macro),
 * so "no node found" is reported as (1 << 0), i.e. one past the last node. */
static inline __attribute__((always_inline)) int __first_node(const nodemask_t *srcp)
{
 return ({ int __min1 = ((1 << 0)); int __min2 = (find_first_bit(srcp->bits, (1 << 0))); __min1 < __min2 ? __min1: __min2; });
}
static inline __attribute__((always_inline)) int __next_node(int n, const nodemask_t *srcp)
{
 return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_bit(srcp->bits, (1 << 0), n+1)); __min1 < __min2 ? __min1: __min2; });
}
/* Build a mask containing exactly one node. */
static inline __attribute__((always_inline)) void init_nodemask_of_node(nodemask_t *mask, int node)
{
 __nodes_clear(&(*mask), (1 << 0));
 __node_set((node), &(*mask));
}
static inline __attribute__((always_inline)) int __first_unset_node(const nodemask_t *maskp)
{
 return ({ int __min1 = ((1 << 0)); int __min2 = (find_first_zero_bit(maskp->bits, (1 << 0))); __min1 < __min2 ? __min1: __min2; })
  ;
}
/* Textual formatting/parsing of masks ("%*pb"-style helpers' ancestors). */
static inline __attribute__((always_inline)) int __nodemask_scnprintf(char *buf, int len,
     const nodemask_t *srcp, int nbits)
{
 return bitmap_scnprintf(buf, len, srcp->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodemask_parse_user(const char *buf, int len,
     nodemask_t *dstp, int nbits)
{
 return bitmap_parse_user(buf, len, dstp->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodelist_scnprintf(char *buf, int len,
     const nodemask_t *srcp, int nbits)
{
 return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
 return bitmap_parselist(buf, dstp->bits, nbits);
}
/* Remapping helpers used by mempolicy rebinding. */
static inline __attribute__((always_inline)) int __node_remap(int oldbit,
  const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
 return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
  const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
 bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
  const nodemask_t *relmapp, int nbits)
{
 bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
  int sz, int nbits)
{
 bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}
/* Node attribute classes tracked in the node_states[] mask array. */
enum node_states {
 N_POSSIBLE,
 N_ONLINE,
 N_NORMAL_MEMORY,
 N_HIGH_MEMORY,
 N_CPU,
 NR_NODE_STATES
};
extern nodemask_t node_states[NR_NODE_STATES];
6955static inline __attribute__((always_inline)) int node_state(int node, enum node_states state)
6956{
6957 return node == 0;
6958}
/* Single-node build: node state never changes, so these are no-ops. */
static inline __attribute__((always_inline)) void node_set_state(int node, enum node_states state)
{
}
static inline __attribute__((always_inline)) void node_clear_state(int node, enum node_states state)
{
}
/* Exactly one node exists regardless of the state queried. */
static inline __attribute__((always_inline)) int num_node_state(enum node_states state)
{
 return 1;
}
/* Pair of scratch masks for callers that need temporaries off the stack. */
struct nodemask_scratch {
 nodemask_t mask1;
 nodemask_t mask2;
};
/* numa_node_id - node of the executing CPU; always 0 on this non-NUMA build. */
static inline __attribute__((always_inline)) int numa_node_id(void)
{
 const int this_node = 0;
 return this_node;
}
/* early_cpu_to_node - boot-time cpu->node mapping; every CPU is on node 0. */
static inline __attribute__((always_inline)) int early_cpu_to_node(int cpu)
{
 (void)cpu; /* single-node build: the cpu number is irrelevant */
 return 0;
}
/* Nothing to build without a cpu->node map. */
static inline __attribute__((always_inline)) void setup_node_to_cpumask_map(void) { }
extern const struct cpumask *cpu_coregroup_mask(int cpu);
/* No-op: no multi-package topology information to fix up here. */
static inline __attribute__((always_inline)) void arch_fix_phys_package_id(int num, u32 slot)
{
}
struct pci_bus;
void x86_pci_root_bus_res_quirks(struct pci_bus *b);
/* get_mp_bus_to_node - PCI bus number -> NUMA node; trivially node 0 here. */
static inline __attribute__((always_inline)) int get_mp_bus_to_node(int busnum)
{
 (void)busnum;
 return 0;
}
/* No bus->node or apicid->node tables to maintain on this build. */
static inline __attribute__((always_inline)) void set_mp_bus_to_node(int busnum, int node)
{
}
static inline __attribute__((always_inline)) void set_apicid_to_node(int apicid, s16 node)
{
}
/* numa_cpu_node - node for a CPU; -1 ("no node") since no NUMA table exists. */
static inline __attribute__((always_inline)) int numa_cpu_node(int cpu)
{
 (void)cpu;
 return -1;
}
extern void set_highmem_pages_init(void);
/* NUMA bookkeeping stubs: nothing to record without NUMA support. */
static inline __attribute__((always_inline)) void numa_set_node(int cpu, int node) { }
static inline __attribute__((always_inline)) void numa_clear_node(int cpu) { }
static inline __attribute__((always_inline)) void init_cpu_to_node(void) { }
static inline __attribute__((always_inline)) void numa_add_cpu(int cpu) { }
static inline __attribute__((always_inline)) void numa_remove_cpu(int cpu) { }
/*
 * Sleeping lock.  count: 1 = unlocked, 0 = locked, negative = locked with
 * waiters.  owner/name/magic are debug fields; dep_map serves lockdep.
 */
struct mutex {
 atomic_t count;
 spinlock_t wait_lock;
 struct list_head wait_list;
 struct task_struct *owner;
 const char *name;
 void *magic;
 struct lockdep_map dep_map;
};
/* A task blocked on a mutex, linked into mutex.wait_list. */
struct mutex_waiter {
 struct list_head list;
 struct task_struct *task;
 void *magic;
};
extern void mutex_destroy(struct mutex *lock);
extern void __mutex_init(struct mutex *lock, const char *name,
    struct lock_class_key *key);
/* True when count != 1, i.e. someone holds (or is acquiring) the mutex. */
static inline __attribute__((always_inline)) int mutex_is_locked(struct mutex *lock)
{
 return atomic_read(&lock->count) != 1;
}
/* Lock/unlock API; _nested variants carry a lockdep subclass. */
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
extern int __attribute__((warn_unused_result)) mutex_lock_interruptible_nested(struct mutex *lock,
     unsigned int subclass);
extern int __attribute__((warn_unused_result)) mutex_lock_killable_nested(struct mutex *lock,
     unsigned int subclass);
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
/* Per-mm architecture context: LDT descriptor table and vdso base. */
typedef struct {
 void *ldt;
 int size;
 struct mutex lock;
 void *vdso;
} mm_context_t;
void leave_mm(int cpu);
/* How this kernel is running relative to the Xen hypervisor. */
enum xen_domain_type {
 XEN_NATIVE,      /* bare metal */
 XEN_PV_DOMAIN,   /* paravirtualized guest */
 XEN_HVM_DOMAIN,  /* fully virtualized guest */
};
/*
 * MMIO accessors: a single mov through a volatile pointer.  The plain
 * readX/writeX variants carry a "memory" clobber so the compiler cannot
 * reorder them against surrounding memory accesses; the __-prefixed
 * variants omit the clobber (relaxed ordering).
 */
static inline __attribute__((always_inline)) unsigned char readb(const volatile void *addr) { unsigned char ret; asm volatile("mov" "b" " %1,%0":"=q" (ret) :"m" (*(volatile unsigned char *)addr) :"memory"); return ret; }
static inline __attribute__((always_inline)) unsigned short readw(const volatile void *addr) { unsigned short ret; asm volatile("mov" "w" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned short *)addr) :"memory"); return ret; }
static inline __attribute__((always_inline)) unsigned int readl(const volatile void *addr) { unsigned int ret; asm volatile("mov" "l" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned int *)addr) :"memory"); return ret; }
static inline __attribute__((always_inline)) unsigned char __readb(const volatile void *addr) { unsigned char ret; asm volatile("mov" "b" " %1,%0":"=q" (ret) :"m" (*(volatile unsigned char *)addr) ); return ret; }
static inline __attribute__((always_inline)) unsigned short __readw(const volatile void *addr) { unsigned short ret; asm volatile("mov" "w" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned short *)addr) ); return ret; }
static inline __attribute__((always_inline)) unsigned int __readl(const volatile void *addr) { unsigned int ret; asm volatile("mov" "l" " %1,%0":"=r" (ret) :"m" (*(volatile unsigned int *)addr) ); return ret; }
static inline __attribute__((always_inline)) void writeb(unsigned char val, volatile void *addr) { asm volatile("mov" "b" " %0,%1": :"q" (val), "m" (*(volatile unsigned char *)addr) :"memory"); }
static inline __attribute__((always_inline)) void writew(unsigned short val, volatile void *addr) { asm volatile("mov" "w" " %0,%1": :"r" (val), "m" (*(volatile unsigned short *)addr) :"memory"); }
static inline __attribute__((always_inline)) void writel(unsigned int val, volatile void *addr) { asm volatile("mov" "l" " %0,%1": :"r" (val), "m" (*(volatile unsigned int *)addr) :"memory"); }
static inline __attribute__((always_inline)) void __writeb(unsigned char val, volatile void *addr) { asm volatile("mov" "b" " %0,%1": :"q" (val), "m" (*(volatile unsigned char *)addr) ); }
static inline __attribute__((always_inline)) void __writew(unsigned short val, volatile void *addr) { asm volatile("mov" "w" " %0,%1": :"r" (val), "m" (*(volatile unsigned short *)addr) ); }
static inline __attribute__((always_inline)) void __writel(unsigned int val, volatile void *addr) { asm volatile("mov" "l" " %0,%1": :"r" (val), "m" (*(volatile unsigned int *)addr) ); }
/*
 * Linear-map address conversion: kernel virtual and physical addresses
 * differ by the constant 0xC0000000 (the 32-bit kernel mapping offset).
 */
static inline __attribute__((always_inline)) phys_addr_t virt_to_phys(volatile void *address)
{
 return (((unsigned long)(address)) - ((unsigned long)(0xC0000000UL)));
}
static inline __attribute__((always_inline)) void *phys_to_virt(phys_addr_t address)
{
 return ((void *)((unsigned long)(address)+((unsigned long)(0xC0000000UL))));
}
/* ISA bus addresses equal physical addresses, truncated to 32 bits. */
static inline __attribute__((always_inline)) unsigned int isa_virt_to_bus(volatile void *address)
{
 return (unsigned int)virt_to_phys(address);
}
/* ioremap family: map device memory with various cache attributes. */
extern void *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void *ioremap_cache(resource_size_t offset, unsigned long size);
extern void *ioremap_prot(resource_size_t offset, unsigned long size,
    unsigned long prot_val);
/* Plain ioremap() defaults to the uncached variant. */
static inline __attribute__((always_inline)) void *ioremap(resource_size_t offset, unsigned long size)
{
 return ioremap_nocache(offset, size);
}
extern void iounmap(volatile void *addr);
extern void set_iounmap_nonlazy(void);
/* Cookie-based I/O accessors that work for both MMIO and port I/O. */
extern unsigned int ioread8(void *);
extern unsigned int ioread16(void *);
extern unsigned int ioread16be(void *);
extern unsigned int ioread32(void *);
extern unsigned int ioread32be(void *);
extern void iowrite8(u8, void *);
extern void iowrite16(u16, void *);
extern void iowrite16be(u16, void *);
extern void iowrite32(u32, void *);
extern void iowrite32be(u32, void *);
extern void ioread8_rep(void *port, void *buf, unsigned long count);
extern void ioread16_rep(void *port, void *buf, unsigned long count);
extern void ioread32_rep(void *port, void *buf, unsigned long count);
extern void iowrite8_rep(void *port, const void *buf, unsigned long count);
extern void iowrite16_rep(void *port, const void *buf, unsigned long count);
extern void iowrite32_rep(void *port, const void *buf, unsigned long count);
extern void *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void *);
struct pci_dev;
extern void *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void *);
struct vm_area_struct;
/* One vmalloc/ioremap region on the singly linked vmlist. */
struct vm_struct {
 struct vm_struct *next;
 void *addr;
 unsigned long size;
 unsigned long flags;
 struct page **pages;
 unsigned int nr_pages;
 phys_addr_t phys_addr;
 void *caller;
};
/* vmalloc/vmap allocation and mapping API (definitions live elsewhere). */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
    int node, pgprot_t prot);
extern void vm_unmap_aliases(void);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vmalloc_init(void);
extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
   unsigned long start, unsigned long end, gfp_t gfp_mask,
   pgprot_t prot, int node, void *caller);
extern void vfree(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
   unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
       unsigned long pgoff);
void vmalloc_sync_all(void);
7140static inline __attribute__((always_inline)) size_t get_vm_area_size(const struct vm_struct *area)
7141{
7142 return area->size - ((1UL) << 12);
7143}
/* Reservation/mapping API for kernel virtual-address areas. */
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
     unsigned long flags, void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
     unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
     unsigned long flags,
     unsigned long start, unsigned long end,
     void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
   struct page ***pages);
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
        pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
extern struct vm_struct *alloc_vm_area(size_t size);
extern void free_vm_area(struct vm_struct *area);
/* Read/write vmalloc space byte-wise (used by /dev/kmem and kcore). */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
extern __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) void vm_area_register_early(struct vm_struct *vm, size_t align);
extern struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
         const size_t *sizes, int nr_vms,
         size_t align);
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
/*
 * Fill @count bytes of the (memory-mapped) region at @addr with @val.
 * On this architecture MMIO space is ordinary memory, so the fill is a
 * plain builtin memset after stripping the volatile qualifier.
 */
static inline __attribute__((always_inline)) void
memset_io(volatile void *addr, unsigned char val, size_t count)
{
 void *dst = (void *)addr;
 __builtin_memset(dst, val, count);
}
/*
 * Copy @count bytes out of a (memory-mapped) region into ordinary memory.
 * Implemented as a plain builtin memcpy once the volatile qualifier on
 * the source has been cast away.
 */
static inline __attribute__((always_inline)) void
memcpy_fromio(void *dst, const volatile void *src, size_t count)
{
 const void *from = (const void *)src;
 __builtin_memcpy(dst, from, count);
}
/*
 * Copy @count bytes from ordinary memory into a (memory-mapped) region.
 * The volatile qualifier on the destination is cast away and the copy is
 * delegated to the builtin memcpy.
 */
static inline __attribute__((always_inline)) void
memcpy_toio(volatile void *dst, const void *src, size_t count)
{
 void *to = (void *)dst;
 __builtin_memcpy(to, src, count);
}
/* No-op on this configuration: stores are not buffered in a way that
 * needs an explicit flush. */
static inline __attribute__((always_inline)) void flush_write_buffers(void)
{
}
extern void native_io_delay(void);
extern int io_delay_type;
extern void io_delay_init(void);
/*
 * Port I/O families for byte/word/long: out/in, the _p variants that add
 * slow_down_io() after the access, and rep-string outs/ins for blocks.
 */
static inline __attribute__((always_inline)) void outb(unsigned char value, int port) { asm volatile("out" "b" " %" "b" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((always_inline)) unsigned char inb(int port) { unsigned char value; asm volatile("in" "b" " %w1, %" "b" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((always_inline)) void outb_p(unsigned char value, int port) { outb(value, port); slow_down_io(); } static inline __attribute__((always_inline)) unsigned char inb_p(int port) { unsigned char value = inb(port); slow_down_io(); return value; } static inline __attribute__((always_inline)) void outsb(int port, const void *addr, unsigned long count) { asm volatile("rep; outs" "b" : "+S"(addr), "+c"(count) : "d"(port)); } static inline __attribute__((always_inline)) void insb(int port, void *addr, unsigned long count) { asm volatile("rep; ins" "b" : "+D"(addr), "+c"(count) : "d"(port)); }
static inline __attribute__((always_inline)) void outw(unsigned short value, int port) { asm volatile("out" "w" " %" "w" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((always_inline)) unsigned short inw(int port) { unsigned short value; asm volatile("in" "w" " %w1, %" "w" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((always_inline)) void outw_p(unsigned short value, int port) { outw(value, port); slow_down_io(); } static inline __attribute__((always_inline)) unsigned short inw_p(int port) { unsigned short value = inw(port); slow_down_io(); return value; } static inline __attribute__((always_inline)) void outsw(int port, const void *addr, unsigned long count) { asm volatile("rep; outs" "w" : "+S"(addr), "+c"(count) : "d"(port)); } static inline __attribute__((always_inline)) void insw(int port, void *addr, unsigned long count) { asm volatile("rep; ins" "w" : "+D"(addr), "+c"(count) : "d"(port)); }
static inline __attribute__((always_inline)) void outl(unsigned int value, int port) { asm volatile("out" "l" " %" "" "0, %w1" : : "a"(value), "Nd"(port)); } static inline __attribute__((always_inline)) unsigned int inl(int port) { unsigned int value; asm volatile("in" "l" " %w1, %" "" "0" : "=a"(value) : "Nd"(port)); return value; } static inline __attribute__((always_inline)) void outl_p(unsigned int value, int port) { outl(value, port); slow_down_io(); } static inline __attribute__((always_inline)) unsigned int inl_p(int port) { unsigned int value = inl(port); slow_down_io(); return value; } static inline __attribute__((always_inline)) void outsl(int port, const void *addr, unsigned long count) { asm volatile("rep; outs" "l" : "+S"(addr), "+c"(count) : "d"(port)); } static inline __attribute__((always_inline)) void insl(int port, void *addr, unsigned long count) { asm volatile("rep; ins" "l" : "+D"(addr), "+c"(count) : "d"(port)); }
extern void *xlate_dev_mem_ptr(unsigned long phys);
extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
    unsigned long prot_val);
extern void *ioremap_wc(resource_size_t offset, unsigned long size);
/* Boot-time (pre-vmalloc) fixmap-backed ioremap. */
extern void early_ioremap_init(void);
extern void early_ioremap_reset(void);
extern void *early_ioremap(resource_size_t phys_addr,
       unsigned long size);
extern void *early_memremap(resource_size_t phys_addr,
        unsigned long size);
extern void early_iounmap(void *addr, unsigned long size);
extern void fixup_early_ioremap(void);
extern bool is_early_ioremap_ptep(pte_t *ptep);
/* SMP boot trampoline image and the low-memory copy it is relocated to. */
extern const unsigned char x86_trampoline_start [];
extern const unsigned char x86_trampoline_end [];
extern unsigned char *x86_trampoline_base;
extern unsigned long init_rsp;
extern unsigned long initial_code;
extern unsigned long initial_gs;
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_trampolines(void);
extern const unsigned char trampoline_data[];
extern const unsigned char trampoline_status[];
/* Physical address of trampoline_data within the relocated trampoline
 * copy: base of the copy plus the symbol's offset in the original image. */
static inline __attribute__((always_inline)) unsigned long trampoline_address(void)
{
 return virt_to_phys(((void *)(x86_trampoline_base + ((const unsigned char *)(trampoline_data) - x86_trampoline_start))));
}
int __acpi_acquire_global_lock(unsigned int *lock);
int __acpi_release_global_lock(unsigned int *lock);
/* ACPI configuration flags, set during boot from firmware tables and
 * command-line options; defined elsewhere. */
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern int acpi_fix_pin2_polarity;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
void acpi_pic_sci_set_trigger(unsigned int, u16);
/* Hook for registering a GSI; replaceable (e.g. by paravirt code). */
extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
      int trigger, int polarity);
7238static inline __attribute__((always_inline)) void disable_acpi(void)
7239{
7240 acpi_disabled = 1;
7241 acpi_pci_disabled = 1;
7242 acpi_noirq = 1;
7243}
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
/* Disable ACPI IRQ routing only. */
static inline __attribute__((always_inline)) void acpi_noirq_set(void) { acpi_noirq = 1; }
/* Disable ACPI for PCI; this implies disabling ACPI IRQ routing too. */
static inline __attribute__((always_inline)) void acpi_disable_pci(void)
{
 acpi_pci_disabled = 1;
 acpi_noirq_set();
}
extern int acpi_suspend_lowlevel(void);
extern const unsigned char acpi_wakeup_code[];
extern void acpi_reserve_wakeup_memory(void);
/*
 * Limit the deepest allowed C-state to C1 on CPUs matching the
 * vendor/family/model checks below, or when amd_e400_c1e_detected is
 * set; otherwise pass max_cstate through unchanged.  The
 * __builtin_constant_p / ftrace_branch_data scaffolding is the
 * preprocessor-expanded form of the kernel's branch-profiling if()
 * macro (origin: arch/x86/include/asm/acpi.h, per the embedded paths).
 */
static inline __attribute__((always_inline)) unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
{
 if (__builtin_constant_p(((boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == 2 && boot_cpu_data.x86_model <= 0x05 && boot_cpu_data.x86_mask < 0x0A))) ? !!((boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == 2 && boot_cpu_data.x86_model <= 0x05 && boot_cpu_data.x86_mask < 0x0A)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h"
 , .line =
 140
 , }; ______r = !!((boot_cpu_data.x86 == 0x0F && boot_cpu_data.x86_vendor == 2 && boot_cpu_data.x86_model <= 0x05 && boot_cpu_data.x86_mask < 0x0A)); ______f.miss_hit[______r]++; ______r; }))
  return 1;
 else if (__builtin_constant_p(((amd_e400_c1e_detected))) ? !!((amd_e400_c1e_detected)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h", .line = 142, }; ______r = !!((amd_e400_c1e_detected)); ______f.miss_hit[______r]++; ______r; }))
  return 1;
 else
  return max_cstate;
}
/*
 * True when the boot CPU's vendor field is 0 or 5.  The large
 * expression is the expanded per-cpu accessor fetching cpu_info for
 * CPU 0 (__per_cpu_offset[0]).
 */
static inline __attribute__((always_inline)) bool arch_has_acpi_pdc(void)
{
 struct cpuinfo_x86 *c = &(*({ do { const void *__vpp_verify = (typeof((&(cpu_info))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_info))) *)(&(cpu_info)))); (typeof((typeof(*(&(cpu_info))) *)(&(cpu_info)))) (__ptr + (((__per_cpu_offset[0])))); }); }));
 return (c->x86_vendor == 0 ||
  c->x86_vendor == 5);
}
/*
 * Set processor-capability (_PDC) bits in buf[2] based on CPU feature
 * flags.  Each giant conditional is the preprocessor-expanded form of a
 * cpu_has() feature test wrapped in the kernel's branch-profiling if()
 * (the ftrace_branch_data literals record the original acpi.h lines).
 * The tested feature-word bits are (4*32+7), (0*32+22) and (4*32+3);
 * the final test clears bit 0x0200 when bit (4*32+3) is absent.
 */
static inline __attribute__((always_inline)) void arch_acpi_set_pdc_bits(u32 *buf)
{
 struct cpuinfo_x86 *c = &(*({ do { const void *__vpp_verify = (typeof((&(cpu_info))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_info))) *)(&(cpu_info)))); (typeof((typeof(*(&(cpu_info))) *)(&(cpu_info)))) (__ptr + (((__per_cpu_offset[0])))); }); }));
 buf[2] |= ((0x0010) | (0x0008) | (0x0002) | (0x0100) | (0x0200));
 if (__builtin_constant_p((((__builtin_constant_p((4*32+ 7)) && ( ((((4*32+ 7))>>5)==0 && (1UL<<(((4*32+ 7))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 7))>>5)==1 && (1UL<<(((4*32+ 7))&31) & (0|0))) || ((((4*32+ 7))>>5)==2 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==3 && (1UL<<(((4*32+ 7))&31) & (0))) || ((((4*32+ 7))>>5)==4 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==5 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==6 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==7 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==8 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==9 && (1UL<<(((4*32+ 7))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 7))) ? constant_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability)))))))) ? !!(((__builtin_constant_p((4*32+ 7)) && ( ((((4*32+ 7))>>5)==0 && (1UL<<(((4*32+ 7))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 7))>>5)==1 && (1UL<<(((4*32+ 7))&31) & (0|0))) || ((((4*32+ 7))>>5)==2 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==3 && (1UL<<(((4*32+ 7))&31) & (0))) || ((((4*32+ 7))>>5)==4 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==5 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==6 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==7 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==8 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==9 && (1UL<<(((4*32+ 7))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 7))) ? 
constant_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h", .line = 161, }; ______r = !!(((__builtin_constant_p((4*32+ 7)) && ( ((((4*32+ 7))>>5)==0 && (1UL<<(((4*32+ 7))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 7))>>5)==1 && (1UL<<(((4*32+ 7))&31) & (0|0))) || ((((4*32+ 7))>>5)==2 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==3 && (1UL<<(((4*32+ 7))&31) & (0))) || ((((4*32+ 7))>>5)==4 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==5 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==6 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==7 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==8 && (1UL<<(((4*32+ 7))&31) & 0)) || ((((4*32+ 7))>>5)==9 && (1UL<<(((4*32+ 7))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 7))) ? constant_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 7)), ((unsigned long *)((c)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; }))
  buf[2] |= ((0x0008) | (0x0002) | (0x0020) | (0x0800) | (0x0001));
 if (__builtin_constant_p((((__builtin_constant_p((0*32+22)) && ( ((((0*32+22))>>5)==0 && (1UL<<(((0*32+22))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+22))>>5)==1 && (1UL<<(((0*32+22))&31) & (0|0))) || ((((0*32+22))>>5)==2 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==3 && (1UL<<(((0*32+22))&31) & (0))) || ((((0*32+22))>>5)==4 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==5 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==6 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==7 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==8 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==9 && (1UL<<(((0*32+22))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+22))) ? constant_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+22)) && ( ((((0*32+22))>>5)==0 && (1UL<<(((0*32+22))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+22))>>5)==1 && (1UL<<(((0*32+22))&31) & (0|0))) || ((((0*32+22))>>5)==2 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==3 && (1UL<<(((0*32+22))&31) & (0))) || ((((0*32+22))>>5)==4 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==5 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==6 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==7 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==8 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==9 && (1UL<<(((0*32+22))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+22))) ? 
constant_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h", .line = 164, }; ______r = !!(((__builtin_constant_p((0*32+22)) && ( ((((0*32+22))>>5)==0 && (1UL<<(((0*32+22))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+22))>>5)==1 && (1UL<<(((0*32+22))&31) & (0|0))) || ((((0*32+22))>>5)==2 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==3 && (1UL<<(((0*32+22))&31) & (0))) || ((((0*32+22))>>5)==4 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==5 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==6 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==7 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==8 && (1UL<<(((0*32+22))&31) & 0)) || ((((0*32+22))>>5)==9 && (1UL<<(((0*32+22))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+22))) ? constant_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((0*32+22)), ((unsigned long *)((c)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; }))
  buf[2] |= (0x0004);
 if (__builtin_constant_p(((!(__builtin_constant_p((4*32+ 3)) && ( ((((4*32+ 3))>>5)==0 && (1UL<<(((4*32+ 3))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 3))>>5)==1 && (1UL<<(((4*32+ 3))&31) & (0|0))) || ((((4*32+ 3))>>5)==2 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==3 && (1UL<<(((4*32+ 3))&31) & (0))) || ((((4*32+ 3))>>5)==4 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==5 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==6 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==7 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==8 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==9 && (1UL<<(((4*32+ 3))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 3))) ? constant_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability)))))))) ? !!((!(__builtin_constant_p((4*32+ 3)) && ( ((((4*32+ 3))>>5)==0 && (1UL<<(((4*32+ 3))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 3))>>5)==1 && (1UL<<(((4*32+ 3))&31) & (0|0))) || ((((4*32+ 3))>>5)==2 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==3 && (1UL<<(((4*32+ 3))&31) & (0))) || ((((4*32+ 3))>>5)==4 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==5 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==6 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==7 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==8 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==9 && (1UL<<(((4*32+ 3))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 3))) ? 
constant_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/acpi.h", .line = 170, }; ______r = !!((!(__builtin_constant_p((4*32+ 3)) && ( ((((4*32+ 3))>>5)==0 && (1UL<<(((4*32+ 3))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+ 3))>>5)==1 && (1UL<<(((4*32+ 3))&31) & (0|0))) || ((((4*32+ 3))>>5)==2 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==3 && (1UL<<(((4*32+ 3))&31) & (0))) || ((((4*32+ 3))>>5)==4 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==5 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==6 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==7 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==8 && (1UL<<(((4*32+ 3))&31) & 0)) || ((((4*32+ 3))>>5)==9 && (1UL<<(((4*32+ 3))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+ 3))) ? constant_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))) : variable_test_bit(((4*32+ 3)), ((unsigned long *)((c)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; }))
  buf[2] &= ~((0x0200));
}
extern unsigned long __FIXADDR_TOP;
/*
 * Compile-time fixed virtual-address slots, allocated top-down from
 * __FIXADDR_TOP (see fix_to_virt below).  FIX_BTMAP_* reserves 64*4
 * boot-time ioremap slots, aligned so the range does not cross a
 * 512-slot boundary.
 */
enum fixed_addresses {
 FIX_HOLE,
 FIX_VDSO,
 FIX_DBGP_BASE,
 FIX_EARLYCON_MEM_BASE,
 FIX_APIC_BASE,
 FIX_IO_APIC_BASE_0,
 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + 64 - 1,
 FIX_KMAP_BEGIN,
 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*8)-1,
 FIX_PCIE_MCFG,
 FIX_PARAVIRT_BOOTMAP,
 FIX_TEXT_POKE1,
 FIX_TEXT_POKE0,
 __end_of_permanent_fixed_addresses,
 FIX_BTMAP_END =
  (__end_of_permanent_fixed_addresses ^
   (__end_of_permanent_fixed_addresses + (64 * 4) - 1)) &
  -512
  ? __end_of_permanent_fixed_addresses + (64 * 4) -
    (__end_of_permanent_fixed_addresses & ((64 * 4) - 1))
  : __end_of_permanent_fixed_addresses,
 FIX_BTMAP_BEGIN = FIX_BTMAP_END + (64 * 4) - 1,
 FIX_WP_TEST,
 __end_of_fixed_addresses
};
extern void reserve_top_address(unsigned long reserve);
extern int fixmaps_set;
/* kmap/pkmap page-table state shared with highmem code. */
extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
void native_set_fixmap(enum fixed_addresses idx,
         phys_addr_t phys, pgprot_t flags);
extern void __this_fixmap_does_not_exist(void);
/*
 * Fixmap index -> virtual address: slots grow downward from
 * __FIXADDR_TOP, one 4 KiB page each.  An out-of-range index calls
 * __this_fixmap_does_not_exist(), an extern with no visible definition
 * -- presumably so that misuse fails at link time.  The large
 * conditional is the expanded branch-profiling if().
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long fix_to_virt(const unsigned int idx)
{
 if (__builtin_constant_p(((idx >= __end_of_fixed_addresses))) ? !!((idx >= __end_of_fixed_addresses)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 210, }; ______r = !!((idx >= __end_of_fixed_addresses)); ______f.miss_hit[______r]++; ______r; }))
  __this_fixmap_does_not_exist();
 return (((unsigned long)__FIXADDR_TOP) - ((idx) << 12));
}
/*
 * Virtual address -> fixmap index (inverse of fix_to_virt).  The giant
 * do/while is the expanded BUG_ON() for addresses outside the permanent
 * fixmap range, including branch-profiling instrumentation and the ud2
 * trap + __bug_table entry.
 */
static inline __attribute__((always_inline)) unsigned long virt_to_fix(const unsigned long vaddr)
{
 do { if (__builtin_constant_p((((__builtin_constant_p(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) ? !!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 218, }; ______r = __builtin_expect(!!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) ? !!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 218, }; ______r = __builtin_expect(!!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 218, }; ______r = !!(((__builtin_constant_p(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) ? 
!!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h", .line = 218, }; ______r = __builtin_expect(!!(vaddr >= ((unsigned long)__FIXADDR_TOP) || vaddr < (((unsigned long)__FIXADDR_TOP) - (__end_of_permanent_fixed_addresses << 12))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/fixmap.h"), "i" (218), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 return ((((unsigned long)__FIXADDR_TOP) - ((vaddr)&(~(((1UL) << 12)-1)))) >> 12);
}
/* Install phys at fixmap slot idx, then return the virtual address of
 * phys including its sub-page offset (low 12 bits). */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long
__set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
 __set_fixmap(idx, phys, flags);
 return fix_to_virt(idx) + (phys & (((1UL) << 12) - 1));
}
extern void generic_apic_probe(void);
extern unsigned int apic_verbosity;
extern int local_apic_timer_c2_ok;
extern int disable_apic;
extern void __inquire_remote_apic(int apicid);
/* Query a remote APIC, but only at verbosity level 2 or higher (the
 * conditional is the expanded branch-profiling if()). */
static inline __attribute__((always_inline)) void default_inquire_remote_apic(int apicid)
{
 if (__builtin_constant_p(((apic_verbosity >= 2))) ? !!((apic_verbosity >= 2)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/apic.h", .line = 63, }; ______r = !!((apic_verbosity >= 2)); ______f.miss_hit[______r]++; ______r; }))
  __inquire_remote_apic(apicid);
}
/* True when MP tables were found and the APIC is not disabled. */
static inline __attribute__((always_inline)) bool apic_from_smp_config(void)
{
 return smp_found_config && !disable_apic;
}
/* Always 0 in this configuration (no vSMP support compiled in). */
static inline __attribute__((always_inline)) int is_vsmp_box(void)
{
 return 0;
}
extern void xapic_wait_icr_idle(void);
extern u32 safe_xapic_wait_icr_idle(void);
extern void xapic_icr_write(u32, u32);
extern int setup_profiling_timer(unsigned int);
/*
 * MMIO write of @v to APIC register @reg at the fixmapped APIC base.
 * NOTE(review): the asm is an expanded alternative() — plain movl is
 * patched to xchgl when CPU bug bit (3*32+19) is set.
 */
7359 static inline __attribute__((always_inline)) void native_apic_mem_write(u32 reg, u32 v)
7360 {
7361  volatile u32 *addr = (volatile u32 *)((fix_to_virt(FIX_APIC_BASE)) + reg);
7362  asm volatile ("661:\n\t" "movl %0, %1" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(3*32+19)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "xchgl %0, %1" "\n664:\n" ".previous" : "=r" (v), "=m" (*addr) : "i" (0), "0" (v), "m" (*addr))
7363  ;
7364 }
7365static inline __attribute__((always_inline)) u32 native_apic_mem_read(u32 reg)
7366{
7367 return *((volatile u32 *)((fix_to_virt(FIX_APIC_BASE)) + reg));
7368}
7369extern void native_apic_wait_icr_idle(void);
7370extern u32 native_safe_apic_wait_icr_idle(void);
7371extern void native_apic_icr_write(u32 low, u32 id);
7372extern u64 native_apic_icr_read(void);
7373extern int x2apic_mode;
/* No-op stub: empty body in this configuration. */
7374 static inline __attribute__((always_inline)) void check_x2apic(void)
7375 {
7376 }
/* No-op stub: empty body in this configuration. */
7377 static inline __attribute__((always_inline)) void enable_x2apic(void)
7378 {
7379 }
/* x2APIC is never enabled in this configuration. */
static inline __attribute__((always_inline)) int x2apic_enabled(void)
{
	return 0;
}
/* No-op stub: empty body in this configuration. */
7384 static inline __attribute__((always_inline)) void x2apic_force_phys(void)
7385 {
7386 }
7387extern void enable_IR_x2apic(void);
7388extern int get_physical_broadcast(void);
7389extern int lapic_get_maxlvt(void);
7390extern void clear_local_APIC(void);
7391extern void connect_bsp_APIC(void);
7392extern void disconnect_bsp_APIC(int virt_wire_setup);
7393extern void disable_local_APIC(void);
7394extern void lapic_shutdown(void);
7395extern int verify_local_APIC(void);
7396extern void sync_Arb_IDs(void);
7397extern void init_bsp_APIC(void);
7398extern void setup_local_APIC(void);
7399extern void end_local_APIC_setup(void);
7400extern void bsp_end_local_APIC_setup(void);
7401extern void init_apic_mappings(void);
7402void register_lapic_address(unsigned long address);
7403extern void setup_boot_APIC_clock(void);
7404extern void setup_secondary_APIC_clock(void);
7405extern int APIC_init_uniprocessor(void);
7406extern int apic_force_enable(unsigned long addr);
/* Clustered-box detection is compiled out: always false. */
static inline __attribute__((always_inline)) int apic_is_clustered_box(void)
{
	return 0;
}
7411extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
/*
 * APIC driver operations table: identification/probing hooks, IRQ
 * delivery parameters, id-mapping helpers, IPI senders, secondary-CPU
 * bringup hooks, and raw register accessors (read/write/ICR).
 */
7412 struct apic {
7413  char *name;
7414  int (*probe)(void);
7415  int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
7416  int (*apic_id_registered)(void);
7417  u32 irq_delivery_mode;
7418  u32 irq_dest_mode;
7419  const struct cpumask *(*target_cpus)(void);
7420  int disable_esr;
7421  int dest_logical;
7422  unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
7423  unsigned long (*check_apicid_present)(int apicid);
7424  void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
7425  void (*init_apic_ldr)(void);
7426  void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
7427  void (*setup_apic_routing)(void);
7428  int (*multi_timer_check)(int apic, int irq);
7429  int (*cpu_present_to_apicid)(int mps_cpu);
7430  void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
7431  void (*setup_portio_remap)(void);
7432  int (*check_phys_apicid_present)(int phys_apicid);
7433  void (*enable_apic_mode)(void);
7434  int (*phys_pkg_id)(int cpuid_apic, int index_msb);
7435  int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
7436  unsigned int (*get_apic_id)(unsigned long x);
7437  unsigned long (*set_apic_id)(unsigned int id);
7438  unsigned long apic_id_mask;
7439  unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
7440  unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
7441         const struct cpumask *andmask);
7442  void (*send_IPI_mask)(const struct cpumask *mask, int vector);
7443  void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
7444        int vector);
7445  void (*send_IPI_allbutself)(int vector);
7446  void (*send_IPI_all)(int vector);
7447  void (*send_IPI_self)(int vector);
7448  int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
7449  int trampoline_phys_low;
7450  int trampoline_phys_high;
7451  void (*wait_for_init_deassert)(atomic_t *deassert);
7452  void (*smp_callin_clear_local_apic)(void);
7453  void (*inquire_remote_apic)(int apicid);
7454  u32 (*read)(u32 reg);
7455  void (*write)(u32 reg, u32 v);
7456  u64 (*icr_read)(void);
7457  void (*icr_write)(u32 low, u32 high);
7458  void (*wait_icr_idle)(void);
7459  u32 (*safe_wait_icr_idle)(void);
7460  int (*x86_32_early_logical_apicid)(int cpu);
7461  int (*x86_32_numa_cpu_node)(int cpu);
7462 };
7463extern struct apic *apic;
7464extern struct apic *__apicdrivers[], *__apicdrivers_end[];
7465extern atomic_t init_deasserted;
7466extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
7467static inline __attribute__((always_inline)) u32 apic_read(u32 reg)
7468{
7469 return apic->read(reg);
7470}
7471static inline __attribute__((always_inline)) void apic_write(u32 reg, u32 val)
7472{
7473 apic->write(reg, val);
7474}
7475static inline __attribute__((always_inline)) u64 apic_icr_read(void)
7476{
7477 return apic->icr_read();
7478}
7479static inline __attribute__((always_inline)) void apic_icr_write(u32 low, u32 high)
7480{
7481 apic->icr_write(low, high);
7482}
7483static inline __attribute__((always_inline)) void apic_wait_icr_idle(void)
7484{
7485 apic->wait_icr_idle();
7486}
7487static inline __attribute__((always_inline)) u32 safe_apic_wait_icr_idle(void)
7488{
7489 return apic->safe_wait_icr_idle();
7490}
/* Acknowledge the current interrupt by writing APIC register 0xB0 (EOI). */
static inline __attribute__((always_inline)) void ack_APIC_irq(void)
{
	apic_write(0xB0, 0);
}
/*
 * Extract the APIC id from register value @x: 8 bits for APIC version
 * >= 0x14 or when CPU feature bit (3*32+26) is set, else 4 bits.
 * NOTE(review): the condition is an expanded cpu_has()/ftrace branch
 * macro; left byte-identical because it is order- and layout-sensitive.
 */
7495 static inline __attribute__((always_inline)) unsigned default_get_apic_id(unsigned long x)
7496 {
7497  unsigned int ver = ((apic_read(0x30)) & 0xFFu);
7498  if (__builtin_constant_p(((((ver) >= 0x14) || (__builtin_constant_p((3*32+26)) && ( ((((3*32+26))>>5)==0 && (1UL<<(((3*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((3*32+26))>>5)==1 && (1UL<<(((3*32+26))&31) & (0|0))) || ((((3*32+26))>>5)==2 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==3 && (1UL<<(((3*32+26))&31) & (0))) || ((((3*32+26))>>5)==4 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==5 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==6 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==7 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==8 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==9 && (1UL<<(((3*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((3*32+26))) ? constant_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!((((ver) >= 0x14) || (__builtin_constant_p((3*32+26)) && ( ((((3*32+26))>>5)==0 && (1UL<<(((3*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((3*32+26))>>5)==1 && (1UL<<(((3*32+26))&31) & (0|0))) || ((((3*32+26))>>5)==2 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==3 && (1UL<<(((3*32+26))&31) & (0))) || ((((3*32+26))>>5)==4 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==5 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==6 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==7 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==8 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==9 && (1UL<<(((3*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((3*32+26))) ? 
constant_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/apic.h", .line = 468, }; ______r = !!((((ver) >= 0x14) || (__builtin_constant_p((3*32+26)) && ( ((((3*32+26))>>5)==0 && (1UL<<(((3*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((3*32+26))>>5)==1 && (1UL<<(((3*32+26))&31) & (0|0))) || ((((3*32+26))>>5)==2 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==3 && (1UL<<(((3*32+26))&31) & (0))) || ((((3*32+26))>>5)==4 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==5 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==6 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==7 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==8 && (1UL<<(((3*32+26))&31) & 0)) || ((((3*32+26))>>5)==9 && (1UL<<(((3*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((3*32+26))) ? constant_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((3*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; }))
7499   return (x >> 24) & 0xFF;
7500  else
7501   return (x >> 24) & 0x0F;
7502 }
7503static inline __attribute__((always_inline)) void default_wait_for_init_deassert(atomic_t *deassert)
7504{
7505 while (!atomic_read(deassert))
7506 cpu_relax();
7507 return;
7508}
7509extern struct apic *generic_bigsmp_probe(void);
7510static inline __attribute__((always_inline)) const struct cpumask *default_target_cpus(void)
7511{
7512 return cpu_online_mask;
7513}
7514extern __attribute__((section(".data..percpu" ""))) __typeof__(u16) x86_bios_cpu_apicid; extern __typeof__(u16) *x86_bios_cpu_apicid_early_ptr; extern __typeof__(u16) x86_bios_cpu_apicid_early_map[];
7515static inline __attribute__((always_inline)) unsigned int read_apic_id(void)
7516{
7517 unsigned int reg;
7518 reg = apic_read(0x20);
7519 return apic->get_apic_id(reg);
7520}
7521extern void default_setup_apic_routing(void);
7522extern struct apic apic_noop;
/* No-op driver: report 0xFF as the early logical APIC id for any cpu. */
static inline __attribute__((always_inline)) int noop_x86_32_early_logical_apicid(int cpu)
{
	return 0xFFu;
}
7527extern void default_init_apic_ldr(void);
/*
 * Non-zero when this CPU's APIC id is set in phys_cpu_present_map.
 * NOTE(review): expanded test_bit(); read_apic_id() appears twice but
 * only one arm of the ?: is evaluated.
 */
7528 static inline __attribute__((always_inline)) int default_apic_id_registered(void)
7529 {
7530  return (__builtin_constant_p((read_apic_id())) ? constant_test_bit((read_apic_id()), ((phys_cpu_present_map).mask)) : variable_test_bit((read_apic_id()), ((phys_cpu_present_map).mask)));
7531 }
/* Physical package id = APIC id shifted right by @index_msb bits. */
static inline __attribute__((always_inline)) int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
	int pkg = cpuid_apic >> index_msb;

	return pkg;
}
7536static inline __attribute__((always_inline)) unsigned int
7537default_cpu_mask_to_apicid(const struct cpumask *cpumask)
7538{
7539 return ((cpumask)->bits)[0] & 0xFFu;
7540}
7541static inline __attribute__((always_inline)) unsigned int
7542default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
7543 const struct cpumask *andmask)
7544{
7545 unsigned long mask1 = ((cpumask)->bits)[0];
7546 unsigned long mask2 = ((andmask)->bits)[0];
7547 unsigned long mask3 = ((cpu_online_mask)->bits)[0];
7548 return (unsigned int)(mask1 & mask2 & mask3);
7549}
/* Non-zero when @apicid is set in *@map (expanded test_bit()). */
7550 static inline __attribute__((always_inline)) unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
7551 {
7552  return (__builtin_constant_p((apicid)) ? constant_test_bit((apicid), ((*map).mask)) : variable_test_bit((apicid), ((*map).mask)));
7553 }
/* Non-zero when @bit is set in phys_cpu_present_map (expanded test_bit()). */
7554 static inline __attribute__((always_inline)) unsigned long default_check_apicid_present(int bit)
7555 {
7556  return (__builtin_constant_p((bit)) ? constant_test_bit((bit), ((phys_cpu_present_map).mask)) : variable_test_bit((bit), ((phys_cpu_present_map).mask)));
7557 }
7558static inline __attribute__((always_inline)) void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
7559{
7560 *retmap = *phys_map;
7561}
/*
 * Map MP-table cpu number @mps_cpu to its BIOS APIC id from the percpu
 * x86_bios_cpu_apicid, or 0xFF when the cpu is out of range / not
 * present. NOTE(review): expanded cpumask_test_cpu() + per_cpu() macros;
 * left byte-identical.
 */
7562 static inline __attribute__((always_inline)) int __default_cpu_present_to_apicid(int mps_cpu)
7563 {
7564  if (__builtin_constant_p(((mps_cpu < nr_cpu_ids && (__builtin_constant_p((cpumask_check((mps_cpu)))) ? constant_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits))) : variable_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits))))))) ? !!((mps_cpu < nr_cpu_ids && (__builtin_constant_p((cpumask_check((mps_cpu)))) ? constant_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits))) : variable_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits)))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/apic.h", .line = 594, }; ______r = !!((mps_cpu < nr_cpu_ids && (__builtin_constant_p((cpumask_check((mps_cpu)))) ? constant_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits))) : variable_test_bit((cpumask_check((mps_cpu))), ((((cpu_present_mask))->bits)))))); ______f.miss_hit[______r]++; ______r; }))
7565   return (int)(*({ do { const void *__vpp_verify = (typeof((&(x86_bios_cpu_apicid))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(x86_bios_cpu_apicid))) *)(&(x86_bios_cpu_apicid)))); (typeof((typeof(*(&(x86_bios_cpu_apicid))) *)(&(x86_bios_cpu_apicid)))) (__ptr + (((__per_cpu_offset[mps_cpu])))); }); }));
7566  else
7567   return 0xFFu;
7568 }
/* Non-zero when @phys_apicid is set in phys_cpu_present_map (expanded test_bit()). */
7569 static inline __attribute__((always_inline)) int
7570 __default_check_phys_apicid_present(int phys_apicid)
7571 {
7572  return (__builtin_constant_p((phys_apicid)) ? constant_test_bit((phys_apicid), ((phys_cpu_present_map).mask)) : variable_test_bit((phys_apicid), ((phys_cpu_present_map).mask)));
7573 }
/* Thin wrapper: delegate to __default_cpu_present_to_apicid(). */
static inline __attribute__((always_inline)) int default_cpu_present_to_apicid(int mps_cpu)
{
	int apicid = __default_cpu_present_to_apicid(mps_cpu);

	return apicid;
}
/* Thin wrapper: delegate to __default_check_phys_apicid_present(). */
static inline __attribute__((always_inline)) int
default_check_phys_apicid_present(int phys_apicid)
{
	int present = __default_check_phys_apicid_present(phys_apicid);

	return present;
}
/* Valid VM86 IRQ numbers are 3..15 inclusive; anything else is invalid. */
static inline __attribute__((always_inline)) int invalid_vm86_irq(int irq)
{
	return !(irq >= 3 && irq <= 15);
}
/* IO-APIC register 0x00: identification (ID field plus reserved bits). */
7587 union IO_APIC_reg_00 {
7588  u32 raw;
7589  struct {
7590   u32 __reserved_2 : 14,
7591    LTS : 1,
7592    delivery_type : 1,
7593    __reserved_1 : 8,
7594    ID : 8;
7595  } __attribute__ ((packed)) bits;
7596 };
/* IO-APIC register 0x01: version and number of redirection entries. */
7597 union IO_APIC_reg_01 {
7598  u32 raw;
7599  struct {
7600   u32 version : 8,
7601    __reserved_2 : 7,
7602    PRQ : 1,
7603    entries : 8,
7604    __reserved_1 : 8;
7605  } __attribute__ ((packed)) bits;
7606 };
/* IO-APIC register 0x02: arbitration id. */
7607 union IO_APIC_reg_02 {
7608  u32 raw;
7609  struct {
7610   u32 __reserved_2 : 24,
7611    arbitration : 4,
7612    __reserved_1 : 4;
7613  } __attribute__ ((packed)) bits;
7614 };
/* IO-APIC register 0x03: boot delivery-type bit. */
7615 union IO_APIC_reg_03 {
7616  u32 raw;
7617  struct {
7618   u32 boot_DT : 1,
7619    __reserved_1 : 31;
7620  } __attribute__ ((packed)) bits;
7621 };
/* One 64-bit IO-APIC redirection-table entry (packed bitfield layout). */
7622 struct IO_APIC_route_entry {
7623  __u32 vector : 8,
7624   delivery_mode : 3,
7625   dest_mode : 1,
7626   delivery_status : 1,
7627   polarity : 1,
7628   irr : 1,
7629   trigger : 1,
7630   mask : 1,
7631   __reserved_2 : 15;
7632  __u32 __reserved_3 : 24,
7633   dest : 8;
7634 } __attribute__ ((packed));
/* Interrupt-remapping format of an IO-APIC redirection entry (packed). */
7635 struct IR_IO_APIC_route_entry {
7636  __u64 vector : 8,
7637   zero : 3,
7638   index2 : 1,
7639   delivery_status : 1,
7640   polarity : 1,
7641   irr : 1,
7642   trigger : 1,
7643   mask : 1,
7644   reserved : 31,
7645   format : 1,
7646   index : 15;
7647 } __attribute__ ((packed));
7648extern int nr_ioapics;
7649extern int mpc_ioapic_id(int ioapic);
7650extern unsigned int mpc_ioapic_addr(int ioapic);
7651extern struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic);
7652extern int mp_irq_entries;
7653extern struct mpc_intsrc mp_irqs[256];
7654extern int mpc_default_type;
7655extern int sis_apic_bug;
7656extern int skip_ioapic_setup;
7657extern int noioapicquirk;
7658extern int noioapicreroute;
7659extern int timer_through_8259;
7660struct io_apic_irq_attr;
7661extern int io_apic_set_pci_routing(struct device *dev, int irq,
7662 struct io_apic_irq_attr *irq_attr);
7663void setup_IO_APIC_irq_extra(u32 gsi);
7664extern void ioapic_and_gsi_init(void);
7665extern void ioapic_insert_resources(void);
7666int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr);
7667extern int save_ioapic_entries(void);
7668extern void mask_ioapic_entries(void);
7669extern int restore_ioapic_entries(void);
7670extern int get_nr_irqs_gsi(void);
7671extern void setup_ioapic_ids_from_mpc(void);
7672extern void setup_ioapic_ids_from_mpc_nocheck(void);
/* Global-system-interrupt range [gsi_base, gsi_end] served by one IO-APIC. */
7673 struct mp_ioapic_gsi{
7674  u32 gsi_base;
7675  u32 gsi_end;
7676 };
7677extern struct mp_ioapic_gsi mp_gsi_routing[];
7678extern u32 gsi_top;
7679int mp_find_ioapic(u32 gsi);
7680int mp_find_ioapic_pin(int ioapic, u32 gsi);
7681void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) mp_register_ioapic(int id, u32 address, u32 gsi_base);
7682extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pre_init_apic_IRQ0(void);
7683extern void mp_save_irq(struct mpc_intsrc *m);
7684extern void disable_ioapic_support(void);
7685extern int smp_num_siblings;
7686extern unsigned int num_processors;
/*
 * True when the boot CPU advertises feature bit (0*32+28) (HT) and more
 * than one sibling was counted. NOTE(review): expanded cpu_has() macro;
 * left byte-identical.
 */
7687 static inline __attribute__((always_inline)) bool cpu_has_ht_siblings(void)
7688 {
7689  bool has_siblings = false;
7690  has_siblings = (__builtin_constant_p((0*32+28)) && ( ((((0*32+28))>>5)==0 && (1UL<<(((0*32+28))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+28))>>5)==1 && (1UL<<(((0*32+28))&31) & (0|0))) || ((((0*32+28))>>5)==2 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==3 && (1UL<<(((0*32+28))&31) & (0))) || ((((0*32+28))>>5)==4 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==5 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==6 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==7 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==8 && (1UL<<(((0*32+28))&31) & 0)) || ((((0*32+28))>>5)==9 && (1UL<<(((0*32+28))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+28))) ? constant_test_bit(((0*32+28)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+28)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) && smp_num_siblings > 1;
7691  return has_siblings;
7692 }
7693extern __attribute__((section(".data..percpu" ""))) __typeof__(cpumask_var_t) cpu_sibling_map;
7694extern __attribute__((section(".data..percpu" ""))) __typeof__(cpumask_var_t) cpu_core_map;
7695extern __attribute__((section(".data..percpu" ""))) __typeof__(cpumask_var_t) cpu_llc_shared_map;
7696extern __attribute__((section(".data..percpu" ""))) __typeof__(u16) cpu_llc_id;
7697extern __attribute__((section(".data..percpu" ""))) __typeof__(int) cpu_number;
/* Per-cpu accessor: @cpu's HT sibling mask (expanded per_cpu() macro). */
7698 static inline __attribute__((always_inline)) struct cpumask *cpu_sibling_mask(int cpu)
7699 {
7700  return (*({ do { const void *__vpp_verify = (typeof((&(cpu_sibling_map))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_sibling_map))) *)(&(cpu_sibling_map)))); (typeof((typeof(*(&(cpu_sibling_map))) *)(&(cpu_sibling_map)))) (__ptr + (((__per_cpu_offset[cpu])))); }); }));
7701 }
/* Per-cpu accessor: @cpu's core-sibling mask (expanded per_cpu() macro). */
7702 static inline __attribute__((always_inline)) struct cpumask *cpu_core_mask(int cpu)
7703 {
7704  return (*({ do { const void *__vpp_verify = (typeof((&(cpu_core_map))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_core_map))) *)(&(cpu_core_map)))); (typeof((typeof(*(&(cpu_core_map))) *)(&(cpu_core_map)))) (__ptr + (((__per_cpu_offset[cpu])))); }); }));
7705 }
/* Per-cpu accessor: CPUs sharing @cpu's last-level cache (expanded per_cpu()). */
7706 static inline __attribute__((always_inline)) struct cpumask *cpu_llc_shared_mask(int cpu)
7707 {
7708  return (*({ do { const void *__vpp_verify = (typeof((&(cpu_llc_shared_map))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(cpu_llc_shared_map))) *)(&(cpu_llc_shared_map)))); (typeof((typeof(*(&(cpu_llc_shared_map))) *)(&(cpu_llc_shared_map)))) (__ptr + (((__per_cpu_offset[cpu])))); }); }));
7709 }
7710extern __attribute__((section(".data..percpu" ""))) __typeof__(u16) x86_cpu_to_apicid; extern __typeof__(u16) *x86_cpu_to_apicid_early_ptr; extern __typeof__(u16) x86_cpu_to_apicid_early_map[];
7711extern __attribute__((section(".data..percpu" ""))) __typeof__(u16) x86_bios_cpu_apicid; extern __typeof__(u16) *x86_bios_cpu_apicid_early_ptr; extern __typeof__(u16) x86_bios_cpu_apicid_early_map[];
7712extern __attribute__((section(".data..percpu" ""))) __typeof__(int) x86_cpu_to_logical_apicid; extern __typeof__(int) *x86_cpu_to_logical_apicid_early_ptr; extern __typeof__(int) x86_cpu_to_logical_apicid_early_map[];
7713extern unsigned long stack_start;
/* SMP bringup/teardown and cross-CPU call operations vector. */
7714 struct smp_ops {
7715  void (*smp_prepare_boot_cpu)(void);
7716  void (*smp_prepare_cpus)(unsigned max_cpus);
7717  void (*smp_cpus_done)(unsigned max_cpus);
7718  void (*stop_other_cpus)(int wait);
7719  void (*smp_send_reschedule)(int cpu);
7720  int (*cpu_up)(unsigned cpu);
7721  int (*cpu_disable)(void);
7722  void (*cpu_die)(unsigned int cpu);
7723  void (*play_dead)(void);
7724  void (*send_call_func_ipi)(const struct cpumask *mask);
7725  void (*send_call_func_single_ipi)(int cpu);
7726 };
7727extern void set_cpu_sibling_map(int cpu);
7728extern struct smp_ops smp_ops;
7729static inline __attribute__((always_inline)) void smp_send_stop(void)
7730{
7731 smp_ops.stop_other_cpus(0);
7732}
7733static inline __attribute__((always_inline)) void stop_other_cpus(void)
7734{
7735 smp_ops.stop_other_cpus(1);
7736}
7737static inline __attribute__((always_inline)) void smp_prepare_boot_cpu(void)
7738{
7739 smp_ops.smp_prepare_boot_cpu();
7740}
7741static inline __attribute__((always_inline)) void smp_prepare_cpus(unsigned int max_cpus)
7742{
7743 smp_ops.smp_prepare_cpus(max_cpus);
7744}
7745static inline __attribute__((always_inline)) void smp_cpus_done(unsigned int max_cpus)
7746{
7747 smp_ops.smp_cpus_done(max_cpus);
7748}
7749static inline __attribute__((always_inline)) int __cpu_up(unsigned int cpu)
7750{
7751 return smp_ops.cpu_up(cpu);
7752}
7753static inline __attribute__((always_inline)) int __cpu_disable(void)
7754{
7755 return smp_ops.cpu_disable();
7756}
7757static inline __attribute__((always_inline)) void __cpu_die(unsigned int cpu)
7758{
7759 smp_ops.cpu_die(cpu);
7760}
7761static inline __attribute__((always_inline)) void play_dead(void)
7762{
7763 smp_ops.play_dead();
7764}
7765static inline __attribute__((always_inline)) void smp_send_reschedule(int cpu)
7766{
7767 smp_ops.smp_send_reschedule(cpu);
7768}
7769static inline __attribute__((always_inline)) void arch_send_call_function_single_ipi(int cpu)
7770{
7771 smp_ops.send_call_func_single_ipi(cpu);
7772}
7773static inline __attribute__((always_inline)) void arch_send_call_function_ipi_mask(const struct cpumask *mask)
7774{
7775 smp_ops.send_call_func_ipi(mask);
7776}
7777void cpu_disable_common(void);
7778void native_smp_prepare_boot_cpu(void);
7779void native_smp_prepare_cpus(unsigned int max_cpus);
7780void native_smp_cpus_done(unsigned int max_cpus);
7781int native_cpu_up(unsigned int cpunum);
7782int native_cpu_disable(void);
7783void native_cpu_die(unsigned int cpu);
7784void native_play_dead(void);
7785void play_dead_common(void);
7786void wbinvd_on_cpu(int cpu);
7787int wbinvd_on_all_cpus(void);
7788void native_send_call_func_ipi(const struct cpumask *mask);
7789void native_send_call_func_single_ipi(int cpu);
7790void smp_store_cpu_info(int id);
7791static inline __attribute__((always_inline)) int num_booting_cpus(void)
7792{
7793 return cpumask_weight(cpu_callout_mask);
7794}
7795extern unsigned disabled_cpus __attribute__ ((__section__(".cpuinit.data")));
7796extern int safe_smp_processor_id(void);
/* Logical APIC id: top byte of the LDR register (APIC offset 0xD0). */
static inline __attribute__((always_inline)) int logical_smp_processor_id(void)
{
	unsigned int ldr = apic_read(0xD0);

	return (ldr >> 24) & 0xFFu;
}
7801extern int hard_smp_processor_id(void);
7802extern void smp_send_stop(void);
7803extern void smp_send_reschedule(int cpu);
7804extern void smp_prepare_cpus(unsigned int max_cpus);
7805extern int __cpu_up(unsigned int cpunum);
7806extern void smp_cpus_done(unsigned int max_cpus);
7807int smp_call_function(smp_call_func_t func, void *info, int wait);
7808void smp_call_function_many(const struct cpumask *mask,
7809 smp_call_func_t func, void *info, bool wait);
7810void __smp_call_function_single(int cpuid, struct call_single_data *data,
7811 int wait);
7812int smp_call_function_any(const struct cpumask *mask,
7813 smp_call_func_t func, void *info, int wait);
7814void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) call_function_init(void);
7815void generic_smp_call_function_single_interrupt(void);
7816void generic_smp_call_function_interrupt(void);
7817void ipi_call_lock(void);
7818void ipi_call_unlock(void);
7819void ipi_call_lock_irq(void);
7820void ipi_call_unlock_irq(void);
7821int on_each_cpu(smp_call_func_t func, void *info, int wait);
7822void smp_prepare_boot_cpu(void);
7823extern unsigned int setup_max_cpus;
7824extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_nr_cpu_ids(void);
7825extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) smp_init(void);
7826 extern unsigned int debug_smp_processor_id(void);
7827extern void arch_disable_smp_support(void);
7828void smp_setup_processor_id(void);
/* Bit indices inside a pageblock's flags field; 3 bits hold the migratetype. */
7829 enum pageblock_bits {
7830  PB_migrate,
7831  PB_migrate_end = PB_migrate + 3 - 1,
7832  NR_PAGEBLOCK_BITS
7833 };
7834struct page;
7835unsigned long get_pageblock_flags_group(struct page *page,
7836 int start_bitidx, int end_bitidx);
7837void set_pageblock_flags_group(struct page *page, unsigned long flags,
7838 int start_bitidx, int end_bitidx);
7839extern int page_group_by_mobility_disabled;
7840static inline __attribute__((always_inline)) int get_pageblock_migratetype(struct page *page)
7841{
7842 return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
7843}
/* Buddy-allocator free lists (one per migratetype) and their total count. */
7844 struct free_area {
7845  struct list_head free_list[5];
7846  unsigned long nr_free;
7847 };
7848struct pglist_data;
/* Zero-size, cacheline-aligned (64 B) spacer between zone sections. */
7849 struct zone_padding {
7850  char x[0];
7851 } __attribute__((__aligned__(1 << (6))));
/* Per-zone VM statistics counters indexed into zone->vm_stat[]. */
7852 enum zone_stat_item {
7853  NR_FREE_PAGES,
7854  NR_LRU_BASE,
7855  NR_INACTIVE_ANON = NR_LRU_BASE,
7856  NR_ACTIVE_ANON,
7857  NR_INACTIVE_FILE,
7858  NR_ACTIVE_FILE,
7859  NR_UNEVICTABLE,
7860  NR_MLOCK,
7861  NR_ANON_PAGES,
7862  NR_FILE_MAPPED,
7863  NR_FILE_PAGES,
7864  NR_FILE_DIRTY,
7865  NR_WRITEBACK,
7866  NR_SLAB_RECLAIMABLE,
7867  NR_SLAB_UNRECLAIMABLE,
7868  NR_PAGETABLE,
7869  NR_KERNEL_STACK,
7870  NR_UNSTABLE_NFS,
7871  NR_BOUNCE,
7872  NR_VMSCAN_WRITE,
7873  NR_WRITEBACK_TEMP,
7874  NR_ISOLATED_ANON,
7875  NR_ISOLATED_FILE,
7876  NR_SHMEM,
7877  NR_DIRTIED,
7878  NR_WRITTEN,
7879  NR_ANON_TRANSPARENT_HUGEPAGES,
7880  NR_VM_ZONE_STAT_ITEMS };
/* LRU list indices: {inactive,active} x {anon,file} plus unevictable. */
7881 enum lru_list {
7882  LRU_INACTIVE_ANON = 0,
7883  LRU_ACTIVE_ANON = 0 + 1,
7884  LRU_INACTIVE_FILE = 0 + 2,
7885  LRU_ACTIVE_FILE = 0 + 2 + 1,
7886  LRU_UNEVICTABLE,
7887  NR_LRU_LISTS
7888 };
7889static inline __attribute__((always_inline)) int is_file_lru(enum lru_list l)
7890{
7891 return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
7892}
7893static inline __attribute__((always_inline)) int is_active_lru(enum lru_list l)
7894{
7895 return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
7896}
7897static inline __attribute__((always_inline)) int is_unevictable_lru(enum lru_list l)
7898{
7899 return (l == LRU_UNEVICTABLE);
7900}
/* Indices into zone->watermark[]: min, low, high reclaim thresholds. */
7901 enum zone_watermarks {
7902  WMARK_MIN,
7903  WMARK_LOW,
7904  WMARK_HIGH,
7905  NR_WMARK
7906 };
/* Per-cpu page cache: count/high/batch bookkeeping and per-type lists. */
7907 struct per_cpu_pages {
7908  int count;
7909  int high;
7910  int batch;
7911  struct list_head lists[3];
7912 };
/* Per-cpu pageset: page cache plus per-cpu VM-stat deltas and threshold. */
7913 struct per_cpu_pageset {
7914  struct per_cpu_pages pcp;
7915  s8 stat_threshold;
7916  s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
7917 };
/* Memory zone kinds for this (32-bit) configuration. */
7918 enum zone_type {
7919  ZONE_DMA,
7920  ZONE_NORMAL,
7921  ZONE_HIGHMEM,
7922  ZONE_MOVABLE,
7923  __MAX_NR_ZONES
7924 };
/* Recent rotate/scan counters, [0]=anon [1]=file, used by reclaim. */
7925 struct zone_reclaim_stat {
7926  unsigned long recent_rotated[2];
7927  unsigned long recent_scanned[2];
7928 };
/*
 * One memory zone: watermarks and reserves, per-cpu pagesets, buddy
 * free areas, LRU lists with their lock, VM statistics, the wait-table
 * hash, and span/identity fields. Cacheline-aligned; zone_padding
 * spacers separate the hot sections.
 */
7929 struct zone {
7930  unsigned long watermark[NR_WMARK];
7931  unsigned long percpu_drift_mark;
7932  unsigned long lowmem_reserve[4];
7933  struct per_cpu_pageset *pageset;
7934  spinlock_t lock;
7935  int all_unreclaimable;
7936  struct free_area free_area[11];
7937  unsigned long *pageblock_flags;
7938  unsigned int compact_considered;
7939  unsigned int compact_defer_shift;
7940  struct zone_padding _pad1_;
7941  spinlock_t lru_lock;
7942  struct zone_lru {
7943   struct list_head list;
7944  } lru[NR_LRU_LISTS];
7945  struct zone_reclaim_stat reclaim_stat;
7946  unsigned long pages_scanned;
7947  unsigned long flags;
7948  atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
7949  unsigned int inactive_ratio;
7950  struct zone_padding _pad2_;
7951  wait_queue_head_t * wait_table;
7952  unsigned long wait_table_hash_nr_entries;
7953  unsigned long wait_table_bits;
7954  struct pglist_data *zone_pgdat;
7955  unsigned long zone_start_pfn;
7956  unsigned long spanned_pages;
7957  unsigned long present_pages;
7958  const char *name;
7959 } __attribute__((__aligned__(1 << (6))));
/* Bit numbers for zone->flags (used by the zone_*_flag helpers below). */
7960 typedef enum {
7961  ZONE_RECLAIM_LOCKED,
7962  ZONE_OOM_LOCKED,
7963  ZONE_CONGESTED,
7964 } zone_flags_t;
7965static inline __attribute__((always_inline)) void zone_set_flag(struct zone *zone, zone_flags_t flag)
7966{
7967 set_bit(flag, &zone->flags);
7968}
7969static inline __attribute__((always_inline)) int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
7970{
7971 return test_and_set_bit(flag, &zone->flags);
7972}
7973static inline __attribute__((always_inline)) void zone_clear_flag(struct zone *zone, zone_flags_t flag)
7974{
7975 clear_bit(flag, &zone->flags);
7976}
/* Non-zero when ZONE_CONGESTED is set in @zone->flags (expanded test_bit()). */
7977 static inline __attribute__((always_inline)) int zone_is_reclaim_congested(const struct zone *zone)
7978 {
7979  return (__builtin_constant_p((ZONE_CONGESTED)) ? constant_test_bit((ZONE_CONGESTED), (&zone->flags)) : variable_test_bit((ZONE_CONGESTED), (&zone->flags)));
7980 }
/* Non-zero when ZONE_RECLAIM_LOCKED is set in @zone->flags. */
7981 static inline __attribute__((always_inline)) int zone_is_reclaim_locked(const struct zone *zone)
7982 {
7983  return (__builtin_constant_p((ZONE_RECLAIM_LOCKED)) ? constant_test_bit((ZONE_RECLAIM_LOCKED), (&zone->flags)) : variable_test_bit((ZONE_RECLAIM_LOCKED), (&zone->flags)));
7984 }
/* Non-zero when ZONE_OOM_LOCKED is set in @zone->flags. */
7985 static inline __attribute__((always_inline)) int zone_is_oom_locked(const struct zone *zone)
7986 {
7987  return (__builtin_constant_p((ZONE_OOM_LOCKED)) ? constant_test_bit((ZONE_OOM_LOCKED), (&zone->flags)) : variable_test_bit((ZONE_OOM_LOCKED), (&zone->flags)));
7988 }
/* One zonelist entry: zone pointer plus its cached zone index. */
7989 struct zonelist_cache;
7990 struct zoneref {
7991  struct zone *zone;
7992  int zone_idx;
7993 };
/* Allocation fallback order: zonerefs array (NULL-terminated) plus cache ptr. */
7994 struct zonelist {
7995  struct zonelist_cache *zlcache_ptr;
7996  struct zoneref _zonerefs[((1 << 0) * 4) + 1];
7997 };
/* A contiguous PFN range [start_pfn, end_pfn) belonging to node @nid. */
7998 struct node_active_region {
7999  unsigned long start_pfn;
8000  unsigned long end_pfn;
8001  int nid;
8002 };
8003extern struct page *mem_map;
8004struct bootmem_data;
/* Per-node memory descriptor: zones, zonelists, span, and kswapd state. */
8005 typedef struct pglist_data {
8006  struct zone node_zones[4];
8007  struct zonelist node_zonelists[1];
8008  int nr_zones;
8009  struct page *node_mem_map;
8010  unsigned long node_start_pfn;
8011  unsigned long node_present_pages;
8012  unsigned long node_spanned_pages;
8013  int node_id;
8014  wait_queue_head_t kswapd_wait;
8015  struct task_struct *kswapd;
8016  int kswapd_max_order;
8017  enum zone_type classzone_idx;
8018 } pg_data_t;
/* Reader/writer semaphore: biased count, waiter list and its lock, lockdep map. */
8019 struct rw_semaphore;
8020 struct rw_semaphore {
8021  long count;
8022  spinlock_t wait_lock;
8023  struct list_head wait_list;
8024  struct lockdep_map dep_map;
8025 };
8026extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
8027extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
8028extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
8029extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
/*
 * Acquire @sem for reading: "lock incl" on sem->count, then skip the slow
 * path when the result is non-negative (jns); otherwise call
 * call_rwsem_down_read_failed. The .smp_locks section records the address
 * of the lock prefix for SMP alternatives patching.
 */
static inline __attribute__((always_inline)) void __down_read(struct rw_semaphore *sem)
{
 asm volatile("# beginning down_read\n\t"
 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " " "incl" " " "(%1)\n\t"
 " jns 1f\n"
 " call call_rwsem_down_read_failed\n"
 "1:\n\t"
 "# ending down_read\n\t"
 : "+m" (sem->count)
 : "a" (sem)
 : "memory", "cc");
}
/*
 * Try to acquire @sem for reading without blocking: optimistic
 * lock-cmpxchg loop that adds the active-read bias (0x00000001) and
 * bails out (jle) when the incremented value would be <= 0, i.e. when a
 * writer is involved. Returns 1 on success, 0 on failure.
 */
static inline __attribute__((always_inline)) int __down_read_trylock(struct rw_semaphore *sem)
{
 long result, tmp;
 asm volatile("# beginning __down_read_trylock\n\t"
 " mov %0,%1\n\t"
 "1:\n\t"
 " mov %1,%2\n\t"
 " add %3,%2\n\t"
 " jle 2f\n\t"
 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " cmpxchg %2,%0\n\t"
 " jnz 1b\n\t"
 "2:\n\t"
 "# ending __down_read_trylock\n\t"
 : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
 : "i" (0x00000001L)
 : "memory", "cc");
 return result >= 0 ? 1 : 0;
}
/*
 * Acquire @sem for writing: "lock xadd" adds the write-bias constant
 * ((-0x0000ffffL - 1) + 0x00000001L) to sem->count; if the previous value
 * (returned in %edx) was non-zero the semaphore was contended and we take
 * the call_rwsem_down_write_failed slow path. The subclass parameter is
 * unused in this expansion (lockdep nesting is compiled out here).
 */
static inline __attribute__((always_inline)) void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
 long tmp;
 asm volatile("# beginning down_write\n\t"
 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " xadd %1,(%2)\n\t"
 " test %1,%1\n\t"
 " jz 1f\n"
 " call call_rwsem_down_write_failed\n"
 "1:\n"
 "# ending down_write"
 : "+m" (sem->count), "=d" (tmp)
 : "a" (sem), "1" (((-0x0000ffffL -1) + 0x00000001L))
 : "memory", "cc");
}
/* Acquire @sem for writing; plain (non-nested) variant with subclass 0. */
static inline __attribute__((always_inline)) void __down_write(struct rw_semaphore *sem)
{
 __down_write_nested(sem, 0);
}
/*
 * Try to acquire @sem for writing without blocking: a single locked
 * cmpxchg of sem->count from 0 (unlocked) to the write-bias value; success
 * iff the old value was 0. The ({ ... }) statement expression is the
 * preprocessed size-dispatched cmpxchg() macro; the second giant
 * expression is the branch-profiler (ftrace_branch_data) expansion of the
 * unlikely()/if instrumentation. Returns 1 on success, 0 otherwise.
 */
static inline __attribute__((always_inline)) int __down_write_trylock(struct rw_semaphore *sem)
{
 long ret = ({ __typeof__(*(((&sem->count)))) __ret; __typeof__(*(((&sem->count)))) __old = (((0x00000000L))); __typeof__(*(((&sem->count)))) __new = (((((-0x0000ffffL -1) + 0x00000001L)))); switch ((sizeof(*&sem->count))) { case 1: { volatile u8 *__ptr = (volatile u8 *)(((&sem->count))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)(((&sem->count))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)(((&sem->count))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; })
 ;
 if (__builtin_constant_p(((ret == 0x00000000L))) ? !!((ret == 0x00000000L)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/rwsem.h", .line = 131, }; ______r = !!((ret == 0x00000000L)); ______f.miss_hit[______r]++; ______r; }))
 return 1;
 return 0;
}
/*
 * Release a read hold on @sem: "lock xadd" of -1 onto sem->count; if the
 * result is negative (jns not taken) there are queued waiters, so call
 * call_rwsem_wake to wake them.
 */
static inline __attribute__((always_inline)) void __up_read(struct rw_semaphore *sem)
{
 long tmp;
 asm volatile("# beginning __up_read\n\t"
 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " xadd %1,(%2)\n\t"
 " jns 1f\n\t"
 " call call_rwsem_wake\n"
 "1:\n"
 "# ending __up_read\n"
 : "+m" (sem->count), "=d" (tmp)
 : "a" (sem), "1" (-0x00000001L)
 : "memory", "cc");
}
/*
 * Release the write hold on @sem: "lock xadd" of the negated write-bias
 * constant; a negative result indicates queued waiters and triggers
 * call_rwsem_wake.
 */
static inline __attribute__((always_inline)) void __up_write(struct rw_semaphore *sem)
{
 long tmp;
 asm volatile("# beginning __up_write\n\t"
 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " xadd %1,(%2)\n\t"
 " jns 1f\n\t"
 " call call_rwsem_wake\n"
 "1:\n\t"
 "# ending __up_write\n"
 : "+m" (sem->count), "=d" (tmp)
 : "a" (sem), "1" (-((-0x0000ffffL -1) + 0x00000001L))
 : "memory", "cc");
}
/*
 * Convert a held write lock into a read lock: "lock addl" of
 * -(-0x0000ffffL - 1) (i.e. +0x10000) onto sem->count; wake waiting
 * readers via call_rwsem_downgrade_wake when the result is negative.
 */
static inline __attribute__((always_inline)) void __downgrade_write(struct rw_semaphore *sem)
{
 asm volatile("# beginning __downgrade_write\n\t"
 ".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " " "addl" " " "%2,(%1)\n\t"
 " jns 1f\n\t"
 " call call_rwsem_downgrade_wake\n"
 "1:\n\t"
 "# ending __downgrade_write\n"
 : "+m" (sem->count)
 : "a" (sem), "er" (-(-0x0000ffffL -1))
 : "memory", "cc");
}
/* Atomically add @delta to sem->count ("lock addl"); no return value. */
static inline __attribute__((always_inline)) void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " " " "addl" " " "%1,%0"
 : "+m" (sem->count)
 : "er" (delta));
}
/*
 * Atomically add @delta to sem->count ("lock xadd") and return the NEW
 * value: xadd leaves the old value in tmp, so old + delta is returned.
 */
static inline __attribute__((always_inline)) long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
 long tmp = delta;
 asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "xadd %0,%1"
 : "+r" (tmp), "+m" (sem->count)
 : : "memory");
 return tmp + delta;
}
8138static inline __attribute__((always_inline)) int rwsem_is_locked(struct rw_semaphore *sem)
8139{
8140 return sem->count != 0;
8141}
8142extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
8143 struct lock_class_key *key);
8144extern void down_read(struct rw_semaphore *sem);
8145extern int down_read_trylock(struct rw_semaphore *sem);
8146extern void down_write(struct rw_semaphore *sem);
8147extern int down_write_trylock(struct rw_semaphore *sem);
8148extern void up_read(struct rw_semaphore *sem);
8149extern void up_write(struct rw_semaphore *sem);
8150extern void downgrade_write(struct rw_semaphore *sem);
8151extern void down_read_nested(struct rw_semaphore *sem, int subclass);
8152extern void down_write_nested(struct rw_semaphore *sem, int subclass);
8153extern void down_read_non_owner(struct rw_semaphore *sem);
8154extern void up_read_non_owner(struct rw_semaphore *sem);
/* Sleepable-RCU state: two per-CPU reader counters (one per grace-period
 * phase, indexed by c[0]/c[1]) plus the completed-period counter. */
struct srcu_struct_array {
 int c[2];
};
struct srcu_struct {
 int completed;
 struct srcu_struct_array *per_cpu_ref;
 struct mutex mutex;
 struct lockdep_map dep_map;
};
8164int __init_srcu_struct(struct srcu_struct *sp, const char *name,
8165 struct lock_class_key *key);
8166void cleanup_srcu_struct(struct srcu_struct *sp);
8167int __srcu_read_lock(struct srcu_struct *sp) ;
8168void __srcu_read_unlock(struct srcu_struct *sp, int idx) ;
8169void synchronize_srcu(struct srcu_struct *sp);
8170void synchronize_srcu_expedited(struct srcu_struct *sp);
8171long srcu_batches_completed(struct srcu_struct *sp);
/*
 * Lockdep helper: when lock debugging is active, ask lockdep whether the
 * SRCU read lock is held; with debug_locks off, conservatively report 1.
 * The giant conditional is the preprocessed branch-profiler if().
 */
static inline __attribute__((always_inline)) int srcu_read_lock_held(struct srcu_struct *sp)
{
 if (__builtin_constant_p(((debug_locks))) ? !!((debug_locks)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/srcu.h", .line = 96, }; ______r = !!((debug_locks)); ______f.miss_hit[______r]++; ______r; }))
 return lock_is_held(&sp->dep_map);
 return 1;
}
/*
 * Enter an SRCU read-side critical section and return the index to pass
 * to srcu_read_unlock(). The ({ __label__ ... }) expression captures the
 * current code address for lockdep's acquire IP.
 */
static inline __attribute__((always_inline)) int srcu_read_lock(struct srcu_struct *sp)
{
 int retval = __srcu_read_lock(sp);
 lock_acquire(&(sp)->dep_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
 return retval;
}
/*
 * Exit the SRCU read-side critical section entered by srcu_read_lock();
 * @idx is the value that call returned. Lockdep release is recorded first.
 */
static inline __attribute__((always_inline)) void srcu_read_unlock(struct srcu_struct *sp, int idx)
{
 lock_release(&(sp)->dep_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
 __srcu_read_unlock(sp, idx);
}
/* One entry in a notifier chain; entries form a singly-linked list,
 * ordered by 'priority'. */
struct notifier_block {
 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
 struct notifier_block *next;
 int priority;
};
/* Chain head variants, differing in the lock protecting the list:
 * spinlock (atomic), rwsem (blocking), none (raw), or SRCU. */
struct atomic_notifier_head {
 spinlock_t lock;
 struct notifier_block *head;
};
struct blocking_notifier_head {
 struct rw_semaphore rwsem;
 struct notifier_block *head;
};
struct raw_notifier_head {
 struct notifier_block *head;
};
struct srcu_notifier_head {
 struct mutex mutex;
 struct srcu_struct srcu;
 struct notifier_block *head;
};
8210extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
8211extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
8212 struct notifier_block *nb);
8213extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
8214 struct notifier_block *nb);
8215extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
8216 struct notifier_block *nb);
8217extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
8218 struct notifier_block *nb);
8219extern int blocking_notifier_chain_cond_register(
8220 struct blocking_notifier_head *nh,
8221 struct notifier_block *nb);
8222extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
8223 struct notifier_block *nb);
8224extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
8225 struct notifier_block *nb);
8226extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
8227 struct notifier_block *nb);
8228extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
8229 struct notifier_block *nb);
8230extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
8231 unsigned long val, void *v);
8232extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
8233 unsigned long val, void *v, int nr_to_call, int *nr_calls);
8234extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
8235 unsigned long val, void *v);
8236extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
8237 unsigned long val, void *v, int nr_to_call, int *nr_calls);
8238extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
8239 unsigned long val, void *v);
8240extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
8241 unsigned long val, void *v, int nr_to_call, int *nr_calls);
8242extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
8243 unsigned long val, void *v);
8244extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
8245 unsigned long val, void *v, int nr_to_call, int *nr_calls);
/*
 * Encode a negative errno into a notifier return value: non-zero @err
 * yields 0x8000 (NOTIFY_STOP_MASK) | (0x0001 - err); zero yields 0x0001
 * (NOTIFY_OK). Inverse of notifier_to_errno() below. The conditional is
 * the preprocessed branch-profiler if().
 */
static inline __attribute__((always_inline)) int notifier_from_errno(int err)
{
 if (__builtin_constant_p(((err))) ? !!((err)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/notifier.h", .line = 167, }; ______r = !!((err)); ______f.miss_hit[______r]++; ______r; }))
 return 0x8000 | (0x0001 - err);
 return 0x0001;
}
/*
 * Decode a value built by notifier_from_errno(): strip the 0x8000 stop
 * mask, then map anything above 0x0001 back to the negative errno it
 * encodes; everything else (success codes) decodes to 0.
 */
static inline __attribute__((always_inline)) int notifier_to_errno(int ret)
{
 int decoded = ret & ~0x8000;
 if (decoded > 0x0001)
  return 0x0001 - decoded;
 return 0;
}
8257extern struct blocking_notifier_head reboot_notifier_list;
8258struct page;
8259struct zone;
8260struct pglist_data;
8261struct mem_section;
/* Resize-locking stubs: node spans never change in this configuration
 * (memory hotplug compiled out), so locking degenerates to no-ops. */
static inline __attribute__((always_inline)) void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline __attribute__((always_inline)) void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline __attribute__((always_inline)) void pgdat_resize_init(struct pglist_data *pgdat) {}
/*
 * Stub seqlock begin for zone-span reads: spans are treated as immutable
 * here, so the sequence number is a constant 0.
 */
static inline __attribute__((always_inline)) unsigned zone_span_seqbegin(struct zone *zone)
{
 (void)zone;
 return 0u;
}
/*
 * Stub seqlock retry check: with immutable zone spans a read never needs
 * to be retried, so this always reports "no retry" (0).
 */
static inline __attribute__((always_inline)) int zone_span_seqretry(struct zone *zone, unsigned iv)
{
 (void)zone;
 (void)iv;
 return 0;
}
/* Writer-side span-seqlock stubs — no-ops for the same reason as above. */
static inline __attribute__((always_inline)) void zone_span_writelock(struct zone *zone) {}
static inline __attribute__((always_inline)) void zone_span_writeunlock(struct zone *zone) {}
static inline __attribute__((always_inline)) void zone_seqlock_init(struct zone *zone) {}
/*
 * Report an attempted memory-hotplug operation on a kernel built without
 * hotplug support: warn (printk level "<4>" = KERN_WARNING), dump the
 * stack, and return -38 (-ENOSYS).
 */
static inline __attribute__((always_inline)) int mhp_notimplemented(const char *func)
{
 printk("<4>" "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
 dump_stack();
 return -38;
}
/* Stub: bootmem page info registration is compiled out here. */
static inline __attribute__((always_inline)) void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
/* Memory-hotplug mutual-exclusion stubs — no-ops in this configuration. */
static inline __attribute__((always_inline)) void lock_memory_hotplug(void) {}
static inline __attribute__((always_inline)) void unlock_memory_hotplug(void) {}
/*
 * Stub: without memory-hotplug support no section of memory is ever
 * removable, regardless of the pfn range asked about.
 */
static inline __attribute__((always_inline)) int is_mem_section_removable(unsigned long pfn,
       unsigned long nr_pages)
{
 (void)pfn;
 (void)nr_pages;
 return 0;
}
8292extern int mem_online_node(int nid);
8293extern int add_memory(int nid, u64 start, u64 size);
8294extern int arch_add_memory(int nid, u64 start, u64 size);
8295extern int remove_memory(u64 start, u64 size);
8296extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
8297 int nr_pages);
8298extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
8299extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
8300 unsigned long pnum);
8301extern struct mutex zonelists_mutex;
8302void build_all_zonelists(void *data);
8303void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
8304bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
8305 int classzone_idx, int alloc_flags);
8306bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
8307 int classzone_idx, int alloc_flags);
8308enum memmap_context {
8309 MEMMAP_EARLY,
8310 MEMMAP_HOTPLUG,
8311};
8312extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
8313 unsigned long size,
8314 enum memmap_context context);
8315static inline __attribute__((always_inline)) void memory_present(int nid, unsigned long start, unsigned long end) {}
8316static inline __attribute__((always_inline)) int local_memory_node(int node_id) { return node_id; };
8317static inline __attribute__((always_inline)) int populated_zone(struct zone *zone)
8318{
8319 return (!!zone->present_pages);
8320}
8321extern int movable_zone;
8322static inline __attribute__((always_inline)) int zone_movable_is_highmem(void)
8323{
8324 return movable_zone == ZONE_HIGHMEM;
8325}
8326static inline __attribute__((always_inline)) int is_highmem_idx(enum zone_type idx)
8327{
8328 return (idx == ZONE_HIGHMEM ||
8329 (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
8330}
8331static inline __attribute__((always_inline)) int is_normal_idx(enum zone_type idx)
8332{
8333 return (idx == ZONE_NORMAL);
8334}
/*
 * Decide whether @zone is a highmem zone without knowing its index:
 * compute its byte offset inside the parent node's node_zones[] array and
 * compare against the offsets of ZONE_HIGHMEM and (when the movable zone
 * is highmem-backed) ZONE_MOVABLE.
 */
static inline __attribute__((always_inline)) int is_highmem(struct zone *zone)
{
 int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
 return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
        (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
         zone_movable_is_highmem());
}
8342static inline __attribute__((always_inline)) int is_normal(struct zone *zone)
8343{
8344 return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
8345}
/* This configuration has no ZONE_DMA32, so no zone ever qualifies. */
static inline __attribute__((always_inline)) int is_dma32(struct zone *zone)
{
 (void)zone;
 return 0;
}
8350static inline __attribute__((always_inline)) int is_dma(struct zone *zone)
8351{
8352 return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
8353}
8354struct ctl_table;
8355int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
8356 void *, size_t *, loff_t *);
8357extern int sysctl_lowmem_reserve_ratio[4 -1];
8358int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
8359 void *, size_t *, loff_t *);
8360int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
8361 void *, size_t *, loff_t *);
8362int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
8363 void *, size_t *, loff_t *);
8364int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
8365 void *, size_t *, loff_t *);
8366extern int numa_zonelist_order_handler(struct ctl_table *, int,
8367 void *, size_t *, loff_t *);
8368extern char numa_zonelist_order[];
8369extern struct pglist_data contig_page_data;
8370extern struct pglist_data *first_online_pgdat(void);
8371extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
8372extern struct zone *next_zone(struct zone *zone);
8373static inline __attribute__((always_inline)) struct zone *zonelist_zone(struct zoneref *zoneref)
8374{
8375 return zoneref->zone;
8376}
8377static inline __attribute__((always_inline)) int zonelist_zone_idx(struct zoneref *zoneref)
8378{
8379 return zoneref->zone_idx;
8380}
/* Single-node configuration: every zonelist entry belongs to node 0. */
static inline __attribute__((always_inline)) int zonelist_node_idx(struct zoneref *zoneref)
{
 (void)zoneref;
 return 0;
}
8385struct zoneref *next_zones_zonelist(struct zoneref *z,
8386 enum zone_type highest_zoneidx,
8387 nodemask_t *nodes,
8388 struct zone **zone);
/*
 * Start iterating @zonelist from its first entry: delegate to
 * next_zones_zonelist() with the _zonerefs array base, filtering by
 * @highest_zoneidx and optional @nodes mask; *zone is set as a side effect.
 */
static inline __attribute__((always_inline)) struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
     enum zone_type highest_zoneidx,
     nodemask_t *nodes,
     struct zone **zone)
{
 return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
        zone);
}
8397void memory_present(int nid, unsigned long start, unsigned long end);
8398unsigned long __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) node_memmap_size_bytes(int, unsigned long, unsigned long);
/*
 * Stub: every (pfn, page) pair inside a zone's span is treated as having
 * a valid memmap entry in this configuration, so the check is constant.
 */
static inline __attribute__((always_inline)) int memmap_valid_within(unsigned long pfn,
      struct page *page, struct zone *zone)
{
 (void)pfn;
 (void)page;
 (void)zone;
 return 1;
}
8404extern void *pcpu_base_addr;
8405extern const unsigned long *pcpu_unit_offsets;
/* Percpu first-chunk layout: one group of units with a shared base offset. */
struct pcpu_group_info {
 int nr_units;
 unsigned long base_offset;
 unsigned int *cpu_map;
};
/* Full allocation-layout description passed to pcpu_setup_first_chunk();
 * groups[] is a flexible array member sized by nr_groups. */
struct pcpu_alloc_info {
 size_t static_size;
 size_t reserved_size;
 size_t dyn_size;
 size_t unit_size;
 size_t atom_size;
 size_t alloc_size;
 size_t __ai_size;
 int nr_groups;
 struct pcpu_group_info groups[];
};
/* First-chunk allocator selection (auto / embed / page). */
enum pcpu_fc {
 PCPU_FC_AUTO,
 PCPU_FC_EMBED,
 PCPU_FC_PAGE,
 PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];
extern enum pcpu_fc pcpu_chosen_fc;
/* Callback types used while building the percpu first chunk. */
typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
         size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
8435extern struct pcpu_alloc_info * __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_alloc_alloc_info(int nr_groups,
8436 int nr_units);
8437extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
8438extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
8439 void *base_addr);
8440extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
8441 size_t atom_size,
8442 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
8443 pcpu_fc_alloc_fn_t alloc_fn,
8444 pcpu_fc_free_fn_t free_fn);
8445extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_page_first_chunk(size_t reserved_size,
8446 pcpu_fc_alloc_fn_t alloc_fn,
8447 pcpu_fc_free_fn_t free_fn,
8448 pcpu_fc_populate_pte_fn_t populate_pte_fn);
8449extern void *__alloc_reserved_percpu(size_t size, size_t align);
8450extern bool is_kernel_percpu_address(unsigned long addr);
8451extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) percpu_init_late(void);
8452extern void *__alloc_percpu(size_t size, size_t align);
8453extern void free_percpu(void *__pdata);
8454extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
8455extern void __bad_size_call_parameter(void);
8456int arch_update_cpu_topology(void);
/* Nearest node with memory; without NUMA this is simply the current node. */
static inline __attribute__((always_inline)) int numa_mem_id(void)
{
 return numa_node_id();
}
8461struct vm_area_struct;
/*
 * Map GFP flags to a 2-bit migrate type. The first statement expression
 * is the preprocessed WARN_ON() firing when both 0x80000u and 0x08u are
 * set together (presumably __GFP_RECLAIMABLE and __GFP_MOVABLE — confirm
 * against gfp.h); the second giant conditional returns 0 when grouping by
 * mobility is disabled. Otherwise bit1 = 0x08u present, bit0 = 0x80000u
 * present. Both conditionals carry branch-profiler instrumentation.
 */
static inline __attribute__((always_inline)) int allocflags_to_migratetype(gfp_t gfp_flags)
{
 ({ int __ret_warn_on = !!((gfp_flags & ((( gfp_t)0x80000u)|(( gfp_t)0x08u))) == ((( gfp_t)0x80000u)|(( gfp_t)0x08u))); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("include/linux/gfp.h", 152); (__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 152, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); });
 if (__builtin_constant_p((((__builtin_constant_p(page_group_by_mobility_disabled) ? !!(page_group_by_mobility_disabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 154, }; ______r = __builtin_expect(!!(page_group_by_mobility_disabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(page_group_by_mobility_disabled) ? !!(page_group_by_mobility_disabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 154, }; ______r = __builtin_expect(!!(page_group_by_mobility_disabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 154, }; ______r = !!(((__builtin_constant_p(page_group_by_mobility_disabled) ? !!(page_group_by_mobility_disabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 154, }; ______r = __builtin_expect(!!(page_group_by_mobility_disabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
 return 0;
 return (((gfp_flags & (( gfp_t)0x08u)) != 0) << 1) |
  ((gfp_flags & (( gfp_t)0x80000u)) != 0);
}
/*
 * Translate the low GFP zone-selector bits (0x01|0x02|0x04|0x08) into a
 * zone_type using a packed lookup table: 2 bits of zone number per
 * possible selector combination, shifted down by (bit * 2). The do/while
 * is the preprocessed bad-zone BUILD_BUG/VM check reduced to a constant
 * expression for the invalid flag combinations.
 */
static inline __attribute__((always_inline)) enum zone_type gfp_zone(gfp_t flags)
{
 enum zone_type z;
 int bit = ( int) (flags & ((( gfp_t)0x01u)|(( gfp_t)0x02u)|(( gfp_t)0x04u)|(( gfp_t)0x08u)));
 z = (( (ZONE_NORMAL << 0 * 2) | (ZONE_DMA << 0x01u * 2) | (ZONE_HIGHMEM << 0x02u * 2) | (ZONE_NORMAL << 0x04u * 2) | (ZONE_NORMAL << 0x08u * 2) | (ZONE_DMA << (0x08u | 0x01u) * 2) | (ZONE_MOVABLE << (0x08u | 0x02u) * 2) | (ZONE_NORMAL << (0x08u | 0x04u) * 2) ) >> (bit * 2)) &
     ((1 << 2) - 1);
 do { (void)((( 1 << (0x01u | 0x02u) | 1 << (0x01u | 0x04u) | 1 << (0x04u | 0x02u) | 1 << (0x01u | 0x04u | 0x02u) | 1 << (0x08u | 0x02u | 0x01u) | 1 << (0x08u | 0x04u | 0x01u) | 1 << (0x08u | 0x04u | 0x02u) | 1 << (0x08u | 0x04u | 0x01u | 0x02u) ) >> bit) & 1); } while (0);
 return z;
}
/*
 * Select which of the node's zonelists to use. The condition starts with
 * "0 &&", so on this single-zonelist configuration it constant-folds to
 * false and the function always returns 0; the rest of the expression is
 * the (dead) branch-profiler expansion of the __GFP_THISNODE test.
 */
static inline __attribute__((always_inline)) int gfp_zonelist(gfp_t flags)
{
 if (__builtin_constant_p(((0 && (__builtin_constant_p(flags & (( gfp_t)0x40000u)) ? !!(flags & (( gfp_t)0x40000u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 265, }; ______r = __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!((0 && (__builtin_constant_p(flags & (( gfp_t)0x40000u)) ? !!(flags & (( gfp_t)0x40000u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 265, }; ______r = __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 265, }; ______r = !!((0 && (__builtin_constant_p(flags & (( gfp_t)0x40000u)) ? !!(flags & (( gfp_t)0x40000u)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 265, }; ______r = __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
 return 1;
 return 0;
}
/*
 * Zonelist for an allocation on node @nid. Flat-memory build: all nodes
 * share the static contig_page_data, and gfp_zonelist() picks the index
 * (always 0 here); @nid itself is unused in this expansion.
 */
static inline __attribute__((always_inline)) struct zonelist *node_zonelist(int nid, gfp_t flags)
{
 return (&contig_page_data)->node_zonelists + gfp_zonelist(flags);
}
/* Architecture page alloc/free hooks — no-ops on this architecture. */
static inline __attribute__((always_inline)) void arch_free_page(struct page *page, int order) { }
static inline __attribute__((always_inline)) void arch_alloc_page(struct page *page, int order) { }
8491struct page *
8492__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
8493 struct zonelist *zonelist, nodemask_t *nodemask);
/* Allocate 2^@order pages from @zonelist with no nodemask restriction
 * (NULL nodemask passed through to __alloc_pages_nodemask()). */
static inline __attribute__((always_inline)) struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
  struct zonelist *zonelist)
{
 return __alloc_pages_nodemask(gfp_mask, order, zonelist, ((void *)0));
}
/*
 * Allocate 2^@order pages on node @nid; a negative @nid means "current
 * node" and is replaced with numa_node_id(). The conditional carries the
 * usual branch-profiler instrumentation.
 */
static inline __attribute__((always_inline)) struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
      unsigned int order)
{
 if (__builtin_constant_p(((nid < 0))) ? !!((nid < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/gfp.h", .line = 307, }; ______r = !!((nid < 0)); ______f.miss_hit[______r]++; ______r; }))
 nid = numa_node_id();
 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
/*
 * Like alloc_pages_node() but @nid must be a valid online node; the
 * do/while is the compiled-out VM_BUG_ON range check (nid vs node count).
 */
static inline __attribute__((always_inline)) struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
      unsigned int order)
{
 do { (void)(nid < 0 || nid >= (1 << 0)); } while (0);
 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
8513extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
8514extern unsigned long get_zeroed_page(gfp_t gfp_mask);
8515void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
8516void free_pages_exact(void *virt, size_t size);
8517void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
8518extern void __free_pages(struct page *page, unsigned int order);
8519extern void free_pages(unsigned long addr, unsigned int order);
8520extern void free_hot_cold_page(struct page *page, int cold);
8521void page_alloc_init(void);
8522void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
8523void drain_all_pages(void);
8524void drain_local_pages(void *dummy);
8525extern gfp_t gfp_allowed_mask;
8526extern void pm_restrict_gfp_mask(void);
8527extern void pm_restore_gfp_mask(void);
/* Interrupt-handler return values: not-ours / handled / wake the
 * handler thread. */
enum irqreturn {
 IRQ_NONE = (0 << 0),
 IRQ_HANDLED = (1 << 0),
 IRQ_WAKE_THREAD = (1 << 1),
};
typedef enum irqreturn irqreturn_t;
8534extern int nr_irqs;
8535extern struct irq_desc *irq_to_desc(unsigned int irq);
8536unsigned int irq_get_next_irq(unsigned int offset);
/*
 * Canonicalize an x86 IRQ number: the cascade interrupt appears as IRQ 2
 * but is routed as IRQ 9; every other number maps to itself.
 */
static inline __attribute__((always_inline)) int irq_canonicalize(int irq)
{
 if (irq == 2)
  return 9;
 return irq;
}
8541extern void irq_ctx_init(int cpu);
8542extern void fixup_irqs(void);
8543extern void irq_force_complete_move(int);
8544extern void (*x86_platform_ipi_callback)(void);
8545extern void native_init_IRQ(void);
8546extern bool handle_irq(unsigned irq, struct pt_regs *regs);
8547extern unsigned int do_IRQ(struct pt_regs *regs);
8548extern unsigned long used_vectors[(((256) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
8549extern int vector_used_by_percpu_irq(unsigned int vector);
8550extern void init_ISA_irqs(void);
8551extern __attribute__((section(".data..percpu" ""))) __typeof__(struct pt_regs *) irq_regs;
/*
 * Read the per-CPU 'irq_regs' pointer: size-dispatched %fs-segment mov,
 * the preprocessed this_cpu_read() on 32-bit x86.
 */
static inline __attribute__((always_inline)) struct pt_regs *get_irq_regs(void)
{
 return ({ typeof(irq_regs) pfo_ret__; switch (sizeof(irq_regs)) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m" (irq_regs)); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_regs)); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_regs)); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m" (irq_regs)); break; default: __bad_percpu_size(); } pfo_ret__; });
}
/*
 * Install @new_regs as the per-CPU 'irq_regs' pointer and return the
 * previous value. The do/while is the preprocessed this_cpu_write():
 * a size-dispatched %fs-segment mov store.
 */
static inline __attribute__((always_inline)) struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
 struct pt_regs *old_regs;
 old_regs = get_irq_regs();
 do { typedef typeof(irq_regs) pto_T__; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/irq_regs.h", .line = 26, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pto_T__ pto_tmp__; pto_tmp__ = (new_regs); (void)pto_tmp__; } switch (sizeof(irq_regs)) { case 1: asm("mov" "b %1,""%%""fs"":" "%P" "0" : "+m" (irq_regs) : "qi" ((pto_T__)(new_regs))); break; case 2: asm("mov" "w %1,""%%""fs"":" "%P" "0" : "+m" (irq_regs) : "ri" ((pto_T__)(new_regs))); break; case 4: asm("mov" "l %1,""%%""fs"":" "%P" "0" : "+m" (irq_regs) : "ri" ((pto_T__)(new_regs))); break; case 8: asm("mov" "q %1,""%%""fs"":" "%P" "0" : "+m" (irq_regs) : "re" ((pto_T__)(new_regs))); break; default: __bad_percpu_size(); } } while (0);
 return old_regs;
}
struct seq_file;
struct irq_desc;
struct irq_data;
/* High-level flow handler invoked for each interrupt occurrence. */
typedef void (*irq_flow_handler_t)(unsigned int irq,
       struct irq_desc *desc);
/* Hook called before the flow handler runs. */
typedef void (*irq_preflow_handler_t)(struct irq_data *data);
/* Per-IRQ-line trigger types (low nibble) and behavior flags (high bits). */
enum {
 IRQ_TYPE_NONE = 0x00000000,
 IRQ_TYPE_EDGE_RISING = 0x00000001,
 IRQ_TYPE_EDGE_FALLING = 0x00000002,
 IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
 IRQ_TYPE_LEVEL_HIGH = 0x00000004,
 IRQ_TYPE_LEVEL_LOW = 0x00000008,
 IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
 IRQ_TYPE_SENSE_MASK = 0x0000000f,
 IRQ_TYPE_PROBE = 0x00000010,
 /* Behavior flags start above the trigger/sense nibble. */
 IRQ_LEVEL = (1 << 8),
 IRQ_PER_CPU = (1 << 9),
 IRQ_NOPROBE = (1 << 10),
 IRQ_NOREQUEST = (1 << 11),
 IRQ_NOAUTOEN = (1 << 12),
 IRQ_NO_BALANCING = (1 << 13),
 IRQ_MOVE_PCNTXT = (1 << 14),
 IRQ_NESTED_THREAD = (1 << 15),
 IRQ_NOTHREAD = (1 << 16),
};
8589static inline __attribute__((always_inline)) __attribute__((deprecated)) bool CHECK_IRQ_PER_CPU(unsigned int status)
8590{
8591 return status & IRQ_PER_CPU;
8592}
/* Return codes for irq_chip::irq_set_affinity. */
enum {
 IRQ_SET_MASK_OK = 0,
 IRQ_SET_MASK_OK_NOCOPY,
};
struct msi_desc;
/* Per-interrupt data handed to irq_chip callbacks. */
struct irq_data {
 unsigned int irq; /* linux interrupt number */
 unsigned int node; /* NUMA node — NOTE(review): inferred from name; confirm */
 unsigned int state_use_accessors; /* IRQD_* bits; use the irqd_*() helpers below */
 struct irq_chip *chip;
 void *handler_data;
 void *chip_data;
 struct msi_desc *msi_desc;
 cpumask_var_t affinity;
};
/* Bits of irq_data.state_use_accessors, read/written via the helpers below. */
enum {
 IRQD_TRIGGER_MASK = 0xf, /* low nibble mirrors IRQ_TYPE_* trigger bits */
 IRQD_SETAFFINITY_PENDING = (1 << 8),
 IRQD_NO_BALANCING = (1 << 10),
 IRQD_PER_CPU = (1 << 11),
 IRQD_AFFINITY_SET = (1 << 12),
 IRQD_LEVEL = (1 << 13),
 IRQD_WAKEUP_STATE = (1 << 14),
 IRQD_MOVE_PCNTXT = (1 << 15),
 IRQD_IRQ_DISABLED = (1 << 16),
 IRQD_IRQ_MASKED = (1 << 17),
 IRQD_IRQ_INPROGRESS = (1 << 18),
};
8621static inline __attribute__((always_inline)) bool irqd_is_setaffinity_pending(struct irq_data *d)
8622{
8623 return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
8624}
8625static inline __attribute__((always_inline)) bool irqd_is_per_cpu(struct irq_data *d)
8626{
8627 return d->state_use_accessors & IRQD_PER_CPU;
8628}
8629static inline __attribute__((always_inline)) bool irqd_can_balance(struct irq_data *d)
8630{
8631 return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
8632}
8633static inline __attribute__((always_inline)) bool irqd_affinity_was_set(struct irq_data *d)
8634{
8635 return d->state_use_accessors & IRQD_AFFINITY_SET;
8636}
8637static inline __attribute__((always_inline)) void irqd_mark_affinity_was_set(struct irq_data *d)
8638{
8639 d->state_use_accessors |= IRQD_AFFINITY_SET;
8640}
8641static inline __attribute__((always_inline)) u32 irqd_get_trigger_type(struct irq_data *d)
8642{
8643 return d->state_use_accessors & IRQD_TRIGGER_MASK;
8644}
8645static inline __attribute__((always_inline)) void irqd_set_trigger_type(struct irq_data *d, u32 type)
8646{
8647 d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
8648 d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
8649}
8650static inline __attribute__((always_inline)) bool irqd_is_level_type(struct irq_data *d)
8651{
8652 return d->state_use_accessors & IRQD_LEVEL;
8653}
8654static inline __attribute__((always_inline)) bool irqd_is_wakeup_set(struct irq_data *d)
8655{
8656 return d->state_use_accessors & IRQD_WAKEUP_STATE;
8657}
8658static inline __attribute__((always_inline)) bool irqd_can_move_in_process_context(struct irq_data *d)
8659{
8660 return d->state_use_accessors & IRQD_MOVE_PCNTXT;
8661}
8662static inline __attribute__((always_inline)) bool irqd_irq_disabled(struct irq_data *d)
8663{
8664 return d->state_use_accessors & IRQD_IRQ_DISABLED;
8665}
8666static inline __attribute__((always_inline)) bool irqd_irq_masked(struct irq_data *d)
8667{
8668 return d->state_use_accessors & IRQD_IRQ_MASKED;
8669}
8670static inline __attribute__((always_inline)) bool irqd_irq_inprogress(struct irq_data *d)
8671{
8672 return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
8673}
8674static inline __attribute__((always_inline)) void irqd_set_chained_irq_inprogress(struct irq_data *d)
8675{
8676 d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
8677}
8678static inline __attribute__((always_inline)) void irqd_clr_chained_irq_inprogress(struct irq_data *d)
8679{
8680 d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
8681}
/*
 * Hardware interrupt-controller descriptor: every callback receives the
 * per-interrupt irq_data. All callbacks are optional from this view;
 * semantics follow their names (startup/shutdown, mask/unmask, ack/eoi, ...).
 */
struct irq_chip {
 const char *name;
 unsigned int (*irq_startup)(struct irq_data *data);
 void (*irq_shutdown)(struct irq_data *data);
 void (*irq_enable)(struct irq_data *data);
 void (*irq_disable)(struct irq_data *data);
 void (*irq_ack)(struct irq_data *data);
 void (*irq_mask)(struct irq_data *data);
 void (*irq_mask_ack)(struct irq_data *data);
 void (*irq_unmask)(struct irq_data *data);
 void (*irq_eoi)(struct irq_data *data);
 int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
 int (*irq_retrigger)(struct irq_data *data);
 int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
 int (*irq_set_wake)(struct irq_data *data, unsigned int on);
 void (*irq_bus_lock)(struct irq_data *data);
 void (*irq_bus_sync_unlock)(struct irq_data *data);
 void (*irq_cpu_online)(struct irq_data *data);
 void (*irq_cpu_offline)(struct irq_data *data);
 void (*irq_suspend)(struct irq_data *data);
 void (*irq_resume)(struct irq_data *data);
 void (*irq_pm_shutdown)(struct irq_data *data);
 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
 unsigned long flags; /* IRQCHIP_* bits below */
};
/* Values for irq_chip.flags. */
enum {
 IRQCHIP_SET_TYPE_MASKED = (1 << 0),
 IRQCHIP_EOI_IF_HANDLED = (1 << 1),
 IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
 IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
};
struct irq_affinity_notify;
struct proc_dir_entry;
struct timer_rand_state;
/*
 * Central per-interrupt descriptor, cacheline-aligned (1 << 6 bytes).
 * Embeds irq_data first so the two can be converted cheaply.
 */
struct irq_desc {
 struct irq_data irq_data;
 struct timer_rand_state *timer_rand_state;
 unsigned int *kstat_irqs; /* per-CPU interrupt counters */
 irq_flow_handler_t handle_irq; /* invoked by generic_handle_irq_desc() */
 struct irqaction *action; /* list head of registered handlers */
 unsigned int status_use_accessors; /* IRQ_* bits */
 unsigned int core_internal_state__do_not_mess_with_it;
 unsigned int depth; /* nested disable count */
 unsigned int wake_depth; /* nested wake-enable count */
 unsigned int irq_count;
 unsigned long last_unhandled;
 unsigned int irqs_unhandled;
 raw_spinlock_t lock;
 const struct cpumask *affinity_hint;
 struct irq_affinity_notify *affinity_notify;
 cpumask_var_t pending_mask;
 unsigned long threads_oneshot;
 atomic_t threads_active;
 wait_queue_head_t wait_for_threads;
 struct proc_dir_entry *dir;
 const char *name;
} __attribute__((__aligned__(1 << (6))));
/* Static descriptor table; the expression is the expanded NR_IRQS constant. */
extern struct irq_desc irq_desc[((32 * 8) < ( 32 * 64 ) ? (256 + (32 * 8)) : (256 + ( 32 * 64 )))];
8740static inline __attribute__((always_inline)) struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
8741{
8742 return &desc->irq_data;
8743}
8744static inline __attribute__((always_inline)) struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
8745{
8746 return desc->irq_data.chip;
8747}
8748static inline __attribute__((always_inline)) void *irq_desc_get_chip_data(struct irq_desc *desc)
8749{
8750 return desc->irq_data.chip_data;
8751}
8752static inline __attribute__((always_inline)) void *irq_desc_get_handler_data(struct irq_desc *desc)
8753{
8754 return desc->irq_data.handler_data;
8755}
8756static inline __attribute__((always_inline)) struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
8757{
8758 return desc->irq_data.msi_desc;
8759}
8760static inline __attribute__((always_inline)) void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
8761{
8762 desc->handle_irq(irq, desc);
8763}
/* Resolve @irq to its descriptor and invoke its flow handler (defined elsewhere). */
int generic_handle_irq(unsigned int irq);
8765static inline __attribute__((always_inline)) int irq_has_action(unsigned int irq)
8766{
8767 struct irq_desc *desc = irq_to_desc(irq);
8768 return desc->action != ((void *)0);
8769}
8770static inline __attribute__((always_inline)) void __irq_set_handler_locked(unsigned int irq,
8771 irq_flow_handler_t handler)
8772{
8773 struct irq_desc *desc;
8774 desc = irq_to_desc(irq);
8775 desc->handle_irq = handler;
8776}
8777static inline __attribute__((always_inline)) void
8778__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
8779 irq_flow_handler_t handler, const char *name)
8780{
8781 struct irq_desc *desc;
8782 desc = irq_to_desc(irq);
8783 irq_desc_get_irq_data(desc)->chip = chip;
8784 desc->handle_irq = handler;
8785 desc->name = name;
8786}
8787static inline __attribute__((always_inline)) int irq_balancing_disabled(unsigned int irq)
8788{
8789 struct irq_desc *desc;
8790 desc = irq_to_desc(irq);
8791 return desc->status_use_accessors & (IRQ_PER_CPU | IRQ_NO_BALANCING);
8792}
/*
 * Assign a lockdep class to @irq's descriptor lock so lockdep can tell
 * different interrupt locks apart. The condition is the macro-expanded
 * ftrace branch profiler around a plain NULL check of desc.
 */
static inline __attribute__((always_inline)) void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
{
 struct irq_desc *desc = irq_to_desc(irq);
 if (__builtin_constant_p(((desc))) ? !!((desc)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/irqdesc.h", .line = 159, }; ______r = !!((desc)); ______f.miss_hit[______r]++; ______r; }))
  lockdep_init_map(&(&desc->lock)->dep_map, "class", class, 0);
}
struct proc_dir_entry;
struct pt_regs;
struct notifier_block;
/* Kernel profiling (CONFIG_PROFILING) interface. */
void create_prof_cpu_mask(struct proc_dir_entry *de);
int create_proc_profile(void);
/* Event classes that profiling notifiers can subscribe to. */
enum profile_type {
 PROFILE_TASK_EXIT,
 PROFILE_MUNMAP
};
/* Nonzero when profiling is enabled; checked on every profile_hit(). */
extern int prof_on __attribute__((__section__(".data..read_mostly")));
int profile_init(void);
int profile_setup(char *str);
void profile_tick(int type);
void profile_hits(int type, void *ip, unsigned int nr_hits);
/*
 * Record a single profiling hit at instruction pointer @ip when profiling
 * of @type is enabled. The condition is the macro-expanded, doubly nested
 * ftrace branch profiler around likely(prof_on == type).
 */
static inline __attribute__((always_inline)) void profile_hit(int type, void *ip)
{
 if (__builtin_constant_p((((__builtin_constant_p(prof_on == type) ? !!(prof_on == type) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/profile.h", .line = 61, }; ______r = __builtin_expect(!!(prof_on == type), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(prof_on == type) ? !!(prof_on == type) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/profile.h", .line = 61, }; ______r = __builtin_expect(!!(prof_on == type), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/profile.h", .line = 61, }; ______r = !!(((__builtin_constant_p(prof_on == type) ? !!(prof_on == type) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/profile.h", .line = 61, }; ______r = __builtin_expect(!!(prof_on == type), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
  profile_hits(type, ip, 1);
}
struct task_struct;
struct mm_struct;
/* Profiling notifier hooks for task exit / munmap events. */
void profile_task_exit(struct task_struct * task);
int profile_handoff_task(struct task_struct * task);
void profile_munmap(unsigned long addr);
int task_handoff_register(struct notifier_block * n);
int task_handoff_unregister(struct notifier_block * n);
int profile_event_register(enum profile_type, struct notifier_block * n);
int profile_event_unregister(enum profile_type, struct notifier_block * n);
int register_timer_hook(int (*hook)(struct pt_regs *));
void unregister_timer_hook(int (*hook)(struct pt_regs *));
struct pt_regs;
/* Linker-script section boundary symbols (addresses, not real arrays). */
extern char _text[], _stext[], _etext[];
extern char _data[], _sdata[], _edata[];
extern char __bss_start[], __bss_stop[];
extern char __init_begin[], __init_end[];
extern char _sinittext[], _einittext[];
extern char _end[];
extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
extern char __entry_text_start[], __entry_text_end[];
extern char __initdata_begin[], __initdata_end[];
extern char __start_rodata[], __end_rodata[];
extern char __ctors_start[], __ctors_end[];
/* Stub: x86 declares no extra kernel-text ranges beyond the generic ones. */
static inline __attribute__((always_inline)) int arch_is_kernel_text(unsigned long addr)
{
	(void)addr; /* unused on this architecture */
	return 0;
}
/* Stub: x86 declares no extra kernel-data ranges beyond the generic ones. */
static inline __attribute__((always_inline)) int arch_is_kernel_data(unsigned long addr)
{
	(void)addr; /* unused on this architecture */
	return 0;
}
/* One entry of the __ex_table section: faulting insn address and its fixup. */
struct exception_table_entry {
 unsigned long insn, fixup;
};
/* Consult the exception table to recover from a fault in @regs; 1 if fixed up. */
extern int fixup_exception(struct pt_regs *regs);
/* Out-of-line user-access helpers with nonstandard (register) calling conventions. */
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);
extern void __put_user_bad(void);
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
/* Dummy type used so "m" constraints cover a wide range around the pointer. */
struct __large_struct { unsigned long buf[100]; };
extern struct movsl_mask {
 int mask;
} __attribute__((__aligned__((1 << (6))))) movsl_mask;
/* Out-of-line bulk copy routines; each returns the number of uncopied bytes. */
unsigned long __attribute__((warn_unused_result)) __copy_to_user_ll
  (void *to, const void *from, unsigned long n);
unsigned long __attribute__((warn_unused_result)) __copy_from_user_ll
  (void *to, const void *from, unsigned long n);
unsigned long __attribute__((warn_unused_result)) __copy_from_user_ll_nozero
  (void *to, const void *from, unsigned long n);
unsigned long __attribute__((warn_unused_result)) __copy_from_user_ll_nocache
  (void *to, const void *from, unsigned long n);
unsigned long __attribute__((warn_unused_result)) __copy_from_user_ll_nocache_nozero
  (void *to, const void *from, unsigned long n);
/*
 * Copy @n bytes to user space without a might_fault() check (atomic context).
 * Constant sizes of 1/2/4 bytes inline a single __put_user store with an
 * exception-table fixup that loads the transfer size into ret on fault;
 * everything else goes through __copy_to_user_ll(). Returns 0 on success,
 * else the number of bytes not copied.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
__copy_to_user_inatomic(void *to, const void *from, unsigned long n)
{
 /* Macro-expanded ftrace branch profiling of __builtin_constant_p(n). */
 if (__builtin_constant_p(((__builtin_constant_p(n)))) ? !!((__builtin_constant_p(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 46, }; ______r = !!((__builtin_constant_p(n))); ______f.miss_hit[______r]++; ______r; })) {
  unsigned long ret;
  switch (n) {
  case 1:
   /* Expanded __put_user_size(1): byte store with fixup. */
   do { ret = 0; (void)0; switch (1) { case 1: asm volatile("1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "iq"(*(u8 *)from), "m" ((*(struct __large_struct *)((u8 *)to))), "i" (1), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u8 *)from), "m" ((*(struct __large_struct *)((u8 *)to))), "i" (1), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u8 *)from), "m" ((*(struct __large_struct *)((u8 *)to))), "i" (1), "0" (ret)); break; case 8: asm volatile("1: movl %%eax,0(%2)\n" "2: movl %%edx,4(%2)\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl %3,%0\n" " jmp 3b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "4b" "\n" " .previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "4b" "\n" " .previous\n" : "=r" (ret) : "A" ((__typeof__(*(u8 *)to))(*(u8 *)from)), "r" ((u8 *)to), "i" (1), "0" (ret)); break; default: __put_user_bad(); } } while (0)
    ;
   return ret;
  case 2:
   /* Expanded __put_user_size(2): word store with fixup. */
   do { ret = 0; (void)0; switch (2) { case 1: asm volatile("1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "iq"(*(u16 *)from), "m" ((*(struct __large_struct *)((u16 *)to))), "i" (2), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u16 *)from), "m" ((*(struct __large_struct *)((u16 *)to))), "i" (2), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u16 *)from), "m" ((*(struct __large_struct *)((u16 *)to))), "i" (2), "0" (ret)); break; case 8: asm volatile("1: movl %%eax,0(%2)\n" "2: movl %%edx,4(%2)\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl %3,%0\n" " jmp 3b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "4b" "\n" " .previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "4b" "\n" " .previous\n" : "=r" (ret) : "A" ((__typeof__(*(u16 *)to))(*(u16 *)from)), "r" ((u16 *)to), "i" (2), "0" (ret)); break; default: __put_user_bad(); } } while (0)
    ;
   return ret;
  case 4:
   /* Expanded __put_user_size(4): long store with fixup. */
   do { ret = 0; (void)0; switch (4) { case 1: asm volatile("1: mov""b"" %""b""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "iq"(*(u32 *)from), "m" ((*(struct __large_struct *)((u32 *)to))), "i" (4), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %""w""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u32 *)from), "m" ((*(struct __large_struct *)((u32 *)to))), "i" (4), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %""k""1,%2\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r"(ret) : "ir"(*(u32 *)from), "m" ((*(struct __large_struct *)((u32 *)to))), "i" (4), "0" (ret)); break; case 8: asm volatile("1: movl %%eax,0(%2)\n" "2: movl %%edx,4(%2)\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl %3,%0\n" " jmp 3b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "4b" "\n" " .previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "2b" "," "4b" "\n" " .previous\n" : "=r" (ret) : "A" ((__typeof__(*(u32 *)to))(*(u32 *)from)), "r" ((u32 *)to), "i" (4), "0" (ret)); break; default: __put_user_bad(); } } while (0)
    ;
   return ret;
  }
 }
 return __copy_to_user_ll(to, from, n);
}
/*
 * Sleeping variant of __copy_to_user_inatomic(): identical copy, but
 * might_fault() first asserts a context where page faults may be serviced.
 * Returns the number of bytes not copied (0 on success).
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
__copy_to_user(void *to, const void *from, unsigned long n)
{
 might_fault();
 return __copy_to_user_inatomic(to, from, n);
}
/*
 * Copy @n bytes from user space without a might_fault() check. Constant
 * sizes of 1/2/4 bytes inline a single __get_user load; on fault the fixup
 * loads the transfer size into ret AND zeroes... no — this variant's
 * fallback is the *_nozero routine, while the inline fixup does zero the
 * destination register (see the "xor" in the fixup). Returns 0 on success,
 * else bytes not copied.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long
__copy_from_user_inatomic(void *to, const void *from, unsigned long n)
{
 /* Macro-expanded ftrace branch profiling of __builtin_constant_p(n). */
 if (__builtin_constant_p(((__builtin_constant_p(n)))) ? !!((__builtin_constant_p(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 96, }; ______r = !!((__builtin_constant_p(n))); ______f.miss_hit[______r]++; ______r; })) {
  unsigned long ret;
  switch (n) {
  case 1:
   /* Expanded __get_user_size(1): byte load; fixup zeroes dest and sets ret=size. */
   do { ret = 0; (void)0; switch (1) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 8: (*(u8 *)to) = __get_user_bad(); break; default: (*(u8 *)to) = __get_user_bad(); } } while (0);
   return ret;
  case 2:
   /* Expanded __get_user_size(2). */
   do { ret = 0; (void)0; switch (2) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 8: (*(u16 *)to) = __get_user_bad(); break; default: (*(u16 *)to) = __get_user_bad(); } } while (0);
   return ret;
  case 4:
   /* Expanded __get_user_size(4). */
   do { ret = 0; (void)0; switch (4) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 8: (*(u32 *)to) = __get_user_bad(); break; default: (*(u32 *)to) = __get_user_bad(); } } while (0);
   return ret;
  }
 }
 return __copy_from_user_ll_nozero(to, from, n);
}
/*
 * Copy @n bytes from user space; may sleep (might_fault()). Constant sizes
 * of 1/2/4 bytes inline a single __get_user load whose fixup zeroes the
 * destination and returns the transfer size; other sizes fall back to
 * __copy_from_user_ll(). Returns 0 on success, else bytes not copied.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long
__copy_from_user(void *to, const void *from, unsigned long n)
{
 might_fault();
 /* Macro-expanded ftrace branch profiling of __builtin_constant_p(n). */
 if (__builtin_constant_p(((__builtin_constant_p(n)))) ? !!((__builtin_constant_p(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 140, }; ______r = !!((__builtin_constant_p(n))); ______f.miss_hit[______r]++; ______r; })) {
  unsigned long ret;
  switch (n) {
  case 1:
   /* Expanded __get_user_size(1). */
   do { ret = 0; (void)0; switch (1) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 8: (*(u8 *)to) = __get_user_bad(); break; default: (*(u8 *)to) = __get_user_bad(); } } while (0);
   return ret;
  case 2:
   /* Expanded __get_user_size(2). */
   do { ret = 0; (void)0; switch (2) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 8: (*(u16 *)to) = __get_user_bad(); break; default: (*(u16 *)to) = __get_user_bad(); } } while (0);
   return ret;
  case 4:
   /* Expanded __get_user_size(4). */
   do { ret = 0; (void)0; switch (4) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 8: (*(u32 *)to) = __get_user_bad(); break; default: (*(u32 *)to) = __get_user_bad(); } } while (0);
   return ret;
  }
 }
 return __copy_from_user_ll(to, from, n);
}
/*
 * Like __copy_from_user(), but the bulk fallback uses the cache-bypassing
 * __copy_from_user_ll_nocache(). Small constant sizes (1/2/4) still use the
 * ordinary inlined __get_user load. Returns 0 on success, else bytes not copied.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long __copy_from_user_nocache(void *to,
    const void *from, unsigned long n)
{
 might_fault();
 /* Macro-expanded ftrace branch profiling of __builtin_constant_p(n). */
 if (__builtin_constant_p(((__builtin_constant_p(n)))) ? !!((__builtin_constant_p(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 162, }; ______r = !!((__builtin_constant_p(n))); ______f.miss_hit[______r]++; ______r; })) {
  unsigned long ret;
  switch (n) {
  case 1:
   /* Expanded __get_user_size(1). */
   do { ret = 0; (void)0; switch (1) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u8 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (1), "0" (ret)); break; case 8: (*(u8 *)to) = __get_user_bad(); break; default: (*(u8 *)to) = __get_user_bad(); } } while (0);
   return ret;
  case 2:
   /* Expanded __get_user_size(2). */
   do { ret = 0; (void)0; switch (2) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u16 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (2), "0" (ret)); break; case 8: (*(u16 *)to) = __get_user_bad(); break; default: (*(u16 *)to) = __get_user_bad(); } } while (0);
   return ret;
  case 4:
   /* Expanded __get_user_size(4). */
   do { ret = 0; (void)0; switch (4) { case 1: asm volatile("1: mov""b"" %2,%""b""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""b"" %""b""1,%""b""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=q"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 2: asm volatile("1: mov""w"" %2,%""w""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""w"" %""w""1,%""w""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 4: asm volatile("1: mov""l"" %2,%""k""1\n" "2:\n" ".section .fixup,\"ax\"\n" "3: mov %3,%0\n" " xor""l"" %""k""1,%""k""1\n" " jmp 2b\n" ".previous\n" " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n" : "=r" (ret), "=r"(*(u32 *)to) : "m" ((*(struct __large_struct *)(from))), "i" (4), "0" (ret)); break; case 8: (*(u32 *)to) = __get_user_bad(); break; default: (*(u32 *)to) = __get_user_bad(); } } while (0);
   return ret;
  }
 }
 return __copy_from_user_ll_nocache(to, from, n);
}
/*
 * Preprocessed (cpp -E) Linux 3.0.4 uaccess helper, part of a GCC ICE
 * reproducer -- code must stay byte-identical, comments only.
 *
 * Copy @n bytes from user @from to kernel @to via the non-temporal
 * ("nocache") low-level routine.  The _nozero suffix of the callee
 * suggests the destination tail is NOT zero-filled on fault -- TODO
 * confirm against the out-of-view definition.
 * NOTE(review): always_inline appears twice; harmless macro-expansion
 * artifact (inline keyword itself expanded to the attribute).
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long
__copy_from_user_inatomic_nocache(void *to, const void *from,
 unsigned long n)
{
 return __copy_from_user_ll_nocache_nozero(to, from, n);
}
8972unsigned long __attribute__((warn_unused_result)) copy_to_user(void *to,
8973 const void *from, unsigned long n);
8974unsigned long __attribute__((warn_unused_result)) _copy_from_user(void *to,
8975 const void *from,
8976 unsigned long n);
8977extern void copy_from_user_overflow(void)
8978 __attribute__((warning("copy_from_user() buffer size is not provably correct")))
8979;
/*
 * Preprocessed copy_from_user() from arch/x86/include/asm/uaccess_32.h
 * (Linux 3.0.4; path visible in the ftrace string literals below).
 *
 * Object-size-checked user copy: if the compile-time size of @to
 * (__builtin_object_size) is unknown (-1) or at least @n, perform the
 * copy via _copy_from_user(); otherwise call copy_from_user_overflow(),
 * whose __attribute__((warning)) (declared just above in the file)
 * turns a provably-too-small buffer into a build-time diagnostic.
 * Returns the number of bytes NOT copied (0 on full success), per the
 * usual copy_from_user contract -- inferred from the callee name, TODO
 * confirm against _copy_from_user's definition.
 *
 * The enormous conditional is the branch-profiling expansion of
 * likely(sz == -1 || sz >= n): it records hit/miss counts in
 * per-callsite struct ftrace_branch_data records placed in the
 * _ftrace_annotated_branch/_ftrace_branch sections (presumably
 * CONFIG_PROFILE_ANNOTATED_BRANCHES -- config name inferred, confirm).
 * Do not reflow this line: the token stream is the ICE test input.
 */
static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result)) copy_from_user(void *to,
 const void *from,
 unsigned long n)
{
 int sz = __builtin_object_size(to, 0);
 if (__builtin_constant_p((((__builtin_constant_p(sz == -1 || sz >= n) ? !!(sz == -1 || sz >= n) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 208, }; ______r = __builtin_expect(!!(sz == -1 || sz >= n), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(sz == -1 || sz >= n) ? !!(sz == -1 || sz >= n) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 208, }; ______r = __builtin_expect(!!(sz == -1 || sz >= n), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 208, }; ______r = !!(((__builtin_constant_p(sz == -1 || sz >= n) ? !!(sz == -1 || sz >= n) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/uaccess_32.h", .line = 208, }; ______r = __builtin_expect(!!(sz == -1 || sz >= n), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
 n = _copy_from_user(to, from, n);
 else
 copy_from_user_overflow();
 return n;
}
8991long __attribute__((warn_unused_result)) strncpy_from_user(char *dst, const char *src,
8992 long count);
8993long __attribute__((warn_unused_result)) __strncpy_from_user(char *dst,
8994 const char *src, long count);
8995long strnlen_user(const char *str, long n);
8996unsigned long __attribute__((warn_unused_result)) clear_user(void *mem, unsigned long len);
8997unsigned long __attribute__((warn_unused_result)) __clear_user(void *mem, unsigned long len);
8998extern char __brk_base[], __brk_limit[];
8999extern struct exception_table_entry __stop___ex_table[];
9000extern void apic_timer_interrupt(void);
9001extern void x86_platform_ipi(void);
9002extern void error_interrupt(void);
9003extern void irq_work_interrupt(void);
9004extern void spurious_interrupt(void);
9005extern void thermal_interrupt(void);
9006extern void reschedule_interrupt(void);
9007extern void mce_self_interrupt(void);
9008extern void invalidate_interrupt(void);
9009extern void invalidate_interrupt0(void);
9010extern void invalidate_interrupt1(void);
9011extern void invalidate_interrupt2(void);
9012extern void invalidate_interrupt3(void);
9013extern void invalidate_interrupt4(void);
9014extern void invalidate_interrupt5(void);
9015extern void invalidate_interrupt6(void);
9016extern void invalidate_interrupt7(void);
9017extern void invalidate_interrupt8(void);
9018extern void invalidate_interrupt9(void);
9019extern void invalidate_interrupt10(void);
9020extern void invalidate_interrupt11(void);
9021extern void invalidate_interrupt12(void);
9022extern void invalidate_interrupt13(void);
9023extern void invalidate_interrupt14(void);
9024extern void invalidate_interrupt15(void);
9025extern void invalidate_interrupt16(void);
9026extern void invalidate_interrupt17(void);
9027extern void invalidate_interrupt18(void);
9028extern void invalidate_interrupt19(void);
9029extern void invalidate_interrupt20(void);
9030extern void invalidate_interrupt21(void);
9031extern void invalidate_interrupt22(void);
9032extern void invalidate_interrupt23(void);
9033extern void invalidate_interrupt24(void);
9034extern void invalidate_interrupt25(void);
9035extern void invalidate_interrupt26(void);
9036extern void invalidate_interrupt27(void);
9037extern void invalidate_interrupt28(void);
9038extern void invalidate_interrupt29(void);
9039extern void invalidate_interrupt30(void);
9040extern void invalidate_interrupt31(void);
9041extern void irq_move_cleanup_interrupt(void);
9042extern void reboot_interrupt(void);
9043extern void threshold_interrupt(void);
9044extern void call_function_interrupt(void);
9045extern void call_function_single_interrupt(void);
9046extern unsigned long io_apic_irqs;
9047extern void init_VISWS_APIC_irqs(void);
9048extern void setup_IO_APIC(void);
9049extern void disable_IO_APIC(void);
/*
 * Routing attributes for one IO-APIC pin: which IO-APIC, which pin,
 * and the line's trigger mode and polarity (int-typed in this kernel
 * version; exact encodings defined elsewhere -- not visible here).
 */
struct io_apic_irq_attr {
 int ioapic;
 int ioapic_pin;
 int trigger;
 int polarity;
};
/*
 * Convenience initializer: store the four routing parameters into
 * @irq_attr, one field per argument, in declaration order.  No
 * validation is performed.
 */
static inline __attribute__((always_inline)) void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
 int ioapic, int ioapic_pin,
 int trigger, int polarity)
{
 irq_attr->ioapic = ioapic;
 irq_attr->ioapic_pin = ioapic_pin;
 irq_attr->trigger = trigger;
 irq_attr->polarity = polarity;
}
/*
 * Mapping of an IRQ to its interrupt-remapping entry: owning Intel
 * IOMMU, IRTE index/sub-handle, and a mask byte.  Field semantics
 * beyond the names are defined by the (out-of-view) intr_remapping
 * code -- presumably interrupt remapping (VT-d); confirm there.
 */
struct irq_2_iommu {
 struct intel_iommu *iommu;
 u16 irte_index;
 u16 sub_handle;
 u8 irte_mask;
};
/*
 * Per-IRQ vector configuration: the list of IO-APIC pins feeding this
 * IRQ, the current and previous CPU affinity domains (cpumask_var_t),
 * the assigned vector number, and a 1-bit flag set while a vector
 * migration between domains is in flight.
 */
struct irq_cfg {
 struct irq_pin_list *irq_2_pin;
 cpumask_var_t domain;
 cpumask_var_t old_domain;
 u8 vector;
 u8 move_in_progress : 1;
};
9078extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
9079extern void send_cleanup_vector(struct irq_cfg *);
9080struct irq_data;
9081int __ioapic_set_affinity(struct irq_data *, const struct cpumask *,
9082 unsigned int *dest_id);
9083extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
9084extern void setup_ioapic_dest(void);
9085extern void enable_IO_APIC(void);
9086extern atomic_t irq_err_count;
9087extern atomic_t irq_mis_count;
9088extern void eisa_set_level_irq(unsigned int irq);
9089extern void smp_apic_timer_interrupt(struct pt_regs *);
9090extern void smp_spurious_interrupt(struct pt_regs *);
9091extern void smp_x86_platform_ipi(struct pt_regs *);
9092extern void smp_error_interrupt(struct pt_regs *);
9093extern __attribute__((regparm(0))) void smp_irq_move_cleanup_interrupt(void);
9094extern void smp_reschedule_interrupt(struct pt_regs *);
9095extern void smp_call_function_interrupt(struct pt_regs *);
9096extern void smp_call_function_single_interrupt(struct pt_regs *);
9097extern void smp_invalidate_interrupt(struct pt_regs *);
9098extern void (*__attribute__ ((__section__(".init.rodata"))) interrupt[256 -0x20])(void);
9099typedef int vector_irq_t[256];
9100extern __attribute__((section(".data..percpu" ""))) __typeof__(vector_irq_t) vector_irq;
9101extern void setup_vector_irq(int cpu);
9102extern void lock_vector_lock(void);
9103extern void unlock_vector_lock(void);
9104extern void __setup_vector_irq(int cpu);
9105struct irqaction;
9106extern int setup_irq(unsigned int irq, struct irqaction *new);
9107extern void remove_irq(unsigned int irq, struct irqaction *act);
9108extern void irq_cpu_online(void);
9109extern void irq_cpu_offline(void);
9110extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
9111void irq_move_irq(struct irq_data *data);
9112void irq_move_masked_irq(struct irq_data *data);
9113extern int no_irq_affinity;
9114extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
9115extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
9116extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
9117extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
9118extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
9119extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
9120extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
9121extern void handle_nested_irq(unsigned int irq);
9122extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
9123 irqreturn_t action_ret);
9124extern int noirqdebug_setup(char *str);
9125extern int can_request_irq(unsigned int irq, unsigned long irqflags);
9126extern struct irq_chip no_irq_chip;
9127extern struct irq_chip dummy_irq_chip;
9128extern void
9129irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
9130 irq_flow_handler_t handle, const char *name);
/*
 * Install @chip and flow handler @handle for @irq with no handler
 * name; delegates to irq_set_chip_and_handler_name() with a NULL
 * name ("((void *)0)" is the preprocessed NULL).
 */
static inline __attribute__((always_inline)) void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
 irq_flow_handler_t handle)
{
 irq_set_chip_and_handler_name(irq, chip, handle, ((void *)0));
}
9136extern void
9137__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
9138 const char *name);
/*
 * Set the flow handler for @irq as a normal (non-chained) handler:
 * is_chained = 0, name = NULL.
 */
static inline __attribute__((always_inline)) void
irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
{
 __irq_set_handler(irq, handle, 0, ((void *)0));
}
/*
 * Same as irq_set_handler() but with is_chained = 1, marking a
 * demultiplexing handler installed by an irqchip driver.
 */
static inline __attribute__((always_inline)) void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
{
 __irq_set_handler(irq, handle, 1, ((void *)0));
}
9149void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
/* Set status bits @set on @irq (clear mask 0 = clear nothing). */
static inline __attribute__((always_inline)) void irq_set_status_flags(unsigned int irq, unsigned long set)
{
 irq_modify_status(irq, 0, set);
}
/* Clear status bits @clr on @irq (set mask 0 = set nothing). */
static inline __attribute__((always_inline)) void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
 irq_modify_status(irq, clr, 0);
}
/* Mark @irq as excluded from autoprobing (sets IRQ_NOPROBE). */
static inline __attribute__((always_inline)) void irq_set_noprobe(unsigned int irq)
{
 irq_modify_status(irq, 0, IRQ_NOPROBE);
}
/* Re-enable autoprobing for @irq (clears IRQ_NOPROBE). */
static inline __attribute__((always_inline)) void irq_set_probe(unsigned int irq)
{
 irq_modify_status(irq, IRQ_NOPROBE, 0);
}
/* Exclude @irq from forced interrupt threading (sets IRQ_NOTHREAD). */
static inline __attribute__((always_inline)) void irq_set_nothread(unsigned int irq)
{
 irq_modify_status(irq, 0, IRQ_NOTHREAD);
}
/* Allow forced interrupt threading for @irq (clears IRQ_NOTHREAD). */
static inline __attribute__((always_inline)) void irq_set_thread(unsigned int irq)
{
 irq_modify_status(irq, IRQ_NOTHREAD, 0);
}
/*
 * Set or clear IRQ_NESTED_THREAD on @irq according to @nest.  The
 * conditional is the branch-profiling expansion of a plain
 * if (nest): it bumps a per-callsite miss/hit counter in the
 * _ftrace_branch section (callsite recorded as include/linux/irq.h
 * line 476) before using the boolean.  Do not reflow: the exact
 * token stream is the ICE test input.
 */
static inline __attribute__((always_inline)) void irq_set_nested_thread(unsigned int irq, bool nest)
{
 if (__builtin_constant_p(((nest))) ? !!((nest)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/irq.h", .line = 476, }; ______r = !!((nest)); ______f.miss_hit[______r]++; ______r; }))
 irq_set_status_flags(irq, IRQ_NESTED_THREAD);
 else
 irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
}
9181extern unsigned int create_irq_nr(unsigned int irq_want, int node);
9182extern int create_irq(void);
9183extern void destroy_irq(unsigned int irq);
9184extern void dynamic_irq_cleanup(unsigned int irq);
/*
 * "Init" a dynamically allocated IRQ by resetting its descriptor --
 * in this kernel version initialization and cleanup are the same
 * operation, so this simply forwards to dynamic_irq_cleanup().
 */
static inline __attribute__((always_inline)) void dynamic_irq_init(unsigned int irq)
{
 dynamic_irq_cleanup(irq);
}
9189extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
9190extern int irq_set_handler_data(unsigned int irq, void *data);
9191extern int irq_set_chip_data(unsigned int irq, void *data);
9192extern int irq_set_irq_type(unsigned int irq, unsigned int type);
9193extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
9194extern struct irq_data *irq_get_irq_data(unsigned int irq);
/*
 * Return the irq_chip bound to @irq, or NULL when the IRQ has no
 * irq_data (invalid/unallocated IRQ number).
 */
static inline __attribute__((always_inline)) struct irq_chip *irq_get_chip(unsigned int irq)
{
 struct irq_data *d = irq_get_irq_data(irq);
 return d ? d->chip : ((void *)0);
}
/* Field accessor: the chip of an already-resolved irq_data (@d must be non-NULL). */
static inline __attribute__((always_inline)) struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
{
 return d->chip;
}
/* Return @irq's chip-private data pointer, or NULL for an invalid IRQ. */
static inline __attribute__((always_inline)) void *irq_get_chip_data(unsigned int irq)
{
 struct irq_data *d = irq_get_irq_data(irq);
 return d ? d->chip_data : ((void *)0);
}
/* Field accessor: chip-private data of @d (@d must be non-NULL). */
static inline __attribute__((always_inline)) void *irq_data_get_irq_chip_data(struct irq_data *d)
{
 return d->chip_data;
}
/* Return @irq's handler-private data pointer, or NULL for an invalid IRQ. */
static inline __attribute__((always_inline)) void *irq_get_handler_data(unsigned int irq)
{
 struct irq_data *d = irq_get_irq_data(irq);
 return d ? d->handler_data : ((void *)0);
}
/* Field accessor: handler-private data of @d (@d must be non-NULL). */
static inline __attribute__((always_inline)) void *irq_data_get_irq_handler_data(struct irq_data *d)
{
 return d->handler_data;
}
/* Return the MSI descriptor attached to @irq, or NULL for an invalid IRQ. */
static inline __attribute__((always_inline)) struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
 struct irq_data *d = irq_get_irq_data(irq);
 return d ? d->msi_desc : ((void *)0);
}
/* Field accessor: MSI descriptor of @d (@d must be non-NULL). */
static inline __attribute__((always_inline)) struct msi_desc *irq_data_get_msi(struct irq_data *d)
{
 return d->msi_desc;
}
9231int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
9232void irq_free_descs(unsigned int irq, unsigned int cnt);
9233int irq_reserve_irqs(unsigned int from, unsigned int cnt);
/* Allocate one IRQ descriptor anywhere (irq = -1 lets the core pick) on @node. */
static inline __attribute__((always_inline)) int irq_alloc_desc(int node)
{
 return irq_alloc_descs(-1, 0, 1, node);
}
/* Allocate one IRQ descriptor at exactly number @at on @node. */
static inline __attribute__((always_inline)) int irq_alloc_desc_at(unsigned int at, int node)
{
 return irq_alloc_descs(at, at, 1, node);
}
/* Allocate one IRQ descriptor at the first free number >= @from on @node. */
static inline __attribute__((always_inline)) int irq_alloc_desc_from(unsigned int from, int node)
{
 return irq_alloc_descs(-1, from, 1, node);
}
/* Free the single descriptor for @irq (count = 1). */
static inline __attribute__((always_inline)) void irq_free_desc(unsigned int irq)
{
 irq_free_descs(irq, 1);
}
/* Reserve the single IRQ number @irq (count = 1). */
static inline __attribute__((always_inline)) int irq_reserve_irq(unsigned int irq)
{
 return irq_reserve_irqs(irq, 1);
}
/*
 * Register offsets used by the generic irq chip helpers: one offset
 * (relative to irq_chip_generic.reg_base) per chip operation.
 */
struct irq_chip_regs {
 unsigned long enable;
 unsigned long disable;
 unsigned long mask;
 unsigned long ack;
 unsigned long eoi;
 unsigned long type;
 unsigned long polarity;
};
/*
 * One flow-type variant of a generic chip: the embedded irq_chip
 * (container_of target of irq_data_get_chip_type() below), its
 * register layout, flow handler, and the trigger-type mask it serves.
 */
struct irq_chip_type {
 struct irq_chip chip;
 struct irq_chip_regs regs;
 irq_flow_handler_t handler;
 u32 type;
};
/*
 * Generic irq chip instance: raw spinlock protecting register access
 * (locked via irq_gc_lock()/irq_gc_unlock() below), register base,
 * the irq range it covers, cached register shadows (mask/type/
 * polarity), wake-up bookkeeping, and num_ct chip_types in the
 * trailing zero-length array (GNU extension; allocation is sized
 * elsewhere by irq_alloc_generic_chip()).
 */
struct irq_chip_generic {
 raw_spinlock_t lock;
 void *reg_base;
 unsigned int irq_base;
 unsigned int irq_cnt;
 u32 mask_cache;
 u32 type_cache;
 u32 polarity_cache;
 u32 wake_enabled;
 u32 wake_active;
 unsigned int num_ct;
 void *private;
 struct list_head list;
 struct irq_chip_type chip_types[0];
};
/*
 * Setup flags for irq_setup_generic_chip(): bit 0 = initialize the
 * mask cache from hardware, bit 1 = use nested locking.
 */
enum irq_gc_flags {
 IRQ_GC_INIT_MASK_CACHE = 1 << 0,
 IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
};
9288void irq_gc_noop(struct irq_data *d);
9289void irq_gc_mask_disable_reg(struct irq_data *d);
9290void irq_gc_mask_set_bit(struct irq_data *d);
9291void irq_gc_mask_clr_bit(struct irq_data *d);
9292void irq_gc_unmask_enable_reg(struct irq_data *d);
9293void irq_gc_ack_set_bit(struct irq_data *d);
9294void irq_gc_ack_clr_bit(struct irq_data *d);
9295void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
9296void irq_gc_eoi(struct irq_data *d);
9297int irq_gc_set_wake(struct irq_data *d, unsigned int on);
9298struct irq_chip_generic *
9299irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
9300 void *reg_base, irq_flow_handler_t handler);
9301void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
9302 enum irq_gc_flags flags, unsigned int clr,
9303 unsigned int set);
9304int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
9305void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
9306 unsigned int clr, unsigned int set);
/*
 * Recover the enclosing irq_chip_type from @d->chip.  The statement
 * expression is the preprocessed container_of(): subtract the offset
 * of the embedded .chip member from the chip pointer.
 */
static inline __attribute__((always_inline)) struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
{
 return ({ const typeof( ((struct irq_chip_type *)0)->chip ) *__mptr = (d->chip); (struct irq_chip_type *)( (char *)__mptr - __builtin_offsetof(struct irq_chip_type,chip) );});
}
/* Acquire the generic chip's raw spinlock (register-access serialization). */
static inline __attribute__((always_inline)) void irq_gc_lock(struct irq_chip_generic *gc)
{
 _raw_spin_lock(&gc->lock);
}
/* Release the generic chip's raw spinlock taken by irq_gc_lock(). */
static inline __attribute__((always_inline)) void irq_gc_unlock(struct irq_chip_generic *gc)
{
 _raw_spin_unlock(&gc->lock);
}
/*
 * Per-CPU interrupt statistics (x86): pending-softirq bitmask, NMI
 * count, and one counter per interrupt source.  Aligned to
 * 1 << 6 = 64 bytes -- one cache line per CPU to avoid false sharing.
 */
typedef struct {
 unsigned int __softirq_pending;
 unsigned int __nmi_count;
 unsigned int irq0_irqs;
 unsigned int apic_timer_irqs;
 unsigned int irq_spurious_count;
 unsigned int x86_platform_ipis;
 unsigned int apic_perf_irqs;
 unsigned int apic_irq_work_irqs;
 unsigned int irq_resched_count;
 unsigned int irq_call_count;
 unsigned int irq_tlb_count;
 unsigned int irq_thermal_count;
 unsigned int irq_threshold_count;
} __attribute__((__aligned__((1 << (6))))) irq_cpustat_t;
9334extern __attribute__((section(".data..percpu" ""))) __typeof__(irq_cpustat_t) irq_stat __attribute__((__aligned__((1 << (6)))));
9335extern void ack_bad_irq(unsigned int irq);
9336extern u64 arch_irq_stat_cpu(unsigned int cpu);
9337extern u64 arch_irq_stat(void);
9338extern void synchronize_irq(unsigned int irq);
9339struct task_struct;
9340extern void account_system_vtime(struct task_struct *tsk);
9341extern void irq_enter(void);
9342extern void irq_exit(void);
/*
 * Legacy 32-bit stat ABI structure (userspace-visible layout; field
 * widths are part of the syscall ABI and must not change).
 */
struct stat {
 unsigned long st_dev;
 unsigned long st_ino;
 unsigned short st_mode;
 unsigned short st_nlink;
 unsigned short st_uid;
 unsigned short st_gid;
 unsigned long st_rdev;
 unsigned long st_size;
 unsigned long st_blksize;
 unsigned long st_blocks;
 unsigned long st_atime;
 unsigned long st_atime_nsec;
 unsigned long st_mtime;
 unsigned long st_mtime_nsec;
 unsigned long st_ctime;
 unsigned long st_ctime_nsec;
 unsigned long __unused4;
 unsigned long __unused5;
};
/*
 * Large-file stat64 ABI structure: 64-bit dev/rdev/size/blocks/ino
 * with explicit padding bytes; __st_ino keeps the truncated 32-bit
 * inode for old callers.  ABI layout -- do not touch.
 * NOTE(review): st_mtime_nsec is unsigned int while the other *_nsec
 * fields are unsigned long; on ILP32 both are 32 bits, but confirm
 * against arch/x86/include/asm/stat.h that this is not a dump
 * artifact.
 */
struct stat64 {
 unsigned long long st_dev;
 unsigned char __pad0[4];
 unsigned long __st_ino;
 unsigned int st_mode;
 unsigned int st_nlink;
 unsigned long st_uid;
 unsigned long st_gid;
 unsigned long long st_rdev;
 unsigned char __pad3[4];
 long long st_size;
 unsigned long st_blksize;
 unsigned long long st_blocks;
 unsigned long st_atime;
 unsigned long st_atime_nsec;
 unsigned long st_mtime;
 unsigned int st_mtime_nsec;
 unsigned long st_ctime;
 unsigned long st_ctime_nsec;
 unsigned long long st_ino;
};
/*
 * Ancient pre-stat stat ABI (mostly 16-bit fields, no nanosecond
 * times); retained only for the oldstat syscall compatibility.
 */
struct __old_kernel_stat {
 unsigned short st_dev;
 unsigned short st_ino;
 unsigned short st_mode;
 unsigned short st_nlink;
 unsigned short st_uid;
 unsigned short st_gid;
 unsigned short st_rdev;
 unsigned long st_size;
 unsigned long st_atime;
 unsigned long st_mtime;
 unsigned long st_ctime;
};
/*
 * Kernel-internal stat representation: full-width types (u64 inode,
 * loff_t size, struct timespec times) independent of any userspace
 * ABI; converted to stat/stat64/__old_kernel_stat at the syscall
 * boundary.
 */
struct kstat {
 u64 ino;
 dev_t dev;
 umode_t mode;
 unsigned int nlink;
 uid_t uid;
 gid_t gid;
 dev_t rdev;
 loff_t size;
 struct timespec atime;
 struct timespec mtime;
 struct timespec ctime;
 unsigned long blksize;
 unsigned long long blocks;
};
9412struct completion;
/*
 * Argument block for the legacy sysctl(2) syscall: binary name path
 * (name/nlen), old-value output buffer with in/out length, new-value
 * input buffer, and reserved padding.
 */
struct __sysctl_args {
 int *name;
 int nlen;
 void *oldval;
 size_t *oldlenp;
 void *newval;
 size_t newlen;
 unsigned long __unused[4];
};
9422enum
9423{
9424 CTL_KERN=1,
9425 CTL_VM=2,
9426 CTL_NET=3,
9427 CTL_PROC=4,
9428 CTL_FS=5,
9429 CTL_DEBUG=6,
9430 CTL_DEV=7,
9431 CTL_BUS=8,
9432 CTL_ABI=9,
9433 CTL_CPU=10,
9434 CTL_ARLAN=254,
9435 CTL_S390DBF=5677,
9436 CTL_SUNRPC=7249,
9437 CTL_PM=9899,
9438 CTL_FRV=9898,
9439};
9440enum
9441{
9442 CTL_BUS_ISA=1
9443};
9444enum
9445{
9446 INOTIFY_MAX_USER_INSTANCES=1,
9447 INOTIFY_MAX_USER_WATCHES=2,
9448 INOTIFY_MAX_QUEUED_EVENTS=3
9449};
9450enum
9451{
9452 KERN_OSTYPE=1,
9453 KERN_OSRELEASE=2,
9454 KERN_OSREV=3,
9455 KERN_VERSION=4,
9456 KERN_SECUREMASK=5,
9457 KERN_PROF=6,
9458 KERN_NODENAME=7,
9459 KERN_DOMAINNAME=8,
9460 KERN_PANIC=15,
9461 KERN_REALROOTDEV=16,
9462 KERN_SPARC_REBOOT=21,
9463 KERN_CTLALTDEL=22,
9464 KERN_PRINTK=23,
9465 KERN_NAMETRANS=24,
9466 KERN_PPC_HTABRECLAIM=25,
9467 KERN_PPC_ZEROPAGED=26,
9468 KERN_PPC_POWERSAVE_NAP=27,
9469 KERN_MODPROBE=28,
9470 KERN_SG_BIG_BUFF=29,
9471 KERN_ACCT=30,
9472 KERN_PPC_L2CR=31,
9473 KERN_RTSIGNR=32,
9474 KERN_RTSIGMAX=33,
9475 KERN_SHMMAX=34,
9476 KERN_MSGMAX=35,
9477 KERN_MSGMNB=36,
9478 KERN_MSGPOOL=37,
9479 KERN_SYSRQ=38,
9480 KERN_MAX_THREADS=39,
9481 KERN_RANDOM=40,
9482 KERN_SHMALL=41,
9483 KERN_MSGMNI=42,
9484 KERN_SEM=43,
9485 KERN_SPARC_STOP_A=44,
9486 KERN_SHMMNI=45,
9487 KERN_OVERFLOWUID=46,
9488 KERN_OVERFLOWGID=47,
9489 KERN_SHMPATH=48,
9490 KERN_HOTPLUG=49,
9491 KERN_IEEE_EMULATION_WARNINGS=50,
9492 KERN_S390_USER_DEBUG_LOGGING=51,
9493 KERN_CORE_USES_PID=52,
9494 KERN_TAINTED=53,
9495 KERN_CADPID=54,
9496 KERN_PIDMAX=55,
9497 KERN_CORE_PATTERN=56,
9498 KERN_PANIC_ON_OOPS=57,
9499 KERN_HPPA_PWRSW=58,
9500 KERN_HPPA_UNALIGNED=59,
9501 KERN_PRINTK_RATELIMIT=60,
9502 KERN_PRINTK_RATELIMIT_BURST=61,
9503 KERN_PTY=62,
9504 KERN_NGROUPS_MAX=63,
9505 KERN_SPARC_SCONS_PWROFF=64,
9506 KERN_HZ_TIMER=65,
9507 KERN_UNKNOWN_NMI_PANIC=66,
9508 KERN_BOOTLOADER_TYPE=67,
9509 KERN_RANDOMIZE=68,
9510 KERN_SETUID_DUMPABLE=69,
9511 KERN_SPIN_RETRY=70,
9512 KERN_ACPI_VIDEO_FLAGS=71,
9513 KERN_IA64_UNALIGNED=72,
9514 KERN_COMPAT_LOG=73,
9515 KERN_MAX_LOCK_DEPTH=74,
9516 KERN_NMI_WATCHDOG=75,
9517 KERN_PANIC_ON_NMI=76,
9518};
9519enum
9520{
9521 VM_UNUSED1=1,
9522 VM_UNUSED2=2,
9523 VM_UNUSED3=3,
9524 VM_UNUSED4=4,
9525 VM_OVERCOMMIT_MEMORY=5,
9526 VM_UNUSED5=6,
9527 VM_UNUSED7=7,
9528 VM_UNUSED8=8,
9529 VM_UNUSED9=9,
9530 VM_PAGE_CLUSTER=10,
9531 VM_DIRTY_BACKGROUND=11,
9532 VM_DIRTY_RATIO=12,
9533 VM_DIRTY_WB_CS=13,
9534 VM_DIRTY_EXPIRE_CS=14,
9535 VM_NR_PDFLUSH_THREADS=15,
9536 VM_OVERCOMMIT_RATIO=16,
9537 VM_PAGEBUF=17,
9538 VM_HUGETLB_PAGES=18,
9539 VM_SWAPPINESS=19,
9540 VM_LOWMEM_RESERVE_RATIO=20,
9541 VM_MIN_FREE_KBYTES=21,
9542 VM_MAX_MAP_COUNT=22,
9543 VM_LAPTOP_MODE=23,
9544 VM_BLOCK_DUMP=24,
9545 VM_HUGETLB_GROUP=25,
9546 VM_VFS_CACHE_PRESSURE=26,
9547 VM_LEGACY_VA_LAYOUT=27,
9548 VM_SWAP_TOKEN_TIMEOUT=28,
9549 VM_DROP_PAGECACHE=29,
9550 VM_PERCPU_PAGELIST_FRACTION=30,
9551 VM_ZONE_RECLAIM_MODE=31,
9552 VM_MIN_UNMAPPED=32,
9553 VM_PANIC_ON_OOM=33,
9554 VM_VDSO_ENABLED=34,
9555 VM_MIN_SLAB=35,
9556};
9557enum
9558{
9559 NET_CORE=1,
9560 NET_ETHER=2,
9561 NET_802=3,
9562 NET_UNIX=4,
9563 NET_IPV4=5,
9564 NET_IPX=6,
9565 NET_ATALK=7,
9566 NET_NETROM=8,
9567 NET_AX25=9,
9568 NET_BRIDGE=10,
9569 NET_ROSE=11,
9570 NET_IPV6=12,
9571 NET_X25=13,
9572 NET_TR=14,
9573 NET_DECNET=15,
9574 NET_ECONET=16,
9575 NET_SCTP=17,
9576 NET_LLC=18,
9577 NET_NETFILTER=19,
9578 NET_DCCP=20,
9579 NET_IRDA=412,
9580};
9581enum
9582{
9583 RANDOM_POOLSIZE=1,
9584 RANDOM_ENTROPY_COUNT=2,
9585 RANDOM_READ_THRESH=3,
9586 RANDOM_WRITE_THRESH=4,
9587 RANDOM_BOOT_ID=5,
9588 RANDOM_UUID=6
9589};
9590enum
9591{
9592 PTY_MAX=1,
9593 PTY_NR=2
9594};
9595enum
9596{
9597 BUS_ISA_MEM_BASE=1,
9598 BUS_ISA_PORT_BASE=2,
9599 BUS_ISA_PORT_SHIFT=3
9600};
9601enum
9602{
9603 NET_CORE_WMEM_MAX=1,
9604 NET_CORE_RMEM_MAX=2,
9605 NET_CORE_WMEM_DEFAULT=3,
9606 NET_CORE_RMEM_DEFAULT=4,
9607 NET_CORE_MAX_BACKLOG=6,
9608 NET_CORE_FASTROUTE=7,
9609 NET_CORE_MSG_COST=8,
9610 NET_CORE_MSG_BURST=9,
9611 NET_CORE_OPTMEM_MAX=10,
9612 NET_CORE_HOT_LIST_LENGTH=11,
9613 NET_CORE_DIVERT_VERSION=12,
9614 NET_CORE_NO_CONG_THRESH=13,
9615 NET_CORE_NO_CONG=14,
9616 NET_CORE_LO_CONG=15,
9617 NET_CORE_MOD_CONG=16,
9618 NET_CORE_DEV_WEIGHT=17,
9619 NET_CORE_SOMAXCONN=18,
9620 NET_CORE_BUDGET=19,
9621 NET_CORE_AEVENT_ETIME=20,
9622 NET_CORE_AEVENT_RSEQTH=21,
9623 NET_CORE_WARNINGS=22,
9624};
9625enum
9626{
9627 NET_UNIX_DESTROY_DELAY=1,
9628 NET_UNIX_DELETE_DELAY=2,
9629 NET_UNIX_MAX_DGRAM_QLEN=3,
9630};
9631enum
9632{
9633 NET_NF_CONNTRACK_MAX=1,
9634 NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
9635 NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
9636 NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
9637 NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
9638 NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
9639 NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
9640 NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
9641 NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
9642 NET_NF_CONNTRACK_UDP_TIMEOUT=10,
9643 NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
9644 NET_NF_CONNTRACK_ICMP_TIMEOUT=12,
9645 NET_NF_CONNTRACK_GENERIC_TIMEOUT=13,
9646 NET_NF_CONNTRACK_BUCKETS=14,
9647 NET_NF_CONNTRACK_LOG_INVALID=15,
9648 NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
9649 NET_NF_CONNTRACK_TCP_LOOSE=17,
9650 NET_NF_CONNTRACK_TCP_BE_LIBERAL=18,
9651 NET_NF_CONNTRACK_TCP_MAX_RETRANS=19,
9652 NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
9653 NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
9654 NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
9655 NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
9656 NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
9657 NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
9658 NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
9659 NET_NF_CONNTRACK_COUNT=27,
9660 NET_NF_CONNTRACK_ICMPV6_TIMEOUT=28,
9661 NET_NF_CONNTRACK_FRAG6_TIMEOUT=29,
9662 NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30,
9663 NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31,
9664 NET_NF_CONNTRACK_CHECKSUM=32,
9665};
9666enum
9667{
9668 NET_IPV4_FORWARD=8,
9669 NET_IPV4_DYNADDR=9,
9670 NET_IPV4_CONF=16,
9671 NET_IPV4_NEIGH=17,
9672 NET_IPV4_ROUTE=18,
9673 NET_IPV4_FIB_HASH=19,
9674 NET_IPV4_NETFILTER=20,
9675 NET_IPV4_TCP_TIMESTAMPS=33,
9676 NET_IPV4_TCP_WINDOW_SCALING=34,
9677 NET_IPV4_TCP_SACK=35,
9678 NET_IPV4_TCP_RETRANS_COLLAPSE=36,
9679 NET_IPV4_DEFAULT_TTL=37,
9680 NET_IPV4_AUTOCONFIG=38,
9681 NET_IPV4_NO_PMTU_DISC=39,
9682 NET_IPV4_TCP_SYN_RETRIES=40,
9683 NET_IPV4_IPFRAG_HIGH_THRESH=41,
9684 NET_IPV4_IPFRAG_LOW_THRESH=42,
9685 NET_IPV4_IPFRAG_TIME=43,
9686 NET_IPV4_TCP_MAX_KA_PROBES=44,
9687 NET_IPV4_TCP_KEEPALIVE_TIME=45,
9688 NET_IPV4_TCP_KEEPALIVE_PROBES=46,
9689 NET_IPV4_TCP_RETRIES1=47,
9690 NET_IPV4_TCP_RETRIES2=48,
9691 NET_IPV4_TCP_FIN_TIMEOUT=49,
9692 NET_IPV4_IP_MASQ_DEBUG=50,
9693 NET_TCP_SYNCOOKIES=51,
9694 NET_TCP_STDURG=52,
9695 NET_TCP_RFC1337=53,
9696 NET_TCP_SYN_TAILDROP=54,
9697 NET_TCP_MAX_SYN_BACKLOG=55,
9698 NET_IPV4_LOCAL_PORT_RANGE=56,
9699 NET_IPV4_ICMP_ECHO_IGNORE_ALL=57,
9700 NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS=58,
9701 NET_IPV4_ICMP_SOURCEQUENCH_RATE=59,
9702 NET_IPV4_ICMP_DESTUNREACH_RATE=60,
9703 NET_IPV4_ICMP_TIMEEXCEED_RATE=61,
9704 NET_IPV4_ICMP_PARAMPROB_RATE=62,
9705 NET_IPV4_ICMP_ECHOREPLY_RATE=63,
9706 NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES=64,
9707 NET_IPV4_IGMP_MAX_MEMBERSHIPS=65,
9708 NET_TCP_TW_RECYCLE=66,
9709 NET_IPV4_ALWAYS_DEFRAG=67,
9710 NET_IPV4_TCP_KEEPALIVE_INTVL=68,
9711 NET_IPV4_INET_PEER_THRESHOLD=69,
9712 NET_IPV4_INET_PEER_MINTTL=70,
9713 NET_IPV4_INET_PEER_MAXTTL=71,
9714 NET_IPV4_INET_PEER_GC_MINTIME=72,
9715 NET_IPV4_INET_PEER_GC_MAXTIME=73,
9716 NET_TCP_ORPHAN_RETRIES=74,
9717 NET_TCP_ABORT_ON_OVERFLOW=75,
9718 NET_TCP_SYNACK_RETRIES=76,
9719 NET_TCP_MAX_ORPHANS=77,
9720 NET_TCP_MAX_TW_BUCKETS=78,
9721 NET_TCP_FACK=79,
9722 NET_TCP_REORDERING=80,
9723 NET_TCP_ECN=81,
9724 NET_TCP_DSACK=82,
9725 NET_TCP_MEM=83,
9726 NET_TCP_WMEM=84,
9727 NET_TCP_RMEM=85,
9728 NET_TCP_APP_WIN=86,
9729 NET_TCP_ADV_WIN_SCALE=87,
9730 NET_IPV4_NONLOCAL_BIND=88,
9731 NET_IPV4_ICMP_RATELIMIT=89,
9732 NET_IPV4_ICMP_RATEMASK=90,
9733 NET_TCP_TW_REUSE=91,
9734 NET_TCP_FRTO=92,
9735 NET_TCP_LOW_LATENCY=93,
9736 NET_IPV4_IPFRAG_SECRET_INTERVAL=94,
9737 NET_IPV4_IGMP_MAX_MSF=96,
9738 NET_TCP_NO_METRICS_SAVE=97,
9739 NET_TCP_DEFAULT_WIN_SCALE=105,
9740 NET_TCP_MODERATE_RCVBUF=106,
9741 NET_TCP_TSO_WIN_DIVISOR=107,
9742 NET_TCP_BIC_BETA=108,
9743 NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109,
9744 NET_TCP_CONG_CONTROL=110,
9745 NET_TCP_ABC=111,
9746 NET_IPV4_IPFRAG_MAX_DIST=112,
9747 NET_TCP_MTU_PROBING=113,
9748 NET_TCP_BASE_MSS=114,
9749 NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115,
9750 NET_TCP_DMA_COPYBREAK=116,
9751 NET_TCP_SLOW_START_AFTER_IDLE=117,
9752 NET_CIPSOV4_CACHE_ENABLE=118,
9753 NET_CIPSOV4_CACHE_BUCKET_SIZE=119,
9754 NET_CIPSOV4_RBM_OPTFMT=120,
9755 NET_CIPSOV4_RBM_STRICTVALID=121,
9756 NET_TCP_AVAIL_CONG_CONTROL=122,
9757 NET_TCP_ALLOWED_CONG_CONTROL=123,
9758 NET_TCP_MAX_SSTHRESH=124,
9759 NET_TCP_FRTO_RESPONSE=125,
9760};
/* Binary sysctl ID numbers for /proc/sys/net/ipv4/route/ entries
 * (historical sysctl(2) interface; values are part of the kernel ABI). */
enum {
 NET_IPV4_ROUTE_FLUSH=1,
 NET_IPV4_ROUTE_MIN_DELAY=2,
 NET_IPV4_ROUTE_MAX_DELAY=3,
 NET_IPV4_ROUTE_GC_THRESH=4,
 NET_IPV4_ROUTE_MAX_SIZE=5,
 NET_IPV4_ROUTE_GC_MIN_INTERVAL=6,
 NET_IPV4_ROUTE_GC_TIMEOUT=7,
 NET_IPV4_ROUTE_GC_INTERVAL=8,
 NET_IPV4_ROUTE_REDIRECT_LOAD=9,
 NET_IPV4_ROUTE_REDIRECT_NUMBER=10,
 NET_IPV4_ROUTE_REDIRECT_SILENCE=11,
 NET_IPV4_ROUTE_ERROR_COST=12,
 NET_IPV4_ROUTE_ERROR_BURST=13,
 NET_IPV4_ROUTE_GC_ELASTICITY=14,
 NET_IPV4_ROUTE_MTU_EXPIRES=15,
 NET_IPV4_ROUTE_MIN_PMTU=16,
 NET_IPV4_ROUTE_MIN_ADVMSS=17,
 NET_IPV4_ROUTE_SECRET_INTERVAL=18,
 NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS=19,
};
/* Pseudo-interface selectors shared by per-device conf tables:
 * negative IDs select the "all"/"default" aggregate entries. */
enum
{
 NET_PROTO_CONF_ALL=-2,
 NET_PROTO_CONF_DEFAULT=-3
};
/* Per-device IPv4 configuration IDs (/proc/sys/net/ipv4/conf/<dev>/). */
enum
{
 NET_IPV4_CONF_FORWARDING=1,
 NET_IPV4_CONF_MC_FORWARDING=2,
 NET_IPV4_CONF_PROXY_ARP=3,
 NET_IPV4_CONF_ACCEPT_REDIRECTS=4,
 NET_IPV4_CONF_SECURE_REDIRECTS=5,
 NET_IPV4_CONF_SEND_REDIRECTS=6,
 NET_IPV4_CONF_SHARED_MEDIA=7,
 NET_IPV4_CONF_RP_FILTER=8,
 NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE=9,
 NET_IPV4_CONF_BOOTP_RELAY=10,
 NET_IPV4_CONF_LOG_MARTIANS=11,
 NET_IPV4_CONF_TAG=12,
 NET_IPV4_CONF_ARPFILTER=13,
 NET_IPV4_CONF_MEDIUM_ID=14,
 NET_IPV4_CONF_NOXFRM=15,
 NET_IPV4_CONF_NOPOLICY=16,
 NET_IPV4_CONF_FORCE_IGMP_VERSION=17,
 NET_IPV4_CONF_ARP_ANNOUNCE=18,
 NET_IPV4_CONF_ARP_IGNORE=19,
 NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
 NET_IPV4_CONF_ARP_ACCEPT=21,
 NET_IPV4_CONF_ARP_NOTIFY=22,
};
/* IPv4 netfilter connection-tracking sysctl IDs (timeouts per TCP/UDP/
 * ICMP/SCTP state, table sizing and debug knobs). */
enum
{
 NET_IPV4_NF_CONNTRACK_MAX=1,
 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
 NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT=10,
 NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
 NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT=12,
 NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT=13,
 NET_IPV4_NF_CONNTRACK_BUCKETS=14,
 NET_IPV4_NF_CONNTRACK_LOG_INVALID=15,
 NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
 NET_IPV4_NF_CONNTRACK_TCP_LOOSE=17,
 NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL=18,
 NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS=19,
 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
 NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
 NET_IPV4_NF_CONNTRACK_COUNT=27,
 NET_IPV4_NF_CONNTRACK_CHECKSUM=28,
};
/* IPv6 top-level sysctl IDs: subdirectory selectors (conf/neigh/route/icmp)
 * followed by reassembly and multicast tunables. */
enum {
 NET_IPV6_CONF=16,
 NET_IPV6_NEIGH=17,
 NET_IPV6_ROUTE=18,
 NET_IPV6_ICMP=19,
 NET_IPV6_BINDV6ONLY=20,
 NET_IPV6_IP6FRAG_HIGH_THRESH=21,
 NET_IPV6_IP6FRAG_LOW_THRESH=22,
 NET_IPV6_IP6FRAG_TIME=23,
 NET_IPV6_IP6FRAG_SECRET_INTERVAL=24,
 NET_IPV6_MLD_MAX_MSF=25,
};
/* IPv6 routing-table garbage-collection sysctl IDs. */
enum {
 NET_IPV6_ROUTE_FLUSH=1,
 NET_IPV6_ROUTE_GC_THRESH=2,
 NET_IPV6_ROUTE_MAX_SIZE=3,
 NET_IPV6_ROUTE_GC_MIN_INTERVAL=4,
 NET_IPV6_ROUTE_GC_TIMEOUT=5,
 NET_IPV6_ROUTE_GC_INTERVAL=6,
 NET_IPV6_ROUTE_GC_ELASTICITY=7,
 NET_IPV6_ROUTE_MTU_EXPIRES=8,
 NET_IPV6_ROUTE_MIN_ADVMSS=9,
 NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS=10
};
/* Per-device IPv6 configuration IDs; note 24 is intentionally unassigned
 * (ACCEPT_SOURCE_ROUTE jumps to 25), so __NET_IPV6_MAX ends up as 26. */
enum {
 NET_IPV6_FORWARDING=1,
 NET_IPV6_HOP_LIMIT=2,
 NET_IPV6_MTU=3,
 NET_IPV6_ACCEPT_RA=4,
 NET_IPV6_ACCEPT_REDIRECTS=5,
 NET_IPV6_AUTOCONF=6,
 NET_IPV6_DAD_TRANSMITS=7,
 NET_IPV6_RTR_SOLICITS=8,
 NET_IPV6_RTR_SOLICIT_INTERVAL=9,
 NET_IPV6_RTR_SOLICIT_DELAY=10,
 NET_IPV6_USE_TEMPADDR=11,
 NET_IPV6_TEMP_VALID_LFT=12,
 NET_IPV6_TEMP_PREFERED_LFT=13,
 NET_IPV6_REGEN_MAX_RETRY=14,
 NET_IPV6_MAX_DESYNC_FACTOR=15,
 NET_IPV6_MAX_ADDRESSES=16,
 NET_IPV6_FORCE_MLD_VERSION=17,
 NET_IPV6_ACCEPT_RA_DEFRTR=18,
 NET_IPV6_ACCEPT_RA_PINFO=19,
 NET_IPV6_ACCEPT_RA_RTR_PREF=20,
 NET_IPV6_RTR_PROBE_INTERVAL=21,
 NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
 NET_IPV6_PROXY_NDP=23,
 NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
 __NET_IPV6_MAX
};
/* ICMPv6 sysctl IDs. */
enum {
 NET_IPV6_ICMP_RATELIMIT=1
};
/* Neighbour-table (ARP/NDISC) sysctl IDs, shared between IPv4 and IPv6. */
enum {
 NET_NEIGH_MCAST_SOLICIT=1,
 NET_NEIGH_UCAST_SOLICIT=2,
 NET_NEIGH_APP_SOLICIT=3,
 NET_NEIGH_RETRANS_TIME=4,
 NET_NEIGH_REACHABLE_TIME=5,
 NET_NEIGH_DELAY_PROBE_TIME=6,
 NET_NEIGH_GC_STALE_TIME=7,
 NET_NEIGH_UNRES_QLEN=8,
 NET_NEIGH_PROXY_QLEN=9,
 NET_NEIGH_ANYCAST_DELAY=10,
 NET_NEIGH_PROXY_DELAY=11,
 NET_NEIGH_LOCKTIME=12,
 NET_NEIGH_GC_INTERVAL=13,
 NET_NEIGH_GC_THRESH1=14,
 NET_NEIGH_GC_THRESH2=15,
 NET_NEIGH_GC_THRESH3=16,
 NET_NEIGH_RETRANS_TIME_MS=17,
 NET_NEIGH_REACHABLE_TIME_MS=18,
};
/* DCCP sysctl IDs. */
enum {
 NET_DCCP_DEFAULT=1,
};
/* IPX sysctl IDs. */
enum {
 NET_IPX_PPROP_BROADCASTING=1,
 NET_IPX_FORWARDING=2
};
/* LLC sysctl subdirectory selectors. */
enum {
 NET_LLC2=1,
 NET_LLC_STATION=2,
};
/* LLC2 timeout subdirectory selector. */
enum {
 NET_LLC2_TIMEOUT=1,
};
/* LLC station sysctl IDs. */
enum {
 NET_LLC_STATION_ACK_TIMEOUT=1,
};
/* LLC2 per-timer sysctl IDs. */
enum {
 NET_LLC2_ACK_TIMEOUT=1,
 NET_LLC2_P_TIMEOUT=2,
 NET_LLC2_REJ_TIMEOUT=3,
 NET_LLC2_BUSY_TIMEOUT=4,
};
/* AppleTalk AARP sysctl IDs. */
enum {
 NET_ATALK_AARP_EXPIRY_TIME=1,
 NET_ATALK_AARP_TICK_TIME=2,
 NET_ATALK_AARP_RETRANSMIT_LIMIT=3,
 NET_ATALK_AARP_RESOLVE_TIME=4
};
/* NET/ROM (amateur radio) sysctl IDs. */
enum {
 NET_NETROM_DEFAULT_PATH_QUALITY=1,
 NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER=2,
 NET_NETROM_NETWORK_TTL_INITIALISER=3,
 NET_NETROM_TRANSPORT_TIMEOUT=4,
 NET_NETROM_TRANSPORT_MAXIMUM_TRIES=5,
 NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY=6,
 NET_NETROM_TRANSPORT_BUSY_DELAY=7,
 NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE=8,
 NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT=9,
 NET_NETROM_ROUTING_CONTROL=10,
 NET_NETROM_LINK_FAILS_COUNT=11,
 NET_NETROM_RESET=12
};
/* AX.25 (amateur radio) sysctl IDs. */
enum {
 NET_AX25_IP_DEFAULT_MODE=1,
 NET_AX25_DEFAULT_MODE=2,
 NET_AX25_BACKOFF_TYPE=3,
 NET_AX25_CONNECT_MODE=4,
 NET_AX25_STANDARD_WINDOW=5,
 NET_AX25_EXTENDED_WINDOW=6,
 NET_AX25_T1_TIMEOUT=7,
 NET_AX25_T2_TIMEOUT=8,
 NET_AX25_T3_TIMEOUT=9,
 NET_AX25_IDLE_TIMEOUT=10,
 NET_AX25_N2=11,
 NET_AX25_PACLEN=12,
 NET_AX25_PROTOCOL=13,
 NET_AX25_DAMA_SLAVE_TIMEOUT=14
};
/* ROSE (amateur radio) sysctl IDs. */
enum {
 NET_ROSE_RESTART_REQUEST_TIMEOUT=1,
 NET_ROSE_CALL_REQUEST_TIMEOUT=2,
 NET_ROSE_RESET_REQUEST_TIMEOUT=3,
 NET_ROSE_CLEAR_REQUEST_TIMEOUT=4,
 NET_ROSE_ACK_HOLD_BACK_TIMEOUT=5,
 NET_ROSE_ROUTING_CONTROL=6,
 NET_ROSE_LINK_FAIL_TIMEOUT=7,
 NET_ROSE_MAX_VCS=8,
 NET_ROSE_WINDOW_SIZE=9,
 NET_ROSE_NO_ACTIVITY_TIMEOUT=10
};
/* X.25 sysctl IDs. */
enum {
 NET_X25_RESTART_REQUEST_TIMEOUT=1,
 NET_X25_CALL_REQUEST_TIMEOUT=2,
 NET_X25_RESET_REQUEST_TIMEOUT=3,
 NET_X25_CLEAR_REQUEST_TIMEOUT=4,
 NET_X25_ACK_HOLD_BACK_TIMEOUT=5,
 NET_X25_FORWARD=6
};
/* Token Ring sysctl IDs. */
enum
{
 NET_TR_RIF_TIMEOUT=1
};
/* DECnet node-level sysctl IDs; DEBUG_LEVEL is deliberately out-of-band
 * at 255. */
enum {
 NET_DECNET_NODE_TYPE = 1,
 NET_DECNET_NODE_ADDRESS = 2,
 NET_DECNET_NODE_NAME = 3,
 NET_DECNET_DEFAULT_DEVICE = 4,
 NET_DECNET_TIME_WAIT = 5,
 NET_DECNET_DN_COUNT = 6,
 NET_DECNET_DI_COUNT = 7,
 NET_DECNET_DR_COUNT = 8,
 NET_DECNET_DST_GC_INTERVAL = 9,
 NET_DECNET_CONF = 10,
 NET_DECNET_NO_FC_MAX_CWND = 11,
 NET_DECNET_MEM = 12,
 NET_DECNET_RMEM = 13,
 NET_DECNET_WMEM = 14,
 NET_DECNET_DEBUG_LEVEL = 255
};
/* DECnet per-link-type pseudo-device selectors (negative IDs, same
 * convention as NET_PROTO_CONF_*). */
enum {
 NET_DECNET_CONF_LOOPBACK = -2,
 NET_DECNET_CONF_DDCMP = -3,
 NET_DECNET_CONF_PPP = -4,
 NET_DECNET_CONF_X25 = -5,
 NET_DECNET_CONF_GRE = -6,
 NET_DECNET_CONF_ETHER = -7
};
/* DECnet per-device sysctl IDs. */
enum {
 NET_DECNET_CONF_DEV_PRIORITY = 1,
 NET_DECNET_CONF_DEV_T1 = 2,
 NET_DECNET_CONF_DEV_T2 = 3,
 NET_DECNET_CONF_DEV_T3 = 4,
 NET_DECNET_CONF_DEV_FORWARDING = 5,
 NET_DECNET_CONF_DEV_BLKSIZE = 6,
 NET_DECNET_CONF_DEV_STATE = 7
};
/* SCTP sysctl IDs (RTO parameters, retransmit limits, buffer policies). */
enum {
 NET_SCTP_RTO_INITIAL = 1,
 NET_SCTP_RTO_MIN = 2,
 NET_SCTP_RTO_MAX = 3,
 NET_SCTP_RTO_ALPHA = 4,
 NET_SCTP_RTO_BETA = 5,
 NET_SCTP_VALID_COOKIE_LIFE = 6,
 NET_SCTP_ASSOCIATION_MAX_RETRANS = 7,
 NET_SCTP_PATH_MAX_RETRANS = 8,
 NET_SCTP_MAX_INIT_RETRANSMITS = 9,
 NET_SCTP_HB_INTERVAL = 10,
 NET_SCTP_PRESERVE_ENABLE = 11,
 NET_SCTP_MAX_BURST = 12,
 NET_SCTP_ADDIP_ENABLE = 13,
 NET_SCTP_PRSCTP_ENABLE = 14,
 NET_SCTP_SNDBUF_POLICY = 15,
 NET_SCTP_SACK_TIMEOUT = 16,
 NET_SCTP_RCVBUF_POLICY = 17,
};
/* Bridge-netfilter sysctl IDs. */
enum {
 NET_BRIDGE_NF_CALL_ARPTABLES = 1,
 NET_BRIDGE_NF_CALL_IPTABLES = 2,
 NET_BRIDGE_NF_CALL_IP6TABLES = 3,
 NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
 NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
};
/* IrDA sysctl IDs. */
enum {
 NET_IRDA_DISCOVERY=1,
 NET_IRDA_DEVNAME=2,
 NET_IRDA_DEBUG=3,
 NET_IRDA_FAST_POLL=4,
 NET_IRDA_DISCOVERY_SLOTS=5,
 NET_IRDA_DISCOVERY_TIMEOUT=6,
 NET_IRDA_SLOT_TIMEOUT=7,
 NET_IRDA_MAX_BAUD_RATE=8,
 NET_IRDA_MIN_TX_TURN_TIME=9,
 NET_IRDA_MAX_TX_DATA_SIZE=10,
 NET_IRDA_MAX_TX_WINDOW=11,
 NET_IRDA_MAX_NOREPLY_TIME=12,
 NET_IRDA_WARN_NOREPLY_TIME=13,
 NET_IRDA_LAP_KEEPALIVE_TIME=14,
};
/* /proc/sys/fs/ sysctl IDs; FS_OCFS2 sits out-of-band at 988. */
enum
{
 FS_NRINODE=1,
 FS_STATINODE=2,
 FS_MAXINODE=3,
 FS_NRDQUOT=4,
 FS_MAXDQUOT=5,
 FS_NRFILE=6,
 FS_MAXFILE=7,
 FS_DENTRY=8,
 FS_NRSUPER=9,
 FS_MAXSUPER=10,
 FS_OVERFLOWUID=11,
 FS_OVERFLOWGID=12,
 FS_LEASES=13,
 FS_DIR_NOTIFY=14,
 FS_LEASE_TIME=15,
 FS_DQSTATS=16,
 FS_XFS=17,
 FS_AIO_NR=18,
 FS_AIO_MAX_NR=19,
 FS_INOTIFY=20,
 FS_OCFS2=988,
};
/* Disk-quota statistics sysctl IDs (fs/quota). */
enum {
 FS_DQ_LOOKUPS = 1,
 FS_DQ_DROPS = 2,
 FS_DQ_READS = 3,
 FS_DQ_WRITES = 4,
 FS_DQ_CACHE_HITS = 5,
 FS_DQ_ALLOCATED = 6,
 FS_DQ_FREE = 7,
 FS_DQ_SYNCS = 8,
 FS_DQ_WARNINGS = 9,
};
/* /proc/sys/dev/ subdirectory selectors. */
enum {
 DEV_CDROM=1,
 DEV_HWMON=2,
 DEV_PARPORT=3,
 DEV_RAID=4,
 DEV_MAC_HID=5,
 DEV_SCSI=6,
 DEV_IPMI=7,
};
/* dev/cdrom sysctl IDs. */
enum {
 DEV_CDROM_INFO=1,
 DEV_CDROM_AUTOCLOSE=2,
 DEV_CDROM_AUTOEJECT=3,
 DEV_CDROM_DEBUG=4,
 DEV_CDROM_LOCK=5,
 DEV_CDROM_CHECK_MEDIA=6
};
/* dev/parport "default" pseudo-entry selector. */
enum {
 DEV_PARPORT_DEFAULT=-3
};
/* dev/raid sysctl IDs. */
enum {
 DEV_RAID_SPEED_LIMIT_MIN=1,
 DEV_RAID_SPEED_LIMIT_MAX=2
};
/* dev/parport/default sysctl IDs. */
enum {
 DEV_PARPORT_DEFAULT_TIMESLICE=1,
 DEV_PARPORT_DEFAULT_SPINTIME=2
};
/* dev/parport/parportN sysctl IDs; AUTOPROBE intentionally jumps to 16. */
enum {
 DEV_PARPORT_SPINTIME=1,
 DEV_PARPORT_BASE_ADDR=2,
 DEV_PARPORT_IRQ=3,
 DEV_PARPORT_DMA=4,
 DEV_PARPORT_MODES=5,
 DEV_PARPORT_DEVICES=6,
 DEV_PARPORT_AUTOPROBE=16
};
/* dev/parport/parportN/devices "active" pseudo-entry selector. */
enum {
 DEV_PARPORT_DEVICES_ACTIVE=-3,
};
/* dev/parport/parportN/devices/<dev> sysctl IDs. */
enum {
 DEV_PARPORT_DEVICE_TIMESLICE=1,
};
/* dev/mac_hid sysctl IDs. */
enum {
 DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES=1,
 DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES=2,
 DEV_MAC_HID_MOUSE_BUTTON_EMULATION=3,
 DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE=4,
 DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE=5,
 DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES=6
};
/* dev/scsi sysctl IDs. */
enum {
 DEV_SCSI_LOGGING_LEVEL=1,
};
/* dev/ipmi sysctl IDs. */
enum {
 DEV_IPMI_POWEROFF_POWERCYCLE=1,
};
/* abi/ sysctl IDs (binfmt personality handlers). */
enum
{
 ABI_DEFHANDLER_COFF=1,
 ABI_DEFHANDLER_ELF=2,
 ABI_DEFHANDLER_LCALL7=3,
 ABI_DEFHANDLER_LIBCSO=4,
 ABI_TRACE=5,
 ABI_FAKE_UTSNAME=6,
};
/* RCU public API as expanded from include/linux/rcupdate.h (TREE_RCU,
 * lockdep enabled).  Declarations only, plus trivial inline stubs. */
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
/* Callback node embedded in RCU-protected objects; func runs after a
 * grace period elapses. */
struct rcu_head {
 struct rcu_head *next;
 void (*func)(struct rcu_head *head);
};
extern void call_rcu_sched(struct rcu_head *head,
 void (*func)(struct rcu_head *rcu));
extern void synchronize_sched(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
/* rcu_read_lock_bh critical sections are delimited by disabling and
 * re-enabling bottom halves. */
static inline __attribute__((always_inline)) void __rcu_read_lock_bh(void)
{
 local_bh_disable();
}
static inline __attribute__((always_inline)) void __rcu_read_unlock_bh(void)
{
 local_bh_enable();
}
extern void __rcu_read_lock(void);
extern void __rcu_read_unlock(void);
void synchronize_rcu(void);
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
/* No-op stubs: this configuration has no NO_HZ-specific RCU handling. */
static inline __attribute__((always_inline)) void rcu_enter_nohz(void)
{
}
static inline __attribute__((always_inline)) void rcu_exit_nohz(void)
{
}
extern void rcu_init(void);
extern void rcu_note_context_switch(int cpu);
extern int rcu_needs_cpu(int cpu);
extern void rcu_cpu_stall_reset(void);
/* Guest context switches count as ordinary context switches here. */
static inline __attribute__((always_inline)) void rcu_virt_note_context_switch(int cpu)
{
 rcu_note_context_switch(cpu);
}
extern void exit_rcu(void);
extern void synchronize_rcu_bh(void);
extern void synchronize_sched_expedited(void);
extern void synchronize_rcu_expedited(void);
/* Non-preemptible kernel: an expedited sched grace period also covers
 * bh read-side sections. */
static inline __attribute__((always_inline)) void synchronize_rcu_bh_expedited(void)
{
 synchronize_sched_expedited();
}
extern void rcu_barrier(void);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
extern long rcu_batches_completed_sched(void);
extern void rcu_force_quiescent_state(void);
extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);
10234static inline __attribute__((always_inline)) int rcu_blocking_is_gp(void)
10235{
10236 return cpumask_weight(cpu_online_mask) == 1;
10237}
10238extern void rcu_scheduler_starting(void);
10239extern int rcu_scheduler_active __attribute__((__section__(".data..read_mostly")));
10240static inline __attribute__((always_inline)) void init_rcu_head_on_stack(struct rcu_head *head)
10241{
10242}
10243static inline __attribute__((always_inline)) void destroy_rcu_head_on_stack(struct rcu_head *head)
10244{
10245}
10246extern struct lockdep_map rcu_lock_map;
10247extern struct lockdep_map rcu_bh_lock_map;
10248extern struct lockdep_map rcu_sched_lock_map;
10249extern int debug_lockdep_rcu_enabled(void);
10250static inline __attribute__((always_inline)) int rcu_read_lock_held(void)
10251{
10252 if (__builtin_constant_p(((!debug_lockdep_rcu_enabled()))) ? !!((!debug_lockdep_rcu_enabled())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 216, }; ______r = !!((!debug_lockdep_rcu_enabled())); ______f.miss_hit[______r]++; ______r; }))
10253 return 1;
10254 return lock_is_held(&rcu_lock_map);
10255}
10256extern int rcu_read_lock_bh_held(void);
10257static inline __attribute__((always_inline)) int rcu_read_lock_sched_held(void)
10258{
10259 int lockdep_opinion = 0;
10260 if (__builtin_constant_p(((!debug_lockdep_rcu_enabled()))) ? !!((!debug_lockdep_rcu_enabled())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 247, }; ______r = !!((!debug_lockdep_rcu_enabled())); ______f.miss_hit[______r]++; ______r; }))
10261 return 1;
10262 if (__builtin_constant_p(((debug_locks))) ? !!((debug_locks)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 249, }; ______r = !!((debug_locks)); ______f.miss_hit[______r]++; ______r; }))
10263 lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
10264 return lockdep_opinion || (current_thread_info()->preempt_count) != 0 || ({ unsigned long _flags; do { ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _flags = arch_local_save_flags(); } while (0); ({ ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(_flags); }); });
10265}
10266static inline __attribute__((always_inline)) void rcu_read_lock(void)
10267{
10268 __rcu_read_lock();
10269 (void)0;
10270 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
10271}
10272static inline __attribute__((always_inline)) void rcu_read_unlock(void)
10273{
10274 lock_release(&rcu_lock_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
10275 (void)0;
10276 __rcu_read_unlock();
10277}
10278static inline __attribute__((always_inline)) void rcu_read_lock_bh(void)
10279{
10280 __rcu_read_lock_bh();
10281 (void)0;
10282 lock_acquire(&rcu_bh_lock_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
10283}
10284static inline __attribute__((always_inline)) void rcu_read_unlock_bh(void)
10285{
10286 lock_release(&rcu_bh_lock_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
10287 (void)0;
10288 __rcu_read_unlock_bh();
10289}
10290static inline __attribute__((always_inline)) void rcu_read_lock_sched(void)
10291{
10292 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
10293 (void)0;
10294 lock_acquire(&rcu_sched_lock_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
10295}
10296static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void rcu_read_lock_sched_notrace(void)
10297{
10298 do { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } while (0);
10299 (void)0;
10300}
10301static inline __attribute__((always_inline)) void rcu_read_unlock_sched(void)
10302{
10303 lock_release(&rcu_sched_lock_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
10304 (void)0;
10305 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 681, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 681, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 681, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 681, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
10306}
10307static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void rcu_read_unlock_sched_notrace(void)
10308{
10309 (void)0;
10310 do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 688, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 688, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 688, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 688, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
10311}
10312struct rcu_synchronize {
10313 struct rcu_head head;
10314 struct completion completion;
10315};
10316extern void wakeme_after_rcu(struct rcu_head *head);
10317extern void call_rcu(struct rcu_head *head,
10318 void (*func)(struct rcu_head *head));
10319extern void call_rcu_bh(struct rcu_head *head,
10320 void (*func)(struct rcu_head *head));
10321static inline __attribute__((always_inline)) void debug_rcu_head_queue(struct rcu_head *head)
10322{
10323}
10324static inline __attribute__((always_inline)) void debug_rcu_head_unqueue(struct rcu_head *head)
10325{
10326}
/* kfree_rcu() encodes the rcu_head offset within the enclosing object as
 * the callback "function pointer"; any value below 4096 (one page) is
 * treated as such an offset rather than a real function address. */
static inline __attribute__((always_inline)) __attribute__((always_inline)) bool __is_kfree_rcu_offset(unsigned long offset)
{
 return offset < 4096;
}
10331static inline __attribute__((always_inline)) __attribute__((always_inline))
10332void __kfree_rcu(struct rcu_head *head, unsigned long offset)
10333{
10334 typedef void (*rcu_callback)(struct rcu_head *);
10335 do { ((void)sizeof(char[1 - 2*!!(!__builtin_constant_p(offset))])); if (__builtin_constant_p(((!__builtin_constant_p(offset)))) ? !!((!__builtin_constant_p(offset))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 822, }; ______r = !!((!__builtin_constant_p(offset))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0);
10336 do { ((void)sizeof(char[1 - 2*!!(!__is_kfree_rcu_offset(offset))])); if (__builtin_constant_p(((!__is_kfree_rcu_offset(offset)))) ? !!((!__is_kfree_rcu_offset(offset))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 825, }; ______r = !!((!__is_kfree_rcu_offset(offset))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0);
10337 call_rcu(head, (rcu_callback)offset);
10338}
10339extern void kfree(const void *);
10340static inline __attribute__((always_inline)) void __rcu_reclaim(struct rcu_head *head)
10341{
10342 unsigned long offset = (unsigned long)head->func;
10343 if (__builtin_constant_p(((__is_kfree_rcu_offset(offset)))) ? !!((__is_kfree_rcu_offset(offset))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rcupdate.h", .line = 836, }; ______r = !!((__is_kfree_rcu_offset(offset))); ______f.miss_hit[______r]++; ______r; }))
10344 kfree((void *)head - offset);
10345 else
10346 head->func(head);
10347}
10348struct ctl_table;
10349struct nsproxy;
10350struct ctl_table_root;
10351struct ctl_table_set {
10352 struct list_head list;
10353 struct ctl_table_set *parent;
10354 int (*is_seen)(struct ctl_table_set *);
10355};
10356extern void setup_sysctl_set(struct ctl_table_set *p,
10357 struct ctl_table_set *parent,
10358 int (*is_seen)(struct ctl_table_set *));
10359struct ctl_table_header;
10360extern void sysctl_head_get(struct ctl_table_header *);
10361extern void sysctl_head_put(struct ctl_table_header *);
10362extern int sysctl_is_seen(struct ctl_table_header *);
10363extern struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *);
10364extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev);
10365extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
10366 struct ctl_table_header *prev);
10367extern void sysctl_head_finish(struct ctl_table_header *prev);
10368extern int sysctl_perm(struct ctl_table_root *root,
10369 struct ctl_table *table, int op);
10370typedef struct ctl_table ctl_table;
10371typedef int proc_handler (struct ctl_table *ctl, int write,
10372 void *buffer, size_t *lenp, loff_t *ppos);
10373extern int proc_dostring(struct ctl_table *, int,
10374 void *, size_t *, loff_t *);
10375extern int proc_dointvec(struct ctl_table *, int,
10376 void *, size_t *, loff_t *);
10377extern int proc_dointvec_minmax(struct ctl_table *, int,
10378 void *, size_t *, loff_t *);
10379extern int proc_dointvec_jiffies(struct ctl_table *, int,
10380 void *, size_t *, loff_t *);
10381extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
10382 void *, size_t *, loff_t *);
10383extern int proc_dointvec_ms_jiffies(struct ctl_table *, int,
10384 void *, size_t *, loff_t *);
10385extern int proc_doulongvec_minmax(struct ctl_table *, int,
10386 void *, size_t *, loff_t *);
10387extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
10388 void *, size_t *, loff_t *);
10389extern int proc_do_large_bitmap(struct ctl_table *, int,
10390 void *, size_t *, loff_t *);
10391struct ctl_table
10392{
10393 const char *procname;
10394 void *data;
10395 int maxlen;
10396 mode_t mode;
10397 struct ctl_table *child;
10398 struct ctl_table *parent;
10399 proc_handler *proc_handler;
10400 void *extra1;
10401 void *extra2;
10402};
10403struct ctl_table_root {
10404 struct list_head root_list;
10405 struct ctl_table_set default_set;
10406 struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
10407 struct nsproxy *namespaces);
10408 int (*permissions)(struct ctl_table_root *root,
10409 struct nsproxy *namespaces, struct ctl_table *table);
10410};
10411struct ctl_table_header
10412{
10413 union {
10414 struct {
10415 struct ctl_table *ctl_table;
10416 struct list_head ctl_entry;
10417 int used;
10418 int count;
10419 };
10420 struct rcu_head rcu;
10421 };
10422 struct completion *unregistering;
10423 struct ctl_table *ctl_table_arg;
10424 struct ctl_table_root *root;
10425 struct ctl_table_set *set;
10426 struct ctl_table *attached_by;
10427 struct ctl_table *attached_to;
10428 struct ctl_table_header *parent;
10429};
10430struct ctl_path {
10431 const char *procname;
10432};
10433void register_sysctl_root(struct ctl_table_root *root);
10434struct ctl_table_header *__register_sysctl_paths(
10435 struct ctl_table_root *root, struct nsproxy *namespaces,
10436 const struct ctl_path *path, struct ctl_table *table);
10437struct ctl_table_header *register_sysctl_table(struct ctl_table * table);
10438struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
10439 struct ctl_table *table);
10440void unregister_sysctl_table(struct ctl_table_header * table);
10441int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table);
10442extern char modprobe_path[];
10443extern int __request_module(bool wait, const char *name, ...)
10444 __attribute__((format(printf, 2, 3)));
10445struct cred;
10446struct file;
10447enum umh_wait {
10448 UMH_NO_WAIT = -1,
10449 UMH_WAIT_EXEC = 0,
10450 UMH_WAIT_PROC = 1,
10451};
10452struct subprocess_info {
10453 struct work_struct work;
10454 struct completion *complete;
10455 char *path;
10456 char **argv;
10457 char **envp;
10458 enum umh_wait wait;
10459 int retval;
10460 int (*init)(struct subprocess_info *info, struct cred *new);
10461 void (*cleanup)(struct subprocess_info *info);
10462 void *data;
10463};
10464struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
10465 char **envp, gfp_t gfp_mask);
10466void call_usermodehelper_setfns(struct subprocess_info *info,
10467 int (*init)(struct subprocess_info *info, struct cred *new),
10468 void (*cleanup)(struct subprocess_info *info),
10469 void *data);
10470int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
10471void call_usermodehelper_freeinfo(struct subprocess_info *info);
10472static inline __attribute__((always_inline)) int
10473call_usermodehelper_fns(char *path, char **argv, char **envp,
10474 enum umh_wait wait,
10475 int (*init)(struct subprocess_info *info, struct cred *new),
10476 void (*cleanup)(struct subprocess_info *), void *data)
10477{
10478 struct subprocess_info *info;
10479 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? ((( gfp_t)0x20u)) : ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u));
10480 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
10481 if (__builtin_constant_p(((info == ((void *)0)))) ? !!((info == ((void *)0))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kmod.h", .line = 98, }; ______r = !!((info == ((void *)0))); ______f.miss_hit[______r]++; ______r; }))
10482 return -12;
10483 call_usermodehelper_setfns(info, init, cleanup, data);
10484 return call_usermodehelper_exec(info, wait);
10485}
10486static inline __attribute__((always_inline)) int
10487call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
10488{
10489 return call_usermodehelper_fns(path, argv, envp, wait,
10490 ((void *)0), ((void *)0), ((void *)0));
10491}
10492extern struct ctl_table usermodehelper_table[];
10493extern void usermodehelper_init(void);
10494extern int usermodehelper_disable(void);
10495extern void usermodehelper_enable(void);
10496extern bool usermodehelper_is_disabled(void);
10497struct user_i387_struct {
10498 long cwd;
10499 long swd;
10500 long twd;
10501 long fip;
10502 long fcs;
10503 long foo;
10504 long fos;
10505 long st_space[20];
10506};
10507struct user_fxsr_struct {
10508 unsigned short cwd;
10509 unsigned short swd;
10510 unsigned short twd;
10511 unsigned short fop;
10512 long fip;
10513 long fcs;
10514 long foo;
10515 long fos;
10516 long mxcsr;
10517 long reserved;
10518 long st_space[32];
10519 long xmm_space[32];
10520 long padding[56];
10521};
10522struct user_regs_struct {
10523 unsigned long bx;
10524 unsigned long cx;
10525 unsigned long dx;
10526 unsigned long si;
10527 unsigned long di;
10528 unsigned long bp;
10529 unsigned long ax;
10530 unsigned long ds;
10531 unsigned long es;
10532 unsigned long fs;
10533 unsigned long gs;
10534 unsigned long orig_ax;
10535 unsigned long ip;
10536 unsigned long cs;
10537 unsigned long flags;
10538 unsigned long sp;
10539 unsigned long ss;
10540};
10541struct user{
10542 struct user_regs_struct regs;
10543 int u_fpvalid;
10544 struct user_i387_struct i387;
10545 unsigned long int u_tsize;
10546 unsigned long int u_dsize;
10547 unsigned long int u_ssize;
10548 unsigned long start_code;
10549 unsigned long start_stack;
10550 long int signal;
10551 int reserved;
10552 unsigned long u_ar0;
10553 struct user_i387_struct *u_fpstate;
10554 unsigned long magic;
10555 char u_comm[32];
10556 int u_debugreg[8];
10557};
10558struct user_ymmh_regs {
10559 __u32 ymmh_space[64];
10560};
10561struct user_xsave_hdr {
10562 __u64 xstate_bv;
10563 __u64 reserved1[2];
10564 __u64 reserved2[5];
10565};
10566struct user_xstateregs {
10567 struct {
10568 __u64 fpx_space[58];
10569 __u64 xstate_fx_sw[6];
10570 } i387;
10571 struct user_xsave_hdr xsave_hdr;
10572 struct user_ymmh_regs ymmh;
10573};
/* ELF core-dump register-set types derived from the ptrace layouts above. */
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[(sizeof(struct user_regs_struct) / sizeof(elf_greg_t))];
typedef struct user_i387_struct elf_fpregset_t;
typedef struct user_fxsr_struct elf_fpxregset_t;
/* vDSO blob symbols — addresses/ranges provided by the vdso32 build. */
extern const char VDSO32_PRELINK[];
extern void __kernel_sigreturn;
extern void __kernel_rt_sigreturn;
extern const char vdso32_int80_start, vdso32_int80_end;
extern const char vdso32_syscall_start, vdso32_syscall_end;
extern const char vdso32_sysenter_start, vdso32_sysenter_end;
extern unsigned int vdso_enabled;
/*
 * Userspace view of an LDT/TLS segment (set_thread_area/modify_ldt ABI):
 * GDT/LDT slot number, base, limit, and the descriptor attribute bits.
 */
struct user_desc {
 unsigned int entry_number;
 unsigned int base_addr;
 unsigned int limit;
 unsigned int seg_32bit:1;
 unsigned int contents:2;        /* data/expand-down/code selection bits */
 unsigned int read_exec_only:1;
 unsigned int limit_in_pages:1;  /* granularity: limit counted in 4K pages */
 unsigned int seg_not_present:1;
 unsigned int useable:1;         /* AVL bit, free for OS use */
};
10596static inline __attribute__((always_inline)) void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
10597{
10598 desc->limit0 = info->limit & 0x0ffff;
10599 desc->base0 = (info->base_addr & 0x0000ffff);
10600 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
10601 desc->type = (info->read_exec_only ^ 1) << 1;
10602 desc->type |= info->contents << 2;
10603 desc->s = 1;
10604 desc->dpl = 0x3;
10605 desc->p = info->seg_not_present ^ 1;
10606 desc->limit = (info->limit & 0xf0000) >> 16;
10607 desc->avl = info->useable;
10608 desc->d = info->seg_32bit;
10609 desc->g = info->limit_in_pages;
10610 desc->base2 = (info->base_addr & 0xff000000) >> 24;
10611 desc->l = 0;
10612}
/* IDT descriptor and table, defined in arch setup code. */
extern struct desc_ptr idt_descr;
extern gate_desc idt_table[];
/* Per-CPU, page-aligned GDT (32 entries), placed in the percpu section. */
struct gdt_page {
 struct desc_struct gdt[32];
} __attribute__((aligned(((1UL) << 12))));
extern __attribute__((section(".data..percpu" "..page_aligned"))) __typeof__(struct gdt_page) gdt_page __attribute__((aligned(((1UL) << 12))));
/*
 * get_cpu_gdt_table - return a pointer to @cpu's GDT.
 * Body is the expanded per_cpu() accessor: it offsets &gdt_page by
 * __per_cpu_offset[cpu] and returns the embedded gdt array.
 */
static inline __attribute__((always_inline)) struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
 return (*({ do { const void *__vpp_verify = (typeof((&(gdt_page))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(gdt_page))) *)(&(gdt_page)))); (typeof((typeof(*(&(gdt_page))) *)(&(gdt_page)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).gdt;
}
10623static inline __attribute__((always_inline)) void pack_gate(gate_desc *gate, unsigned char type,
10624 unsigned long base, unsigned dpl, unsigned flags,
10625 unsigned short seg)
10626{
10627 gate->a = (seg << 16) | (base & 0xffff);
10628 gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10629}
10630static inline __attribute__((always_inline)) int desc_empty(const void *ptr)
10631{
10632 const u32 *desc = ptr;
10633 return !(desc[0] | desc[1]);
10634}
10635static inline __attribute__((always_inline)) void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10636{
10637 __builtin_memcpy(&idt[entry], gate, sizeof(*gate));
10638}
10639static inline __attribute__((always_inline)) void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10640{
10641 __builtin_memcpy(&ldt[entry], desc, 8);
10642}
10643static inline __attribute__((always_inline)) void
10644native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
10645{
10646 unsigned int size;
10647 switch (type) {
10648 case DESC_TSS: size = sizeof(tss_desc); break;
10649 case DESC_LDT: size = sizeof(ldt_desc); break;
10650 default: size = sizeof(*gdt); break;
10651 }
10652 __builtin_memcpy(&gdt[entry], desc, size);
10653}
10654static inline __attribute__((always_inline)) void pack_descriptor(struct desc_struct *desc, unsigned long base,
10655 unsigned long limit, unsigned char type,
10656 unsigned char flags)
10657{
10658 desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
10659 desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
10660 (limit & 0x000f0000) | ((type & 0xff) << 8) |
10661 ((flags & 0xf) << 20);
10662 desc->p = 1;
10663}
/*
 * set_tssldt_descriptor - build a TSS/LDT descriptor at @d for base @addr,
 * limit @size, with the present bit (0x80) OR-ed into @type.
 */
static inline __attribute__((always_inline)) void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
{
 pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
}
/*
 * __set_tss_desc - install a TSS descriptor for @cpu at GDT slot @entry.
 * The limit covers the tss_struct up to and including the full 64K-entry
 * I/O bitmap, rounded for the trailing terminator.
 */
static inline __attribute__((always_inline)) void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
{
 struct desc_struct *d = get_cpu_gdt_table(cpu);
 tss_desc tss;
 set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
        __builtin_offsetof(struct tss_struct,io_bitmap) + (65536/8) +
        sizeof(unsigned long) - 1);
 write_gdt_entry(d, entry, &tss, DESC_TSS);
}
/*
 * native_set_ldt - point the CPU's LDTR at an LDT of @entries descriptors.
 * With zero entries, loads a null selector.  Otherwise builds an LDT
 * descriptor in this CPU's GDT (slot 12+5) and loads its selector.
 * The condition below is the verbatim likely()/ftrace branch-profiling
 * expansion of "entries == 0" — left untouched.
 */
static inline __attribute__((always_inline)) void native_set_ldt(const void *addr, unsigned int entries)
{
 if (__builtin_constant_p((((__builtin_constant_p(entries == 0) ? !!(entries == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 194, }; ______r = __builtin_expect(!!(entries == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(entries == 0) ? !!(entries == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 194, }; ______r = __builtin_expect(!!(entries == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 194, }; ______r = !!(((__builtin_constant_p(entries == 0) ? !!(entries == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 194, }; ______r = __builtin_expect(!!(entries == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
  asm volatile("lldt %w0"::"q" (0));
 else {
  unsigned cpu = debug_smp_processor_id();
  ldt_desc ldt;
  set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
          entries * 8 - 1);
  write_gdt_entry(get_cpu_gdt_table(cpu), ((12)+5),
    &ldt, DESC_LDT);
  asm volatile("lldt %w0"::"q" (((12)+5)*8));
 }
}
/* native_load_tr_desc - load the task register with the TSS selector (GDT slot 12+4). */
static inline __attribute__((always_inline)) void native_load_tr_desc(void)
{
 asm volatile("ltr %w0"::"q" (((12)+4)*8));
}
/* native_load_gdt - load GDTR from the pseudo-descriptor at *dtr. */
static inline __attribute__((always_inline)) void native_load_gdt(const struct desc_ptr *dtr)
{
 asm volatile("lgdt %0"::"m" (*dtr));
}
/* native_load_idt - load IDTR from the pseudo-descriptor at *dtr. */
static inline __attribute__((always_inline)) void native_load_idt(const struct desc_ptr *dtr)
{
 asm volatile("lidt %0"::"m" (*dtr));
}
/* native_store_gdt - store the current GDTR into *dtr. */
static inline __attribute__((always_inline)) void native_store_gdt(struct desc_ptr *dtr)
{
 asm volatile("sgdt %0":"=m" (*dtr));
}
/* native_store_idt - store the current IDTR into *dtr. */
static inline __attribute__((always_inline)) void native_store_idt(struct desc_ptr *dtr)
{
 asm volatile("sidt %0":"=m" (*dtr));
}
10711static inline __attribute__((always_inline)) unsigned long native_store_tr(void)
10712{
10713 unsigned long tr;
10714 asm volatile("str %0":"=r" (tr));
10715 return tr;
10716}
10717static inline __attribute__((always_inline)) void native_load_tls(struct thread_struct *t, unsigned int cpu)
10718{
10719 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10720 unsigned int i;
10721 for (i = 0; i < 3; i++)
10722 gdt[6 + i] = t->tls_array[i];
10723}
/* clear_LDT - install an empty LDT (null selector). */
static inline __attribute__((always_inline)) void clear_LDT(void)
{
 set_ldt(((void *)0), 0);
}
/* load_LDT_nolock - install the mm's LDT; caller must prevent preemption. */
static inline __attribute__((always_inline)) void load_LDT_nolock(mm_context_t *pc)
{
 set_ldt(pc->ldt, pc->size);
}
/*
 * load_LDT - preemption-safe wrapper around load_LDT_nolock().
 * The do/while blocks are the verbatim preempt_disable()/preempt_enable()
 * expansions (including ftrace branch profiling of the resched check) —
 * left untouched.
 */
static inline __attribute__((always_inline)) void load_LDT(mm_context_t *pc)
{
 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 load_LDT_nolock(pc);
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 284, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 284, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 284, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 284, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
10738static inline __attribute__((always_inline)) unsigned long get_desc_base(const struct desc_struct *desc)
10739{
10740 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
10741}
10742static inline __attribute__((always_inline)) void set_desc_base(struct desc_struct *desc, unsigned long base)
10743{
10744 desc->base0 = base & 0xffff;
10745 desc->base1 = (base >> 16) & 0xff;
10746 desc->base2 = (base >> 24) & 0xff;
10747}
10748static inline __attribute__((always_inline)) unsigned long get_desc_limit(const struct desc_struct *desc)
10749{
10750 return desc->limit0 | (desc->limit << 16);
10751}
10752static inline __attribute__((always_inline)) void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10753{
10754 desc->limit0 = limit & 0xffff;
10755 desc->limit = (limit >> 16) & 0xf;
10756}
10757static inline __attribute__((always_inline)) void _set_gate(int gate, unsigned type, void *addr,
10758 unsigned dpl, unsigned ist, unsigned seg)
10759{
10760 gate_desc s;
10761 pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
10762 write_idt_entry(idt_table, gate, &s);
10763}
/*
 * set_intr_gate - install an interrupt gate (DPL 0) for vector @n.
 * First statement is the verbatim BUG_ON(n > 0xFF) expansion with ftrace
 * branch profiling — left untouched.
 */
static inline __attribute__((always_inline)) void set_intr_gate(unsigned int n, void *addr)
{
 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 331, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 331, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 331, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 331, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (331), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, (((12)+0)*8));
}
/* Lowest vector claimed by a system interrupt, and the allocation bitmap. */
extern int first_system_vector;
extern unsigned long used_vectors[];
/*
 * alloc_system_vector - claim @vector in used_vectors[], lowering
 * first_system_vector if needed; BUG if the vector is already taken.
 * Conditions and the BUG() are verbatim test_bit/ftrace/bug expansions.
 */
static inline __attribute__((always_inline)) void alloc_system_vector(int vector)
{
 if (__builtin_constant_p(((!(__builtin_constant_p((vector)) ? constant_test_bit((vector), (used_vectors)) : variable_test_bit((vector), (used_vectors)))))) ? !!((!(__builtin_constant_p((vector)) ? constant_test_bit((vector), (used_vectors)) : variable_test_bit((vector), (used_vectors))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 341, }; ______r = !!((!(__builtin_constant_p((vector)) ? constant_test_bit((vector), (used_vectors)) : variable_test_bit((vector), (used_vectors))))); ______f.miss_hit[______r]++; ______r; })) {
  set_bit(vector, used_vectors);
  if (__builtin_constant_p(((first_system_vector > vector))) ? !!((first_system_vector > vector)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 343, }; ______r = !!((first_system_vector > vector)); ______f.miss_hit[______r]++; ______r; }))
   first_system_vector = vector;
 } else {
  do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (346), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0);
 }
}
/* alloc_intr_gate - claim vector @n, then install @addr as its interrupt gate. */
static inline __attribute__((always_inline)) void alloc_intr_gate(unsigned int n, void *addr)
{
 alloc_system_vector(n);
 set_intr_gate(n, addr);
}
/*
 * set_system_intr_gate - interrupt gate callable from user mode (DPL 3).
 * First statement is the verbatim BUG_ON(n > 0xFF) expansion.
 */
static inline __attribute__((always_inline)) void set_system_intr_gate(unsigned int n, void *addr)
{
 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 361, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 361, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 361, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 361, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (361), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, (((12)+0)*8));
}
/*
 * set_system_trap_gate - trap gate callable from user mode (DPL 3).
 * First statement is the verbatim BUG_ON(n > 0xFF) expansion.
 */
static inline __attribute__((always_inline)) void set_system_trap_gate(unsigned int n, void *addr)
{
 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 367, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 367, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 367, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 367, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (367), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 _set_gate(n, GATE_TRAP, addr, 0x3, 0, (((12)+0)*8));
}
/*
 * set_trap_gate - kernel-only trap gate (DPL 0) for vector @n.
 * First statement is the verbatim BUG_ON(n > 0xFF) expansion.
 */
static inline __attribute__((always_inline)) void set_trap_gate(unsigned int n, void *addr)
{
 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 373, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 373, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 373, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 373, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (373), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 _set_gate(n, GATE_TRAP, addr, 0, 0, (((12)+0)*8));
}
/*
 * set_task_gate - task gate for vector @n referencing TSS at @gdt_entry.
 * First statement is the verbatim BUG_ON(n > 0xFF) expansion.
 */
static inline __attribute__((always_inline)) void set_task_gate(unsigned int n, unsigned int gdt_entry)
{
 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 379, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 379, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 379, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 379, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (379), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
}
/*
 * set_intr_gate_ist - DPL-0 interrupt gate using interrupt stack @ist.
 * First statement is the verbatim BUG_ON(n > 0xFF) expansion.
 */
static inline __attribute__((always_inline)) void set_intr_gate_ist(int n, void *addr, unsigned ist)
{
 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 385, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 385, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 385, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 385, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (385), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, (((12)+0)*8));
}
/*
 * set_system_intr_gate_ist - DPL-3 interrupt gate using interrupt stack @ist.
 * First statement is the verbatim BUG_ON(n > 0xFF) expansion.
 */
static inline __attribute__((always_inline)) void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
{
 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 391, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 391, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 391, }; ______r = !!(((__builtin_constant_p((unsigned)n > 0xFF) ? !!((unsigned)n > 0xFF) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h", .line = 391, }; ______r = __builtin_expect(!!((unsigned)n > 0xFF), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("/data/exp/linux-3.0.4/arch/x86/include/asm/desc.h"), "i" (391), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, (((12)+0)*8));
}
/* Forward declarations and binfmt/vdso entry points implemented elsewhere. */
struct task_struct;
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
           int uses_interp);
extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
struct file;
/* ELF scalar types (per the ELF-32 and ELF-64 object file format specs). */
typedef __u32 Elf32_Addr;
typedef __u16 Elf32_Half;
typedef __u32 Elf32_Off;
typedef __s32 Elf32_Sword;
typedef __u32 Elf32_Word;
typedef __u64 Elf64_Addr;
typedef __u16 Elf64_Half;
typedef __s16 Elf64_SHalf;
typedef __u64 Elf64_Off;
typedef __s32 Elf64_Sword;
typedef __u32 Elf64_Word;
typedef __u64 Elf64_Xword;
typedef __s64 Elf64_Sxword;
/* ELF dynamic-section entries: tag plus a value/pointer union. */
typedef struct dynamic{
 Elf32_Sword d_tag;
 union{
  Elf32_Sword d_val;
  Elf32_Addr d_ptr;
 } d_un;
} Elf32_Dyn;
typedef struct {
 Elf64_Sxword d_tag;
 union {
  Elf64_Xword d_val;
  Elf64_Addr d_ptr;
 } d_un;
} Elf64_Dyn;
/* ELF relocation records, without (Rel) and with (Rela) an explicit addend. */
typedef struct elf32_rel {
 Elf32_Addr r_offset;
 Elf32_Word r_info;
} Elf32_Rel;
typedef struct elf64_rel {
 Elf64_Addr r_offset;
 Elf64_Xword r_info;
} Elf64_Rel;
typedef struct elf32_rela{
 Elf32_Addr r_offset;
 Elf32_Word r_info;
 Elf32_Sword r_addend;
} Elf32_Rela;
typedef struct elf64_rela {
 Elf64_Addr r_offset;
 Elf64_Xword r_info;
 Elf64_Sxword r_addend;
} Elf64_Rela;
/* ELF symbol-table entries (field order differs between ELF32 and ELF64). */
typedef struct elf32_sym{
 Elf32_Word st_name;
 Elf32_Addr st_value;
 Elf32_Word st_size;
 unsigned char st_info;
 unsigned char st_other;
 Elf32_Half st_shndx;
} Elf32_Sym;
typedef struct elf64_sym {
 Elf64_Word st_name;
 unsigned char st_info;
 unsigned char st_other;
 Elf64_Half st_shndx;
 Elf64_Addr st_value;
 Elf64_Xword st_size;
} Elf64_Sym;
/* ELF file headers: identification bytes plus program/section table geometry. */
typedef struct elf32_hdr{
 unsigned char e_ident[16];
 Elf32_Half e_type;
 Elf32_Half e_machine;
 Elf32_Word e_version;
 Elf32_Addr e_entry;
 Elf32_Off e_phoff;
 Elf32_Off e_shoff;
 Elf32_Word e_flags;
 Elf32_Half e_ehsize;
 Elf32_Half e_phentsize;
 Elf32_Half e_phnum;
 Elf32_Half e_shentsize;
 Elf32_Half e_shnum;
 Elf32_Half e_shstrndx;
} Elf32_Ehdr;
typedef struct elf64_hdr {
 unsigned char e_ident[16];
 Elf64_Half e_type;
 Elf64_Half e_machine;
 Elf64_Word e_version;
 Elf64_Addr e_entry;
 Elf64_Off e_phoff;
 Elf64_Off e_shoff;
 Elf64_Word e_flags;
 Elf64_Half e_ehsize;
 Elf64_Half e_phentsize;
 Elf64_Half e_phnum;
 Elf64_Half e_shentsize;
 Elf64_Half e_shnum;
 Elf64_Half e_shstrndx;
} Elf64_Ehdr;
/* ELF program headers (note p_flags moves position in the 64-bit layout). */
typedef struct elf32_phdr{
 Elf32_Word p_type;
 Elf32_Off p_offset;
 Elf32_Addr p_vaddr;
 Elf32_Addr p_paddr;
 Elf32_Word p_filesz;
 Elf32_Word p_memsz;
 Elf32_Word p_flags;
 Elf32_Word p_align;
} Elf32_Phdr;
typedef struct elf64_phdr {
 Elf64_Word p_type;
 Elf64_Word p_flags;
 Elf64_Off p_offset;
 Elf64_Addr p_vaddr;
 Elf64_Addr p_paddr;
 Elf64_Xword p_filesz;
 Elf64_Xword p_memsz;
 Elf64_Xword p_align;
} Elf64_Phdr;
/* ELF section headers. */
typedef struct elf32_shdr {
 Elf32_Word sh_name;
 Elf32_Word sh_type;
 Elf32_Word sh_flags;
 Elf32_Addr sh_addr;
 Elf32_Off sh_offset;
 Elf32_Word sh_size;
 Elf32_Word sh_link;
 Elf32_Word sh_info;
 Elf32_Word sh_addralign;
 Elf32_Word sh_entsize;
} Elf32_Shdr;
typedef struct elf64_shdr {
 Elf64_Word sh_name;
 Elf64_Word sh_type;
 Elf64_Xword sh_flags;
 Elf64_Addr sh_addr;
 Elf64_Off sh_offset;
 Elf64_Xword sh_size;
 Elf64_Word sh_link;
 Elf64_Word sh_info;
 Elf64_Xword sh_addralign;
 Elf64_Xword sh_entsize;
} Elf64_Shdr;
/* ELF note headers (name size, descriptor size, note type). */
typedef struct elf32_note {
 Elf32_Word n_namesz;
 Elf32_Word n_descsz;
 Elf32_Word n_type;
} Elf32_Nhdr;
typedef struct elf64_note {
 Elf64_Word n_namesz;
 Elf64_Word n_descsz;
 Elf64_Word n_type;
} Elf64_Nhdr;
/* Dynamic section symbol, plus no-op coredump extra-notes hooks (return 0 = none). */
extern Elf32_Dyn _DYNAMIC [];
static inline __attribute__((always_inline)) int elf_coredump_extra_notes_size(void) { return 0; }
static inline __attribute__((always_inline)) int elf_coredump_extra_notes_write(struct file *file,
          loff_t *foffset) { return 0; }
struct sock;
struct kobject;
/*
 * Kinds of namespace a sysfs directory tree can be tagged with.
 * KOBJ_NS_TYPE_NONE means the object is visible in every namespace;
 * KOBJ_NS_TYPE_NET scopes it to a network namespace.
 */
enum kobj_ns_type {
 KOBJ_NS_TYPE_NONE = 0,
 KOBJ_NS_TYPE_NET,
 KOBJ_NS_TYPES  /* number of types; used to size per-type tables */
};
/*
 * Callbacks a namespace type registers so sysfs can translate a
 * kobject into the namespace tag that should own it.
 */
struct kobj_ns_type_operations {
 enum kobj_ns_type type;
 void *(*grab_current_ns)(void);           /* take a ref on the caller's ns */
 const void *(*netlink_ns)(struct sock *sk); /* ns tag for a netlink socket */
 const void *(*initial_ns)(void);          /* the boot/initial namespace tag */
 void (*drop_ns)(void *);                  /* release a grab_current_ns() ref */
};
int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
int kobj_ns_type_registered(enum kobj_ns_type type);
const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
void *kobj_ns_grab_current(enum kobj_ns_type type);
const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
const void *kobj_ns_initial(enum kobj_ns_type type);
void kobj_ns_drop(enum kobj_ns_type type, void *ns);
struct kobject;
struct module;
enum kobj_ns_type;
/*
 * One sysfs attribute: the name and mode of a single file under a
 * kobject's directory.  The lock_class_key members exist for lockdep
 * annotation of the attribute's protection.
 */
struct attribute {
 const char *name;
 mode_t mode;                 /* file permission bits shown in sysfs */
 struct lock_class_key *key;
 struct lock_class_key skey;
};
/*
 * A named group of attributes created/removed as a unit.
 * is_visible, when non-NULL, may hide or re-mode individual
 * attributes at creation time (return 0 mode to hide).
 */
struct attribute_group {
 const char *name;            /* optional subdirectory name; NULL = inline */
 mode_t (*is_visible)(struct kobject *,
        struct attribute *, int);
 struct attribute **attrs;    /* NULL-terminated array of attributes */
};
struct file;
struct vm_area_struct;
/*
 * Binary sysfs attribute: raw, offset-addressable file contents
 * rather than the one-value-per-file text convention.
 */
struct bin_attribute {
 struct attribute attr;
 size_t size;     /* total size of the binary file (0 = unlimited) */
 void *private;   /* owner cookie for the callbacks */
 ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
   char *, loff_t, size_t);
 ssize_t (*write)(struct file *,struct kobject *, struct bin_attribute *,
    char *, loff_t, size_t);
 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
      struct vm_area_struct *vma);
};
/*
 * Per-ktype translation between sysfs file I/O and attribute values:
 * show formats into buf, store parses count bytes from buf.
 */
struct sysfs_ops {
 ssize_t (*show)(struct kobject *, struct attribute *,char *);
 ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
};
struct sysfs_dirent;
/*
 * Core sysfs API: create/remove directories, plain files, binary
 * files, symlinks and attribute groups under a kobject.  Creation
 * paths return 0 on success / negative errno, and are marked
 * warn_unused_result because ignoring the failure leaves the
 * object without its sysfs presence.
 */
int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
   void *data, struct module *owner);
int __attribute__((warn_unused_result)) sysfs_create_dir(struct kobject *kobj);
void sysfs_remove_dir(struct kobject *kobj);
int __attribute__((warn_unused_result)) sysfs_rename_dir(struct kobject *kobj, const char *new_name);
int __attribute__((warn_unused_result)) sysfs_move_dir(struct kobject *kobj,
   struct kobject *new_parent_kobj);
int __attribute__((warn_unused_result)) sysfs_create_file(struct kobject *kobj,
   const struct attribute *attr);
int __attribute__((warn_unused_result)) sysfs_create_files(struct kobject *kobj,
   const struct attribute **attr);
int __attribute__((warn_unused_result)) sysfs_chmod_file(struct kobject *kobj,
   const struct attribute *attr, mode_t mode);
void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr);
void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
int __attribute__((warn_unused_result)) sysfs_create_bin_file(struct kobject *kobj,
   const struct bin_attribute *attr);
void sysfs_remove_bin_file(struct kobject *kobj,
   const struct bin_attribute *attr);
int __attribute__((warn_unused_result)) sysfs_create_link(struct kobject *kobj, struct kobject *target,
   const char *name);
int __attribute__((warn_unused_result)) sysfs_create_link_nowarn(struct kobject *kobj,
   struct kobject *target,
   const char *name);
void sysfs_remove_link(struct kobject *kobj, const char *name);
int sysfs_rename_link(struct kobject *kobj, struct kobject *target,
   const char *old_name, const char *new_name);
void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
   const char *name);
int __attribute__((warn_unused_result)) sysfs_create_group(struct kobject *kobj,
   const struct attribute_group *grp);
int sysfs_update_group(struct kobject *kobj,
   const struct attribute_group *grp);
void sysfs_remove_group(struct kobject *kobj,
   const struct attribute_group *grp);
int sysfs_add_file_to_group(struct kobject *kobj,
   const struct attribute *attr, const char *group);
void sysfs_remove_file_from_group(struct kobject *kobj,
   const struct attribute *attr, const char *group);
int sysfs_merge_group(struct kobject *kobj,
   const struct attribute_group *grp);
void sysfs_unmerge_group(struct kobject *kobj,
   const struct attribute_group *grp);
/* Wake poll/select waiters on an attribute that changed value. */
void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
void sysfs_notify_dirent(struct sysfs_dirent *sd);
/* Low-level sysfs_dirent lookup/refcounting used by the core. */
struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
   const void *ns,
   const unsigned char *name);
struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
void sysfs_put(struct sysfs_dirent *sd);
int __attribute__((warn_unused_result)) sysfs_init(void);
/* Embeddable atomic reference counter; see kref_init/get/put below. */
struct kref {
 atomic_t refcount;
};
void kref_init(struct kref *kref);
void kref_get(struct kref *kref);
/* Drop a reference; calls release() and returns nonzero on last put. */
int kref_put(struct kref *kref, void (*release) (struct kref *kref));
int kref_sub(struct kref *kref, unsigned int count,
      void (*release) (struct kref *kref));
/* Path of the userspace uevent helper and global uevent sequence counter. */
extern char uevent_helper[];
extern u64 uevent_seqnum;
/* Event types reported to userspace via kobject_uevent(). */
enum kobject_action {
 KOBJ_ADD,
 KOBJ_REMOVE,
 KOBJ_CHANGE,
 KOBJ_MOVE,
 KOBJ_ONLINE,
 KOBJ_OFFLINE,
 KOBJ_MAX  /* number of actions; not a real event */
};
/*
 * The base object of the driver model: refcounted, named, optionally
 * parented, and mirrored as a directory in sysfs.  Embed it in a
 * larger structure and use container_of() to get back out.
 */
struct kobject {
 const char *name;           /* sysfs directory name */
 struct list_head entry;     /* link on the owning kset's list */
 struct kobject *parent;     /* sysfs parent directory, may be NULL */
 struct kset *kset;          /* owning set, may be NULL */
 struct kobj_type *ktype;    /* release + sysfs_ops + default attrs */
 struct sysfs_dirent *sd;    /* backing sysfs directory entry */
 struct kref kref;           /* lifetime; release() runs on last put */
 unsigned int state_initialized:1;       /* kobject_init() done */
 unsigned int state_in_sysfs:1;          /* directory currently exists */
 unsigned int state_add_uevent_sent:1;   /* ADD uevent already emitted */
 unsigned int state_remove_uevent_sent:1;/* REMOVE uevent already emitted */
 unsigned int uevent_suppress:1;         /* don't emit uevents for this kobj */
};
extern int kobject_set_name(struct kobject *kobj, const char *name, ...)
       __attribute__((format(printf, 2, 3)));
extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
      va_list vargs);
11117static inline __attribute__((always_inline)) const char *kobject_name(const struct kobject *kobj)
11118{
11119 return kobj->name;
11120}
/*
 * kobject lifecycle API.  The usual sequence is kobject_init() +
 * kobject_add() (or kobject_init_and_add()), then kobject_put()
 * to drop the final reference; kobject_del() unlinks from sysfs
 * without releasing the reference.
 */
extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
extern int __attribute__((warn_unused_result)) kobject_add(struct kobject *kobj,
       struct kobject *parent,
       const char *fmt, ...)
 __attribute__((format(printf, 3, 4)));
extern int __attribute__((warn_unused_result)) kobject_init_and_add(struct kobject *kobj,
       struct kobj_type *ktype,
       struct kobject *parent,
       const char *fmt, ...)
 __attribute__((format(printf, 4, 5)));
extern void kobject_del(struct kobject *kobj);
/* Dynamically allocated kobjects with a default release method. */
extern struct kobject * __attribute__((warn_unused_result)) kobject_create(void);
extern struct kobject * __attribute__((warn_unused_result)) kobject_create_and_add(const char *name,
       struct kobject *parent);
extern int __attribute__((warn_unused_result)) kobject_rename(struct kobject *, const char *new_name);
extern int __attribute__((warn_unused_result)) kobject_move(struct kobject *, struct kobject *);
/* Reference counting; kobject_get() returns its argument. */
extern struct kobject *kobject_get(struct kobject *kobj);
extern void kobject_put(struct kobject *kobj);
/* Build the full "/a/b/c" sysfs path; caller frees the result. */
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
/*
 * Per-type behaviour for a kobject: how to free it, how its sysfs
 * files read/write, which attributes it gets by default, and how it
 * is tagged for namespaced sysfs.
 */
struct kobj_type {
 void (*release)(struct kobject *kobj);  /* called on last kref put */
 const struct sysfs_ops *sysfs_ops;
 struct attribute **default_attrs;       /* NULL-terminated */
 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
 const void *(*namespace)(struct kobject *kobj);
};
/* Scratch buffers used while composing a uevent's environment. */
struct kobj_uevent_env {
 char *envp[32];  /* pointers into buf, NULL-terminated */
 int envp_idx;    /* next free envp slot */
 char buf[2048];  /* backing storage for the env strings */
 int buflen;      /* bytes of buf used so far */
};
/*
 * Hooks a kset may install to filter, rename, or augment uevents
 * emitted for its member kobjects.
 */
struct kset_uevent_ops {
 int (* const filter)(struct kset *kset, struct kobject *kobj);
 const char *(* const name)(struct kset *kset, struct kobject *kobj);
 int (* const uevent)(struct kset *kset, struct kobject *kobj,
        struct kobj_uevent_env *env);
};
/* Attribute with typed show/store bound to the owning kobject. */
struct kobj_attribute {
 struct attribute attr;
 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
   char *buf);
 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
    const char *buf, size_t count);
};
extern const struct sysfs_ops kobj_sysfs_ops;
struct sock;
/*
 * A kset is a collection of kobjects of the same kind: it is itself
 * a kobject (so it appears in sysfs) plus a locked membership list
 * and optional uevent hooks that apply to every member.
 */
struct kset {
 struct list_head list;       /* member kobjects, via kobject.entry */
 spinlock_t list_lock;        /* protects list */
 struct kobject kobj;         /* the set's own representation */
 const struct kset_uevent_ops *uevent_ops;
};
extern void kset_init(struct kset *kset);
extern int __attribute__((warn_unused_result)) kset_register(struct kset *kset);
extern void kset_unregister(struct kset *kset);
extern struct kset * __attribute__((warn_unused_result)) kset_create_and_add(const char *name,
      const struct kset_uevent_ops *u,
      struct kobject *parent_kobj);
/*
 * Map a kobject embedded in a kset back to the enclosing kset
 * (open-coded container_of via a statement expression).
 * NULL-safe: returns NULL when @kobj is NULL.
 */
static inline __attribute__((always_inline)) struct kset *to_kset(struct kobject *kobj)
{
 return kobj ? ({ const typeof( ((struct kset *)0)->kobj ) *__mptr = (kobj); (struct kset *)( (char *)__mptr - __builtin_offsetof(struct kset,kobj) );}) : ((void *)0);
}
11184static inline __attribute__((always_inline)) struct kset *kset_get(struct kset *k)
11185{
11186 return k ? to_kset(kobject_get(&k->kobj)) : ((void *)0);
11187}
/*
 * Drop a reference on @k via its embedded kobject.  Not NULL-safe:
 * @k must be a valid kset (kobject_put itself handles a NULL kobj,
 * but &k->kobj on a NULL k is undefined).
 */
static inline __attribute__((always_inline)) void kset_put(struct kset *k)
{
 kobject_put(&k->kobj);
}
/* Return the kobj_type describing @kobj's behaviour (may be NULL). */
static inline __attribute__((always_inline)) struct kobj_type *get_ktype(struct kobject *kobj)
{
 return kobj->ktype;
}
/* Find a member of @kset by name; the _hinted form tries a likely
 * neighbour first to speed repeated ordered lookups. */
extern struct kobject *kset_find_obj(struct kset *, const char *);
extern struct kobject *kset_find_obj_hinted(struct kset *, const char *,
       struct kobject *);
/* Well-known top-level sysfs directories (/sys/kernel, /sys/power, ...). */
extern struct kobject *kernel_kobj;
extern struct kobject *mm_kobj;
extern struct kobject *hypervisor_kobj;
extern struct kobject *power_kobj;
extern struct kobject *firmware_kobj;
/* Emit a hotplug uevent (optionally with extra environment vars). */
int kobject_uevent(struct kobject *kobj, enum kobject_action action);
int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
         char *envp[]);
int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
 __attribute__((format (printf, 2, 3)));
/* Parse an action name ("add", "remove", ...) written to sysfs. */
int kobject_action_type(const char *buf, size_t count,
   enum kobject_action *type);
struct kernel_param;
/*
 * Type-specific handlers for one module parameter: parse a value
 * string (set), format the current value (get), free owned storage.
 */
struct kernel_param_ops {
 int (*set)(const char *val, const struct kernel_param *kp);
 int (*get)(char *buffer, const struct kernel_param *kp);
 void (*free)(void *arg);
};
/*
 * One module parameter: its name, handler ops, sysfs permission
 * bits, and a pointer to the backing storage.  The union member
 * used depends on the ops (plain value vs. string vs. array).
 */
struct kernel_param {
 const char *name;
 const struct kernel_param_ops *ops;
 u16 perm;   /* sysfs file mode; 0 = not shown in sysfs */
 u16 flags;
 union {
  void *arg;                        /* plain scalar/charp target */
  const struct kparam_string *str;  /* fixed-size string target */
  const struct kparam_array *arr;   /* array target */
 };
};
/* Fixed-capacity string parameter: value copied into .string. */
struct kparam_string {
 unsigned int maxlen;
 char *string;
};
/* Array parameter: up to .max elements of .elemsize bytes at .elem;
 * *num tracks how many were actually supplied. */
struct kparam_array
{
 unsigned int max;
 unsigned int elemsize;
 unsigned int *num;
 const struct kernel_param_ops *ops;  /* per-element handlers */
 void *elem;
};
/*
 * Compile-time signature check: merely referencing @oldset here
 * forces it to match the legacy module_param set-function type.
 * The function does nothing at run time and always returns 0.
 */
static inline __attribute__((always_inline)) int
__check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
{
 (void)oldset; /* only the pointer's type matters */
 return 0;
}
/* Serialise access to parameters that can change at run time. */
extern void __kernel_param_lock(void);
extern void __kernel_param_unlock(void);
/* Parse "name=value" pairs from @args against @params; unmatched
 * names are handed to @unknown. */
extern int parse_args(const char *name,
        char *args,
        const struct kernel_param *params,
        unsigned num,
        int (*unknown)(char *param, char *val));
extern void destroy_params(const struct kernel_param *params, unsigned num);
/*
 * Stock kernel_param_ops and their set/get helpers, one triple per
 * supported parameter type (byte, short, int, ... bool, string).
 */
extern struct kernel_param_ops param_ops_byte;
extern int param_set_byte(const char *val, const struct kernel_param *kp);
extern int param_get_byte(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_short;
extern int param_set_short(const char *val, const struct kernel_param *kp);
extern int param_get_short(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_ushort;
extern int param_set_ushort(const char *val, const struct kernel_param *kp);
extern int param_get_ushort(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_int;
extern int param_set_int(const char *val, const struct kernel_param *kp);
extern int param_get_int(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_uint;
extern int param_set_uint(const char *val, const struct kernel_param *kp);
extern int param_get_uint(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_long;
extern int param_set_long(const char *val, const struct kernel_param *kp);
extern int param_get_long(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_ulong;
extern int param_set_ulong(const char *val, const struct kernel_param *kp);
extern int param_get_ulong(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_charp;
extern int param_set_charp(const char *val, const struct kernel_param *kp);
extern int param_get_charp(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_bool;
extern int param_set_bool(const char *val, const struct kernel_param *kp);
extern int param_get_bool(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_invbool;
extern int param_set_invbool(const char *val, const struct kernel_param *kp);
extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_array_ops;
extern struct kernel_param_ops param_ops_string;
extern int param_set_copystring(const char *val, const struct kernel_param *);
extern int param_get_string(char *buffer, const struct kernel_param *kp);
struct module;
/* Create/remove the /sys/module/<mod>/parameters directory. */
extern int module_param_sysfs_setup(struct module *mod,
        const struct kernel_param *kparam,
        unsigned int num_params);
extern void module_param_sysfs_remove(struct module *mod);
/*
 * State for one jump-label (static branch) key: the enable count
 * plus the patch sites (entries) and per-module extensions (next).
 */
struct jump_label_key {
 atomic_t enabled;
 struct jump_entry *entries;
 struct jump_label_mod *next;
};
/*
 * x86 jump-label core.  Emits a 5-byte "jmp rel32 0" (0xe9 + .long 0,
 * i.e. a jump to the next instruction) and records its address, the
 * l_yes target, and @key in the __jump_table section.  At run time
 * the jump-label code patches the site to either fall through
 * (branch disabled, returns false) or jump to l_yes (enabled,
 * returns true) — so the condition costs no compare/test at all.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) bool arch_static_branch(struct jump_label_key *key)
{
 asm goto("1:"
  ".byte 0xe9 \n\t .long 0\n\t"
  ".pushsection __jump_table, \"aw\" \n\t"
  " " ".balign 4" " " "\n\t"
  " " ".long" " " "1b, %l[l_yes], %c0 \n\t"
  ".popsection \n\t"
  : : "i" (key) : : l_yes);
 return false;
l_yes:
 return true;
}
/* 32-bit addresses are sufficient for this (ILP32) build. */
typedef u32 jump_label_t;
/* One patch site recorded in __jump_table: the instruction address,
 * the enabled-branch target, and the controlling key. */
struct jump_entry {
 jump_label_t code;
 jump_label_t target;
 jump_label_t key;
};
/* Direction to patch a site: NOP it out or make it jump. */
enum jump_label_type {
 JUMP_LABEL_DISABLE = 0,
 JUMP_LABEL_ENABLE,
};
struct module;
/*
 * Generic entry point for a static branch test; on this arch it is
 * a thin wrapper around the asm-goto implementation above.
 */
static inline __attribute__((always_inline)) __attribute__((always_inline)) bool static_branch(struct jump_label_key *key)
{
 return arch_static_branch(key);
}
/* Bounds of the built-in __jump_table section (linker-provided). */
extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
/* Jump-label management: locking, code patching, enable counting. */
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
         enum jump_label_type type);
extern void arch_jump_label_text_poke_early(jump_label_t addr);
/* Nonzero if [start,end) overlaps any jump-label patch site. */
extern int jump_label_text_reserved(void *start, void *end);
extern void jump_label_inc(struct jump_label_key *key);
extern void jump_label_dec(struct jump_label_key *key);
extern bool jump_label_enabled(struct jump_label_key *key);
extern void jump_label_apply_nops(struct module *mod);
struct module;
struct tracepoint;
/* One registered probe: the callback and its private data cookie. */
struct tracepoint_func {
 void *func;
 void *data;
};
/*
 * A tracepoint: named hook with a jump-label key gating its fast
 * path, optional (un)registration callbacks, and the NULL-terminated
 * array of currently attached probes.
 */
struct tracepoint {
 const char *name;
 struct jump_label_key key;
 void (*regfunc)(void);
 void (*unregfunc)(void);
 struct tracepoint_func *funcs;
};
/* Attach/detach probes by tracepoint name; the _noupdate variants
 * defer re-patching until tracepoint_probe_update_all(). */
extern int tracepoint_probe_register(const char *name, void *probe, void *data);
extern int
tracepoint_probe_unregister(const char *name, void *probe, void *data);
extern int tracepoint_probe_register_noupdate(const char *name, void *probe,
           void *data);
extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
      void *data);
extern void tracepoint_probe_update_all(void);
/* Cursor for walking all tracepoints, including per-module ones. */
struct tracepoint_iter {
 struct module *module;
 struct tracepoint * const *tracepoint;
};
extern void tracepoint_iter_start(struct tracepoint_iter *iter);
extern void tracepoint_iter_next(struct tracepoint_iter *iter);
extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
extern int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
 struct tracepoint * const *begin, struct tracepoint * const *end);
/*
 * Wait until every CPU has left the RCU-sched read-side sections
 * that probe iteration runs under, so a just-unregistered probe
 * can no longer be executing and its data may be freed.
 */
static inline __attribute__((always_inline)) void tracepoint_synchronize_unregister(void)
{
 synchronize_sched();
}
/* Re-resolve probes for every tracepoint in [begin, end). */
extern
void tracepoint_update_probe_range(struct tracepoint * const *begin,
       struct tracepoint * const *end);
/* This architecture keeps no extra per-module state. */
struct mod_arch_specific
{
};
struct module;
/*
 * Preprocessor-expanded tracepoint definitions from
 * include/trace/events/module.h (TRACE_EVENT/DECLARE_TRACE output).
 * Each group declares the tracepoint object plus four inline helpers:
 *   trace_<name>()                  - fire the event: cheap static-branch
 *                                     test, then walk funcs[] under
 *                                     rcu_read_lock_sched_notrace()
 *   register_trace_<name>()        - attach a probe by name
 *   unregister_trace_<name>()      - detach a probe by name
 *   check_trace_callback_type_<name>() - compile-time probe-signature check
 * The embedded ftrace_branch_data blocks are branch-profiler
 * instrumentation expanded from likely()/unlikely().
 * Generated code: do not hand-edit the expansions below.
 */
/* module_load(mod): a module finished loading. */
extern struct tracepoint
 __tracepoint_module_load
 ; static inline __attribute__((always_inline)) void
 trace_module_load
 (struct module *mod) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_load.key)))) ? !!((static_branch(&__tracepoint_module_load.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 45
 , }; ______r = !!((static_branch(&__tracepoint_module_load.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 45
 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_load)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_load)->funcs))* )(*(volatile typeof(((&__tracepoint_module_load)->funcs)) *)&(((&__tracepoint_module_load)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_load)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 45
 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, struct module *mod))(it_func))(__data, mod); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
 register_trace_module_load
 (void (*probe)(void *__data, struct module *mod), void *data) { return tracepoint_probe_register("module_load", (void *)probe, data); } static inline __attribute__((always_inline)) int
 unregister_trace_module_load
 (void (*probe)(void *__data, struct module *mod), void *data) { return tracepoint_probe_unregister("module_load", (void *)probe, data); } static inline __attribute__((always_inline)) void
 check_trace_callback_type_module_load
 (void (*cb)(void *__data, struct module *mod)) { }
 ;
/* module_free(mod): a module is being unloaded/freed. */
extern struct tracepoint
 __tracepoint_module_free
 ; static inline __attribute__((always_inline)) void
 trace_module_free
 (struct module *mod) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_free.key)))) ? !!((static_branch(&__tracepoint_module_free.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 62
 , }; ______r = !!((static_branch(&__tracepoint_module_free.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 62
 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_free)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_free)->funcs))* )(*(volatile typeof(((&__tracepoint_module_free)->funcs)) *)&(((&__tracepoint_module_free)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_free)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 62
 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, struct module *mod))(it_func))(__data, mod); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
 register_trace_module_free
 (void (*probe)(void *__data, struct module *mod), void *data) { return tracepoint_probe_register("module_free", (void *)probe, data); } static inline __attribute__((always_inline)) int
 unregister_trace_module_free
 (void (*probe)(void *__data, struct module *mod), void *data) { return tracepoint_probe_unregister("module_free", (void *)probe, data); } static inline __attribute__((always_inline)) void
 check_trace_callback_type_module_free
 (void (*cb)(void *__data, struct module *mod)) { }
 ;
 ;
/* module_get(mod, ip): a reference was taken on a module. */
extern struct tracepoint
 __tracepoint_module_get
 ; static inline __attribute__((always_inline)) void
 trace_module_get
 (struct module *mod, unsigned long ip) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_get.key)))) ? !!((static_branch(&__tracepoint_module_get.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 94
 , }; ______r = !!((static_branch(&__tracepoint_module_get.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 94
 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_get)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_get)->funcs))* )(*(volatile typeof(((&__tracepoint_module_get)->funcs)) *)&(((&__tracepoint_module_get)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_get)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 94
 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, struct module *mod, unsigned long ip))(it_func))(__data, mod, ip); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
 register_trace_module_get
 (void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return tracepoint_probe_register("module_get", (void *)probe, data); } static inline __attribute__((always_inline)) int
 unregister_trace_module_get
 (void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return tracepoint_probe_unregister("module_get", (void *)probe, data); } static inline __attribute__((always_inline)) void
 check_trace_callback_type_module_get
 (void (*cb)(void *__data, struct module *mod, unsigned long ip)) { }
 ;
/* module_put(mod, ip): a reference on a module was dropped. */
extern struct tracepoint
 __tracepoint_module_put
 ; static inline __attribute__((always_inline)) void
 trace_module_put
 (struct module *mod, unsigned long ip) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_put.key)))) ? !!((static_branch(&__tracepoint_module_put.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 101
 , }; ______r = !!((static_branch(&__tracepoint_module_put.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 101
 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_put)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_put)->funcs))* )(*(volatile typeof(((&__tracepoint_module_put)->funcs)) *)&(((&__tracepoint_module_put)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_put)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 101
 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, struct module *mod, unsigned long ip))(it_func))(__data, mod, ip); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
 register_trace_module_put
 (void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return tracepoint_probe_register("module_put", (void *)probe, data); } static inline __attribute__((always_inline)) int
 unregister_trace_module_put
 (void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return tracepoint_probe_unregister("module_put", (void *)probe, data); } static inline __attribute__((always_inline)) void
 check_trace_callback_type_module_put
 (void (*cb)(void *__data, struct module *mod, unsigned long ip)) { }
 ;
/* module_request(name, wait, ip): userspace modprobe was requested. */
extern struct tracepoint
 __tracepoint_module_request
 ; static inline __attribute__((always_inline)) void
 trace_module_request
 (char *name, bool wait, unsigned long ip) { if (__builtin_constant_p(((static_branch(&__tracepoint_module_request.key)))) ? !!((static_branch(&__tracepoint_module_request.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 124
 , }; ______r = !!((static_branch(&__tracepoint_module_request.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 124
 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_module_request)->funcs)) *_________p1 = (typeof(*((&__tracepoint_module_request)->funcs))* )(*(volatile typeof(((&__tracepoint_module_request)->funcs)) *)&(((&__tracepoint_module_request)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_module_request)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/module.h"
 , .line =
 124
 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, char *name, bool wait, unsigned long ip))(it_func))(__data, name, wait, ip); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
 register_trace_module_request
 (void (*probe)(void *__data, char *name, bool wait, unsigned long ip), void *data) { return tracepoint_probe_register("module_request", (void *)probe, data); } static inline __attribute__((always_inline)) int
 unregister_trace_module_request
 (void (*probe)(void *__data, char *name, bool wait, unsigned long ip), void *data) { return tracepoint_probe_unregister("module_request", (void *)probe, data); } static inline __attribute__((always_inline)) void
 check_trace_callback_type_module_request
 (void (*cb)(void *__data, char *name, bool wait, unsigned long ip)) { }
 ;
11500struct kernel_symbol
11501{
11502 unsigned long value;
11503 const char *name;
11504};
11505struct modversion_info
11506{
11507 unsigned long crc;
11508 char name[(64 - sizeof(unsigned long))];
11509};
11510struct module;
11511struct module_attribute {
11512 struct attribute attr;
11513 ssize_t (*show)(struct module_attribute *, struct module *, char *);
11514 ssize_t (*store)(struct module_attribute *, struct module *,
11515 const char *, size_t count);
11516 void (*setup)(struct module *, const char *);
11517 int (*test)(struct module *);
11518 void (*free)(struct module *);
11519};
11520struct module_version_attribute {
11521 struct module_attribute mattr;
11522 const char *module_name;
11523 const char *version;
11524} __attribute__ ((__aligned__(sizeof(void *))));
11525extern ssize_t __modver_version_show(struct module_attribute *,
11526 struct module *, char *);
11527struct module_kobject
11528{
11529 struct kobject kobj;
11530 struct module *mod;
11531 struct kobject *drivers_dir;
11532 struct module_param_attrs *mp;
11533};
11534extern int init_module(void);
11535extern void cleanup_module(void);
/* Exception-table handling (fixups for faulting kernel instructions). */
struct exception_table_entry;
/* Binary-search [first, last] for the entry covering @value. */
const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
	       const struct exception_table_entry *last,
	       unsigned long value);
void sort_extable(struct exception_table_entry *start,
		  struct exception_table_entry *finish);
void sort_main_extable(void);
/* Drop __init-section entries once a module's init text is freed. */
void trim_init_extable(struct module *m);
extern struct module __this_module;
const struct exception_table_entry *search_exception_tables(unsigned long add);
struct notifier_block;
extern int modules_disabled;
/* Symbol lookup by name; the _gpl variant only sees GPL exports. */
void *__symbol_get(const char *symbol);
void *__symbol_get_gpl(const char *symbol);
11551struct module_use {
11552 struct list_head source_list;
11553 struct list_head target_list;
11554 struct module *source, *target;
11555};
/*
 * module_state - lifecycle of a loaded module:
 * COMING (loading) -> LIVE (normal operation) -> GOING (being removed).
 */
enum module_state
{
	MODULE_STATE_LIVE,
	MODULE_STATE_COMING,
	MODULE_STATE_GOING,
};
11562struct module
11563{
11564 enum module_state state;
11565 struct list_head list;
11566 char name[(64 - sizeof(unsigned long))];
11567 struct module_kobject mkobj;
11568 struct module_attribute *modinfo_attrs;
11569 const char *version;
11570 const char *srcversion;
11571 struct kobject *holders_dir;
11572 const struct kernel_symbol *syms;
11573 const unsigned long *crcs;
11574 unsigned int num_syms;
11575 struct kernel_param *kp;
11576 unsigned int num_kp;
11577 unsigned int num_gpl_syms;
11578 const struct kernel_symbol *gpl_syms;
11579 const unsigned long *gpl_crcs;
11580 const struct kernel_symbol *unused_syms;
11581 const unsigned long *unused_crcs;
11582 unsigned int num_unused_syms;
11583 unsigned int num_unused_gpl_syms;
11584 const struct kernel_symbol *unused_gpl_syms;
11585 const unsigned long *unused_gpl_crcs;
11586 const struct kernel_symbol *gpl_future_syms;
11587 const unsigned long *gpl_future_crcs;
11588 unsigned int num_gpl_future_syms;
11589 unsigned int num_exentries;
11590 struct exception_table_entry *extable;
11591 int (*init)(void);
11592 void *module_init;
11593 void *module_core;
11594 unsigned int init_size, core_size;
11595 unsigned int init_text_size, core_text_size;
11596 unsigned int init_ro_size, core_ro_size;
11597 struct mod_arch_specific arch;
11598 unsigned int taints;
11599 unsigned num_bugs;
11600 struct list_head bug_list;
11601 struct bug_entry *bug_table;
11602 Elf32_Sym *symtab, *core_symtab;
11603 unsigned int num_symtab, core_num_syms;
11604 char *strtab, *core_strtab;
11605 struct module_sect_attrs *sect_attrs;
11606 struct module_notes_attrs *notes_attrs;
11607 char *args;
11608 void *percpu;
11609 unsigned int percpu_size;
11610 unsigned int num_tracepoints;
11611 struct tracepoint * const *tracepoints_ptrs;
11612 struct jump_entry *jump_entries;
11613 unsigned int num_jump_entries;
11614 unsigned int num_trace_bprintk_fmt;
11615 const char **trace_bprintk_fmt_start;
11616 struct ftrace_event_call **trace_events;
11617 unsigned int num_trace_events;
11618 unsigned int num_ftrace_callsites;
11619 unsigned long *ftrace_callsites;
11620 struct list_head source_list;
11621 struct list_head target_list;
11622 struct task_struct *waiter;
11623 void (*exit)(void);
11624 struct module_ref {
11625 unsigned int incs;
11626 unsigned int decs;
11627 } *refptr;
11628};
11629extern struct mutex module_mutex;
11630static inline __attribute__((always_inline)) int module_is_live(struct module *mod)
11631{
11632 return mod->state != MODULE_STATE_GOING;
11633}
/* Map a kernel address back to the module owning it (NULL if none). */
struct module *__module_text_address(unsigned long addr);
struct module *__module_address(unsigned long addr);
bool is_module_address(unsigned long addr);
bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);
11639static inline __attribute__((always_inline)) int within_module_core(unsigned long addr, struct module *mod)
11640{
11641 return (unsigned long)mod->module_core <= addr &&
11642 addr < (unsigned long)mod->module_core + mod->core_size;
11643}
11644static inline __attribute__((always_inline)) int within_module_init(unsigned long addr, struct module *mod)
11645{
11646 return (unsigned long)mod->module_init <= addr &&
11647 addr < (unsigned long)mod->module_init + mod->init_size;
11648}
/* Look a module up by name under module_mutex. */
struct module *find_module(const char *name);

/*
 * symsearch - one contiguous slice of an exported-symbol table,
 * tagged with its licence class and whether it is an "unused" table.
 */
struct symsearch {
	const struct kernel_symbol *start, *stop;	/* [start, stop) range */
	const unsigned long *crcs;			/* matching CRC array, if versioning */
	enum {
		NOT_GPL_ONLY,
		GPL_ONLY,
		WILL_BE_GPL_ONLY,
	} licence;
	bool unused;
};
/*
 * Symbol resolution: find @name across all exported tables; on success
 * optionally reports the owning module and CRC. @gplok allows GPL-only
 * exports; @warn controls diagnostics for deprecated tables.
 */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn);
/* Invoke @fn on every symsearch slice of every module until it returns true. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data), void *data);
/* kallsyms access into module symbol tables. */
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *name, char *module_name, int *exported);
unsigned long module_kallsyms_lookup_name(const char *name);
int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data);
/* Drop the final reference and exit the current thread; never returns. */
extern void __module_put_and_exit(struct module *mod, long code)
	__attribute__((noreturn));
unsigned int module_refcount(struct module *mod);
void __symbol_put(const char *symbol);
void symbol_put_addr(void *addr);
11679static inline __attribute__((always_inline)) void __module_get(struct module *module)
11680{
11681 if (__builtin_constant_p(((module))) ? !!((module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 510, }; ______r = !!((module)); ______f.miss_hit[______r]++; ______r; })) {
11682 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
11683 do { do { const void *__vpp_verify = (typeof(&(((module->refptr->incs)))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((module->refptr->incs)))) { case 1: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? 
!!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 512, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((module->refptr->incs))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((module->refptr->incs)))))); (typeof(*(&((((module->refptr->incs)))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
11684 trace_module_get(module, ({ __label__ __here; __here: (unsigned long)&&__here; }));
11685 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 514, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 514, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 514, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 514, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
11686 }
11687}
11688static inline __attribute__((always_inline)) int try_module_get(struct module *module)
11689{
11690 int ret = 1;
11691 if (__builtin_constant_p(((module))) ? !!((module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 522, }; ______r = !!((module)); ______f.miss_hit[______r]++; ______r; })) {
11692 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
11693 if (__builtin_constant_p((((__builtin_constant_p(module_is_live(module)) ? !!(module_is_live(module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 525, }; ______r = __builtin_expect(!!(module_is_live(module)), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(module_is_live(module)) ? !!(module_is_live(module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 525, }; ______r = __builtin_expect(!!(module_is_live(module)), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 525, }; ______r = !!(((__builtin_constant_p(module_is_live(module)) ? !!(module_is_live(module)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 525, }; ______r = __builtin_expect(!!(module_is_live(module)), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
11694 do { do { const void *__vpp_verify = (typeof(&(((module->refptr->incs)))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((module->refptr->incs)))) { case 1: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((module->refptr->incs)))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? 
!!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((module->refptr->incs))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 526, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((module->refptr->incs)))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((module->refptr->incs))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((module->refptr->incs)))))); (typeof(*(&((((module->refptr->incs)))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
11695 trace_module_get(module, ({ __label__ __here; __here: (unsigned long)&&__here; }));
11696 } else
11697 ret = 0;
11698 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 531, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 531, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 531, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/module.h", .line = 531, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
11699 }
11700 return ret;
11701}
/* Kernel module refcounting / symbol-lookup API (preprocessed from include/linux/module.h). */
11702 extern void module_put(struct module *module);
11703 int ref_module(struct module *a, struct module *b);
/* Resolve a kernel text address to module/symbol info; results returned via out-pointers. */
11704 const char *module_address_lookup(unsigned long addr,
11705 unsigned long *symbolsize,
11706 unsigned long *offset,
11707 char **modname,
11708 char *namebuf);
11709 int lookup_module_symbol_name(unsigned long addr, char *symname);
11710 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
11711 const struct exception_table_entry *search_module_extables(unsigned long addr);
11712 int register_module_notifier(struct notifier_block * nb);
11713 int unregister_module_notifier(struct notifier_block * nb);
11714 extern void print_modules(void);
11715 extern void module_update_tracepoints(void);
11716 extern int module_get_iter_tracepoints(struct tracepoint_iter *iter);
11717 extern struct kset *module_kset;
11718 extern struct kobj_type module_ktype;
11719 extern int module_sysfs_initialized;
/* No-op stubs: module-text page-protection toggling is compiled out in this configuration. */
11720 static inline __attribute__((always_inline)) void set_all_modules_text_rw(void) { }
11721 static inline __attribute__((always_inline)) void set_all_modules_text_ro(void) { }
11722 void module_bug_finalize(const Elf32_Ehdr *, const Elf32_Shdr *,
11723 struct module *);
11724 void module_bug_cleanup(struct module *);
/* Core slab allocator API declarations (preprocessed from include/linux/slab.h). */
11725 void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init(void);
11726 int slab_is_available(void);
/* Create a named cache: name, object size, alignment, flags, constructor. */
11727 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
11728 unsigned long,
11729 void (*)(void *));
11730 void kmem_cache_destroy(struct kmem_cache *);
11731 int kmem_cache_shrink(struct kmem_cache *);
11732 void kmem_cache_free(struct kmem_cache *, void *);
11733 unsigned int kmem_cache_size(struct kmem_cache *);
11734 void * __attribute__((warn_unused_result)) __krealloc(const void *, size_t, gfp_t);
11735 void * __attribute__((warn_unused_result)) krealloc(const void *, size_t, gfp_t);
11736 void kfree(const void *);
/* kzfree: zeroing variant of kfree (declaration only; behavior not visible here). */
11737 void kzfree(const void *);
11738 size_t ksize(const void *);
/*
 * kmemleak hooks compiled out (CONFIG_DEBUG_KMEMLEAK disabled in this build):
 * every function below is an empty always-inline stub, so call sites vanish.
 */
11739 static inline __attribute__((always_inline)) void kmemleak_init(void)
11740 {
11741 }
11742 static inline __attribute__((always_inline)) void kmemleak_alloc(const void *ptr, size_t size, int min_count,
11743 gfp_t gfp)
11744 {
11745 }
11746 static inline __attribute__((always_inline)) void kmemleak_alloc_recursive(const void *ptr, size_t size,
11747 int min_count, unsigned long flags,
11748 gfp_t gfp)
11749 {
11750 }
11751 static inline __attribute__((always_inline)) void kmemleak_free(const void *ptr)
11752 {
11753 }
11754 static inline __attribute__((always_inline)) void kmemleak_free_part(const void *ptr, size_t size)
11755 {
11756 }
11757 static inline __attribute__((always_inline)) void kmemleak_free_recursive(const void *ptr, unsigned long flags)
11758 {
11759 }
11760 static inline __attribute__((always_inline)) void kmemleak_not_leak(const void *ptr)
11761 {
11762 }
11763 static inline __attribute__((always_inline)) void kmemleak_ignore(const void *ptr)
11764 {
11765 }
11766 static inline __attribute__((always_inline)) void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
11767 {
11768 }
11769 static inline __attribute__((always_inline)) void kmemleak_erase(void **ptr)
11770 {
11771 }
11772 static inline __attribute__((always_inline)) void kmemleak_no_scan(const void *ptr)
11773 {
11774 }
/* SLUB per-cpu statistics counters (from include/linux/slub_def.h). */
11775 enum stat_item {
11776  ALLOC_FASTPATH,
11777  ALLOC_SLOWPATH,
11778  FREE_FASTPATH,
11779  FREE_SLOWPATH,
11780  FREE_FROZEN,
11781  FREE_ADD_PARTIAL,
11782  FREE_REMOVE_PARTIAL,
11783  ALLOC_FROM_PARTIAL,
11784  ALLOC_SLAB,
11785  ALLOC_REFILL,
11786  FREE_SLAB,
11787  CPUSLAB_FLUSH,
11788  DEACTIVATE_FULL,
11789  DEACTIVATE_EMPTY,
11790  DEACTIVATE_TO_HEAD,
11791  DEACTIVATE_TO_TAIL,
11792  DEACTIVATE_REMOTE_FREES,
11793  ORDER_FALLBACK,
11794  CMPXCHG_DOUBLE_CPU_FAIL,
/* Sentinel: number of stat items above. */
11795  NR_SLUB_STAT_ITEMS };
/* Per-cpu SLUB state: current freelist, transaction id, and active slab page. */
11796 struct kmem_cache_cpu {
11797  void **freelist;
11798  unsigned long tid;
11799  struct page *page;
11800  int node;
11801 };
/* Per-node SLUB state: partial-slab list plus slab/object accounting. */
11802 struct kmem_cache_node {
11803  spinlock_t list_lock;
11804  unsigned long nr_partial;
11805  struct list_head partial;
11806  atomic_long_t nr_slabs;
11807  atomic_long_t total_objects;
11808  struct list_head full;
11809 };
/* Packed (order, objects) pair; encoding lives in the single word 'x'. */
11810 struct kmem_cache_order_objects {
11811  unsigned long x;
11812 };
/* SLUB cache descriptor (from include/linux/slub_def.h, UMA config: one node slot). */
11813 struct kmem_cache {
11814  struct kmem_cache_cpu *cpu_slab;
11815  unsigned long flags;
11816  unsigned long min_partial;
11817  int size;
11818  int objsize;
11819  int offset;
11820  struct kmem_cache_order_objects oo;
11821  struct kmem_cache_order_objects max;
11822  struct kmem_cache_order_objects min;
11823  gfp_t allocflags;
11824  int refcount;
11825  void (*ctor)(void *);
11826  int inuse;
11827  int align;
11828  int reserved;
11829  const char *name;
11830  struct list_head list;
11831  struct kobject kobj;
/* (1 << 0) == 1 node: NUMA is compiled out in this configuration. */
11832  struct kmem_cache_node *node[(1 << 0)];
11833 };
/* (12 + 2) == 14 statically sized kmalloc caches for this config. */
11834 extern struct kmem_cache *kmalloc_caches[(12 + 2)];
/*
 * Map an allocation size to the index of the kmalloc cache serving it.
 * Preprocessed body: each 'if' is the expansion of the ftrace branch-profiling
 * macro (the ({ ... ______f.miss_hit[______r]++; ... }) statement expressions
 * only count branch outcomes); the huge nested ternary below is the expanded
 * compile-time ilog2() macro. Code is byte-identical to the original expansion.
 */
11835 static inline __attribute__((always_inline)) __attribute__((always_inline)) int kmalloc_index(size_t size)
11836 {
11837  if (__builtin_constant_p(((!size))) ? !!((!size)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 158, }; ______r = !!((!size)); ______f.miss_hit[______r]++; ______r; }))
11838   return 0;
11839  if (__builtin_constant_p(((size <= 8))) ? !!((size <= 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 161, }; ______r = !!((size <= 8)); ______f.miss_hit[______r]++; ______r; }))
/* Expanded ilog2(KMALLOC_MIN_SIZE): evaluates to 3 at compile time for the 8-byte minimum. */
11840   return ( __builtin_constant_p(8) ? ( (8) < 1 ? ____ilog2_NaN() : (8) & (1ULL << 63) ? 63 : (8) & (1ULL << 62) ? 62 : (8) & (1ULL << 61) ? 61 : (8) & (1ULL << 60) ? 60 : (8) & (1ULL << 59) ? 59 : (8) & (1ULL << 58) ? 58 : (8) & (1ULL << 57) ? 57 : (8) & (1ULL << 56) ? 56 : (8) & (1ULL << 55) ? 55 : (8) & (1ULL << 54) ? 54 : (8) & (1ULL << 53) ? 53 : (8) & (1ULL << 52) ? 52 : (8) & (1ULL << 51) ? 51 : (8) & (1ULL << 50) ? 50 : (8) & (1ULL << 49) ? 49 : (8) & (1ULL << 48) ? 48 : (8) & (1ULL << 47) ? 47 : (8) & (1ULL << 46) ? 46 : (8) & (1ULL << 45) ? 45 : (8) & (1ULL << 44) ? 44 : (8) & (1ULL << 43) ? 43 : (8) & (1ULL << 42) ? 42 : (8) & (1ULL << 41) ? 41 : (8) & (1ULL << 40) ? 40 : (8) & (1ULL << 39) ? 39 : (8) & (1ULL << 38) ? 38 : (8) & (1ULL << 37) ? 37 : (8) & (1ULL << 36) ? 36 : (8) & (1ULL << 35) ? 35 : (8) & (1ULL << 34) ? 34 : (8) & (1ULL << 33) ? 33 : (8) & (1ULL << 32) ? 32 : (8) & (1ULL << 31) ? 31 : (8) & (1ULL << 30) ? 30 : (8) & (1ULL << 29) ? 29 : (8) & (1ULL << 28) ? 28 : (8) & (1ULL << 27) ? 27 : (8) & (1ULL << 26) ? 26 : (8) & (1ULL << 25) ? 25 : (8) & (1ULL << 24) ? 24 : (8) & (1ULL << 23) ? 23 : (8) & (1ULL << 22) ? 22 : (8) & (1ULL << 21) ? 21 : (8) & (1ULL << 20) ? 20 : (8) & (1ULL << 19) ? 19 : (8) & (1ULL << 18) ? 18 : (8) & (1ULL << 17) ? 17 : (8) & (1ULL << 16) ? 16 : (8) & (1ULL << 15) ? 15 : (8) & (1ULL << 14) ? 14 : (8) & (1ULL << 13) ? 13 : (8) & (1ULL << 12) ? 12 : (8) & (1ULL << 11) ? 11 : (8) & (1ULL << 10) ? 10 : (8) & (1ULL << 9) ? 9 : (8) & (1ULL << 8) ? 8 : (8) & (1ULL << 7) ? 7 : (8) & (1ULL << 6) ? 6 : (8) & (1ULL << 5) ? 5 : (8) & (1ULL << 4) ? 4 : (8) & (1ULL << 3) ? 3 : (8) & (1ULL << 2) ? 2 : (8) & (1ULL << 1) ? 1 : (8) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(8) <= 4) ? __ilog2_u32(8) : __ilog2_u64(8) );
/* Indices 1 and 2 are the odd-sized 96- and 192-byte caches. */
11841  if (__builtin_constant_p(((8 <= 32 && size > 64 && size <= 96))) ? !!((8 <= 32 && size > 64 && size <= 96)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 164, }; ______r = !!((8 <= 32 && size > 64 && size <= 96)); ______f.miss_hit[______r]++; ______r; }))
11842   return 1;
11843  if (__builtin_constant_p(((8 <= 64 && size > 128 && size <= 192))) ? !!((8 <= 64 && size > 128 && size <= 192)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 166, }; ______r = !!((8 <= 64 && size > 128 && size <= 192)); ______f.miss_hit[______r]++; ______r; }))
11844   return 2;
/* Power-of-two ladder: index i serves sizes up to 2^i. */
11845  if (__builtin_constant_p(((size <= 8))) ? !!((size <= 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 168, }; ______r = !!((size <= 8)); ______f.miss_hit[______r]++; ______r; })) return 3;
11846  if (__builtin_constant_p(((size <= 16))) ? !!((size <= 16)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 169, }; ______r = !!((size <= 16)); ______f.miss_hit[______r]++; ______r; })) return 4;
11847  if (__builtin_constant_p(((size <= 32))) ? !!((size <= 32)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 170, }; ______r = !!((size <= 32)); ______f.miss_hit[______r]++; ______r; })) return 5;
11848  if (__builtin_constant_p(((size <= 64))) ? !!((size <= 64)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 171, }; ______r = !!((size <= 64)); ______f.miss_hit[______r]++; ______r; })) return 6;
11849  if (__builtin_constant_p(((size <= 128))) ? !!((size <= 128)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 172, }; ______r = !!((size <= 128)); ______f.miss_hit[______r]++; ______r; })) return 7;
11850  if (__builtin_constant_p(((size <= 256))) ? !!((size <= 256)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 173, }; ______r = !!((size <= 256)); ______f.miss_hit[______r]++; ______r; })) return 8;
11851  if (__builtin_constant_p(((size <= 512))) ? !!((size <= 512)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 174, }; ______r = !!((size <= 512)); ______f.miss_hit[______r]++; ______r; })) return 9;
11852  if (__builtin_constant_p(((size <= 1024))) ? !!((size <= 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 175, }; ______r = !!((size <= 1024)); ______f.miss_hit[______r]++; ______r; })) return 10;
11853  if (__builtin_constant_p(((size <= 2 * 1024))) ? !!((size <= 2 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 176, }; ______r = !!((size <= 2 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 11;
11854  if (__builtin_constant_p(((size <= 4 * 1024))) ? !!((size <= 4 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 177, }; ______r = !!((size <= 4 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 12;
11855  if (__builtin_constant_p(((size <= 8 * 1024))) ? !!((size <= 8 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 183, }; ______r = !!((size <= 8 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 13;
11856  if (__builtin_constant_p(((size <= 16 * 1024))) ? !!((size <= 16 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 184, }; ______r = !!((size <= 16 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 14;
11857  if (__builtin_constant_p(((size <= 32 * 1024))) ? !!((size <= 32 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 185, }; ______r = !!((size <= 32 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 15;
11858  if (__builtin_constant_p(((size <= 64 * 1024))) ? !!((size <= 64 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 186, }; ______r = !!((size <= 64 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 16;
11859  if (__builtin_constant_p(((size <= 128 * 1024))) ? !!((size <= 128 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 187, }; ______r = !!((size <= 128 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 17;
11860  if (__builtin_constant_p(((size <= 256 * 1024))) ? !!((size <= 256 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 188, }; ______r = !!((size <= 256 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 18;
11861  if (__builtin_constant_p(((size <= 512 * 1024))) ? !!((size <= 512 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 189, }; ______r = !!((size <= 512 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 19;
11862  if (__builtin_constant_p(((size <= 1024 * 1024))) ? !!((size <= 1024 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 190, }; ______r = !!((size <= 1024 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 20;
11863  if (__builtin_constant_p(((size <= 2 * 1024 * 1024))) ? !!((size <= 2 * 1024 * 1024)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 191, }; ______r = !!((size <= 2 * 1024 * 1024)); ______f.miss_hit[______r]++; ______r; })) return 21;
/* Expanded BUG(): ud2 trap + __bug_table entry; size too large for any kmalloc cache. */
11864  do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/slub_def.h"), "i" (192), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0);
11865  return -1;
11866 }
/*
 * Return the kmalloc cache for 'size', or NULL for size 0 (index 0).
 * The 'if' is the ftrace branch-profiling macro expansion around (index == 0).
 */
11867 static inline __attribute__((always_inline)) __attribute__((always_inline)) struct kmem_cache *kmalloc_slab(size_t size)
11868 {
11869  int index = kmalloc_index(size);
11870  if (__builtin_constant_p(((index == 0))) ? !!((index == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 214, }; ______r = !!((index == 0)); ______f.miss_hit[______r]++; ______r; }))
11871   return ((void *)0);
11872  return kmalloc_caches[index];
11873 }
11874 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
11875 void *__kmalloc(size_t size, gfp_t flags);
/*
 * Large-allocation path: grab 2^order pages directly from the page allocator.
 * (( gfp_t)0x4000u) is the expanded __GFP_COMP flag; kmemleak_alloc is a no-op
 * stub in this build (see stubs above).
 */
11876 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *
11877 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
11878 {
11879  void *ret = (void *) __get_free_pages(flags | (( gfp_t)0x4000u), order);
11880  kmemleak_alloc(ret, size, 1, flags);
11881  return ret;
11882 }
/* Tracing-aware allocation entry points (defined out of line). */
11883 extern void *
11884 kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
11885 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
/* Route oversized kmalloc requests through the page allocator with tracing. */
11886 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc_large(size_t size, gfp_t flags)
11887 {
11888  unsigned int order = get_order(size);
11889  return kmalloc_order_trace(size, flags, order);
11890 }
/*
 * kmalloc fast path. When 'size' is a compile-time constant the cache is
 * selected statically: sizes above 2 pages (2 * (1UL << 12)) go to
 * kmalloc_large(); non-DMA requests (0x01u is the expanded GFP_DMA bit) pick
 * a cache via kmalloc_slab(), where ((void *)16) is the expanded
 * ZERO_SIZE_PTR sentinel for zero-length allocations. Everything else falls
 * through to the generic __kmalloc(). Each 'if' wraps its condition in the
 * ftrace branch-profiling macro expansion.
 */
11891 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc(size_t size, gfp_t flags)
11892 {
11893  if (__builtin_constant_p(((__builtin_constant_p(size)))) ? !!((__builtin_constant_p(size))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 257, }; ______r = !!((__builtin_constant_p(size))); ______f.miss_hit[______r]++; ______r; })) {
11894   if (__builtin_constant_p(((size > (2 * ((1UL) << 12))))) ? !!((size > (2 * ((1UL) << 12)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 258, }; ______r = !!((size > (2 * ((1UL) << 12)))); ______f.miss_hit[______r]++; ______r; }))
11895    return kmalloc_large(size, flags);
11896   if (__builtin_constant_p(((!(flags & (( gfp_t)0x01u))))) ? !!((!(flags & (( gfp_t)0x01u)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 261, }; ______r = !!((!(flags & (( gfp_t)0x01u)))); ______f.miss_hit[______r]++; ______r; })) {
11897    struct kmem_cache *s = kmalloc_slab(size);
11898    if (__builtin_constant_p(((!s))) ? !!((!s)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slub_def.h", .line = 264, }; ______r = !!((!s)); ______f.miss_hit[______r]++; ______r; }))
11899     return ((void *)16);
11900    return kmem_cache_alloc_trace(s, flags, size);
11901   }
11902  }
11903  return __kmalloc(size, flags);
11904 }
/*
 * Zeroed array allocation: returns NULL if n * size would overflow
 * (guard: size != 0 && n > ULONG_MAX / size), otherwise allocates with the
 * expanded __GFP_ZERO flag (0x8000u). The 'if' is the ftrace
 * branch-profiling macro expansion.
 */
11905 static inline __attribute__((always_inline)) void *kcalloc(size_t n, size_t size, gfp_t flags)
11906 {
11907  if (__builtin_constant_p(((size != 0 && n > (~0UL) / size))) ? !!((size != 0 && n > (~0UL) / size)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/slab.h", .line = 225, }; ______r = !!((size != 0 && n > (~0UL) / size)); ______f.miss_hit[______r]++; ______r; }))
11908   return ((void *)0);
11909  return __kmalloc(n * size, flags | (( gfp_t)0x8000u));
11910 }
/* UMA build: node-aware variants ignore 'node' and forward to the plain allocators. */
11911 static inline __attribute__((always_inline)) void *kmalloc_node(size_t size, gfp_t flags, int node)
11912 {
11913  return kmalloc(size, flags);
11914 }
11915 static inline __attribute__((always_inline)) void *__kmalloc_node(size_t size, gfp_t flags, int node)
11916 {
11917  return __kmalloc(size, flags);
11918 }
/* Redundant redeclaration kept verbatim from the preprocessed output. */
11919 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
/* UMA build: node parameter is ignored. */
11920 static inline __attribute__((always_inline)) void *kmem_cache_alloc_node(struct kmem_cache *cachep,
11921 gfp_t flags, int node)
11922 {
11923  return kmem_cache_alloc(cachep, flags);
11924 }
11925 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
/* Zero-initialized cache allocation: ORs in the expanded __GFP_ZERO flag (0x8000u). */
11926 static inline __attribute__((always_inline)) void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
11927 {
11928  return kmem_cache_alloc(k, flags | (( gfp_t)0x8000u));
11929 }
/* Zeroed kmalloc variants: 0x8000u is the expanded __GFP_ZERO flag. */
11930 static inline __attribute__((always_inline)) void *kzalloc(size_t size, gfp_t flags)
11931 {
11932  return kmalloc(size, flags | (( gfp_t)0x8000u));
11933 }
11934 static inline __attribute__((always_inline)) void *kzalloc_node(size_t size, gfp_t flags, int node)
11935 {
11936  return kmalloc_node(size, flags | (( gfp_t)0x8000u), node);
11937 }
11938 void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init_late(void);
/*
 * Disable page faults by bumping the preempt count; the empty asm is a
 * compiler barrier preventing reordering across the critical section.
 */
11939 static inline __attribute__((always_inline)) void pagefault_disable(void)
11940 {
11941  add_preempt_count(1);
11942  __asm__ __volatile__("": : :"memory");
11943 }
/*
 * Re-enable page faults: drop the preempt count, then (expanded
 * preempt_check_resched()) call preempt_schedule() if TIF_NEED_RESCHED
 * (flag bit 3) is set. The giant 'if' is nested ftrace branch-profiling
 * macro expansion around that thread-flag test.
 */
11944 static inline __attribute__((always_inline)) void pagefault_enable(void)
11945 {
11946  __asm__ __volatile__("": : :"memory");
11947  sub_preempt_count(1);
11948  __asm__ __volatile__("": : :"memory");
11949  do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/uaccess.h", .line = 38, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/uaccess.h", .line = 38, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/uaccess.h", .line = 38, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/uaccess.h", .line = 38, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0);
11950 }
/* Safe kernel-memory accessors (may fault without oopsing; see mm/maccess.c). */
11951 extern long probe_kernel_read(void *dst, const void *src, size_t size);
11952 extern long __probe_kernel_read(void *dst, const void *src, size_t size);
11953 extern long __attribute__((no_instrument_function)) probe_kernel_write(void *dst, const void *src, size_t size);
11954 extern long __attribute__((no_instrument_function)) __probe_kernel_write(void *dst, const void *src, size_t size);
/* Forward declarations for the crypto API (preprocessed from include/linux/crypto.h). */
11955 struct scatterlist;
11956 struct crypto_ablkcipher;
11957 struct crypto_async_request;
11958 struct crypto_aead;
11959 struct crypto_blkcipher;
11960 struct crypto_hash;
11961 struct crypto_rng;
11962 struct crypto_tfm;
11963 struct crypto_type;
11964 struct aead_givcrypt_request;
11965 struct skcipher_givcrypt_request;
/* Completion callback invoked when an async crypto request finishes. */
11966 typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
/* Common base for asynchronous crypto requests: queue linkage, completion, tfm. */
11967 struct crypto_async_request {
11968  struct list_head list;
11969  crypto_completion_t complete;
11970  void *data;
11971  struct crypto_tfm *tfm;
11972  u32 flags;
11973 };
/* Async block-cipher request; trailing __ctx[] holds per-request context (u64-aligned). */
11974 struct ablkcipher_request {
11975  struct crypto_async_request base;
11976  unsigned int nbytes;
11977  void *info;
11978  struct scatterlist *src;
11979  struct scatterlist *dst;
11980  void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
11981 };
/* AEAD (authenticated encryption) request: associated data plus src/dst scatterlists. */
11982 struct aead_request {
11983  struct crypto_async_request base;
11984  unsigned int assoclen;
11985  unsigned int cryptlen;
11986  u8 *iv;
11987  struct scatterlist *assoc;
11988  struct scatterlist *src;
11989  struct scatterlist *dst;
11990  void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
11991 };
/* Synchronous block-cipher operation descriptor. */
11992 struct blkcipher_desc {
11993  struct crypto_blkcipher *tfm;
11994  void *info;
11995  u32 flags;
11996 };
/* Single-block cipher descriptor: crfn processes one block, prfn a run of bytes. */
11997 struct cipher_desc {
11998  struct crypto_tfm *tfm;
11999  void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
12000  unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
12001        const u8 *src, unsigned int nbytes);
12002  void *info;
12003 };
/* Synchronous hash operation descriptor. */
12004 struct hash_desc {
12005  struct crypto_hash *tfm;
12006  u32 flags;
12007 };
/* Async block-cipher algorithm ops and key/IV size limits. */
12008 struct ablkcipher_alg {
12009  int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
12010        unsigned int keylen);
12011  int (*encrypt)(struct ablkcipher_request *req);
12012  int (*decrypt)(struct ablkcipher_request *req);
12013  int (*givencrypt)(struct skcipher_givcrypt_request *req);
12014  int (*givdecrypt)(struct skcipher_givcrypt_request *req);
12015  const char *geniv;
12016  unsigned int min_keysize;
12017  unsigned int max_keysize;
12018  unsigned int ivsize;
12019 };
/* AEAD algorithm ops; maxauthsize bounds the authentication tag length. */
12020 struct aead_alg {
12021  int (*setkey)(struct crypto_aead *tfm, const u8 *key,
12022        unsigned int keylen);
12023  int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
12024  int (*encrypt)(struct aead_request *req);
12025  int (*decrypt)(struct aead_request *req);
12026  int (*givencrypt)(struct aead_givcrypt_request *req);
12027  int (*givdecrypt)(struct aead_givcrypt_request *req);
12028  const char *geniv;
12029  unsigned int ivsize;
12030  unsigned int maxauthsize;
12031 };
12032struct blkcipher_alg {
12033 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
12034 unsigned int keylen);
12035 int (*encrypt)(struct blkcipher_desc *desc,
12036 struct scatterlist *dst, struct scatterlist *src,
12037 unsigned int nbytes);
12038 int (*decrypt)(struct blkcipher_desc *desc,
12039 struct scatterlist *dst, struct scatterlist *src,
12040 unsigned int nbytes);
12041 const char *geniv;
12042 unsigned int min_keysize;
12043 unsigned int max_keysize;
12044 unsigned int ivsize;
12045};
12046struct cipher_alg {
12047 unsigned int cia_min_keysize;
12048 unsigned int cia_max_keysize;
12049 int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
12050 unsigned int keylen);
12051 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
12052 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
12053};
12054struct compress_alg {
12055 int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
12056 unsigned int slen, u8 *dst, unsigned int *dlen);
12057 int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
12058 unsigned int slen, u8 *dst, unsigned int *dlen);
12059};
12060struct rng_alg {
12061 int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
12062 unsigned int dlen);
12063 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
12064 unsigned int seedsize;
12065};
12066struct crypto_alg {
12067 struct list_head cra_list;
12068 struct list_head cra_users;
12069 u32 cra_flags;
12070 unsigned int cra_blocksize;
12071 unsigned int cra_ctxsize;
12072 unsigned int cra_alignmask;
12073 int cra_priority;
12074 atomic_t cra_refcnt;
12075 char cra_name[64];
12076 char cra_driver_name[64];
12077 const struct crypto_type *cra_type;
12078 union {
12079 struct ablkcipher_alg ablkcipher;
12080 struct aead_alg aead;
12081 struct blkcipher_alg blkcipher;
12082 struct cipher_alg cipher;
12083 struct compress_alg compress;
12084 struct rng_alg rng;
12085 } cra_u;
12086 int (*cra_init)(struct crypto_tfm *tfm);
12087 void (*cra_exit)(struct crypto_tfm *tfm);
12088 void (*cra_destroy)(struct crypto_alg *alg);
12089 struct module *cra_module;
12090};
12091int crypto_register_alg(struct crypto_alg *alg);
12092int crypto_unregister_alg(struct crypto_alg *alg);
12093int crypto_has_alg(const char *name, u32 type, u32 mask);
12094struct ablkcipher_tfm {
12095 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
12096 unsigned int keylen);
12097 int (*encrypt)(struct ablkcipher_request *req);
12098 int (*decrypt)(struct ablkcipher_request *req);
12099 int (*givencrypt)(struct skcipher_givcrypt_request *req);
12100 int (*givdecrypt)(struct skcipher_givcrypt_request *req);
12101 struct crypto_ablkcipher *base;
12102 unsigned int ivsize;
12103 unsigned int reqsize;
12104};
12105struct aead_tfm {
12106 int (*setkey)(struct crypto_aead *tfm, const u8 *key,
12107 unsigned int keylen);
12108 int (*encrypt)(struct aead_request *req);
12109 int (*decrypt)(struct aead_request *req);
12110 int (*givencrypt)(struct aead_givcrypt_request *req);
12111 int (*givdecrypt)(struct aead_givcrypt_request *req);
12112 struct crypto_aead *base;
12113 unsigned int ivsize;
12114 unsigned int authsize;
12115 unsigned int reqsize;
12116};
12117struct blkcipher_tfm {
12118 void *iv;
12119 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
12120 unsigned int keylen);
12121 int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
12122 struct scatterlist *src, unsigned int nbytes);
12123 int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
12124 struct scatterlist *src, unsigned int nbytes);
12125};
12126struct cipher_tfm {
12127 int (*cit_setkey)(struct crypto_tfm *tfm,
12128 const u8 *key, unsigned int keylen);
12129 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
12130 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
12131};
12132struct hash_tfm {
12133 int (*init)(struct hash_desc *desc);
12134 int (*update)(struct hash_desc *desc,
12135 struct scatterlist *sg, unsigned int nsg);
12136 int (*final)(struct hash_desc *desc, u8 *out);
12137 int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
12138 unsigned int nsg, u8 *out);
12139 int (*setkey)(struct crypto_hash *tfm, const u8 *key,
12140 unsigned int keylen);
12141 unsigned int digestsize;
12142};
12143struct compress_tfm {
12144 int (*cot_compress)(struct crypto_tfm *tfm,
12145 const u8 *src, unsigned int slen,
12146 u8 *dst, unsigned int *dlen);
12147 int (*cot_decompress)(struct crypto_tfm *tfm,
12148 const u8 *src, unsigned int slen,
12149 u8 *dst, unsigned int *dlen);
12150};
12151struct rng_tfm {
12152 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
12153 unsigned int dlen);
12154 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
12155};
12156struct crypto_tfm {
12157 u32 crt_flags;
12158 union {
12159 struct ablkcipher_tfm ablkcipher;
12160 struct aead_tfm aead;
12161 struct blkcipher_tfm blkcipher;
12162 struct cipher_tfm cipher;
12163 struct hash_tfm hash;
12164 struct compress_tfm compress;
12165 struct rng_tfm rng;
12166 } crt_u;
12167 void (*exit)(struct crypto_tfm *tfm);
12168 struct crypto_alg *__crt_alg;
12169 void *__crt_ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
12170};
12171struct crypto_ablkcipher {
12172 struct crypto_tfm base;
12173};
12174struct crypto_aead {
12175 struct crypto_tfm base;
12176};
12177struct crypto_blkcipher {
12178 struct crypto_tfm base;
12179};
12180struct crypto_cipher {
12181 struct crypto_tfm base;
12182};
12183struct crypto_comp {
12184 struct crypto_tfm base;
12185};
12186struct crypto_hash {
12187 struct crypto_tfm base;
12188};
12189struct crypto_rng {
12190 struct crypto_tfm base;
12191};
12192enum {
12193 CRYPTOA_UNSPEC,
12194 CRYPTOA_ALG,
12195 CRYPTOA_TYPE,
12196 CRYPTOA_U32,
12197 __CRYPTOA_MAX,
12198};
12199struct crypto_attr_alg {
12200 char name[64];
12201};
12202struct crypto_attr_type {
12203 u32 type;
12204 u32 mask;
12205};
12206struct crypto_attr_u32 {
12207 u32 num;
12208};
12209struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
12210void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
12211static inline __attribute__((always_inline)) void crypto_free_tfm(struct crypto_tfm *tfm)
12212{
12213 return crypto_destroy_tfm(tfm, tfm);
12214}
12215int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
12216static inline __attribute__((always_inline)) const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
12217{
12218 return tfm->__crt_alg->cra_name;
12219}
12220static inline __attribute__((always_inline)) const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
12221{
12222 return tfm->__crt_alg->cra_driver_name;
12223}
12224static inline __attribute__((always_inline)) int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
12225{
12226 return tfm->__crt_alg->cra_priority;
12227}
12228static inline __attribute__((always_inline)) const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm)
12229{
12230 return ({ struct module *__mod = (tfm->__crt_alg->cra_module); __mod ? __mod->name : "kernel"; });
12231}
12232static inline __attribute__((always_inline)) u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
12233{
12234 return tfm->__crt_alg->cra_flags & 0x0000000f;
12235}
12236static inline __attribute__((always_inline)) unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
12237{
12238 return tfm->__crt_alg->cra_blocksize;
12239}
12240static inline __attribute__((always_inline)) unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
12241{
12242 return tfm->__crt_alg->cra_alignmask;
12243}
12244static inline __attribute__((always_inline)) u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
12245{
12246 return tfm->crt_flags;
12247}
12248static inline __attribute__((always_inline)) void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
12249{
12250 tfm->crt_flags |= flags;
12251}
12252static inline __attribute__((always_inline)) void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
12253{
12254 tfm->crt_flags &= ~flags;
12255}
12256static inline __attribute__((always_inline)) void *crypto_tfm_ctx(struct crypto_tfm *tfm)
12257{
12258 return tfm->__crt_ctx;
12259}
12260static inline __attribute__((always_inline)) unsigned int crypto_tfm_ctx_alignment(void)
12261{
12262 struct crypto_tfm *tfm;
12263 return __alignof__(tfm->__crt_ctx);
12264}
12265static inline __attribute__((always_inline)) struct crypto_ablkcipher *__crypto_ablkcipher_cast(
12266 struct crypto_tfm *tfm)
12267{
12268 return (struct crypto_ablkcipher *)tfm;
12269}
12270static inline __attribute__((always_inline)) u32 crypto_skcipher_type(u32 type)
12271{
12272 type &= ~(0x0000000f | 0x00000200);
12273 type |= 0x00000004;
12274 return type;
12275}
12276static inline __attribute__((always_inline)) u32 crypto_skcipher_mask(u32 mask)
12277{
12278 mask &= ~(0x0000000f | 0x00000200);
12279 mask |= 0x0000000c;
12280 return mask;
12281}
12282struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
12283 u32 type, u32 mask);
12284static inline __attribute__((always_inline)) struct crypto_tfm *crypto_ablkcipher_tfm(
12285 struct crypto_ablkcipher *tfm)
12286{
12287 return &tfm->base;
12288}
12289static inline __attribute__((always_inline)) void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
12290{
12291 crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
12292}
12293static inline __attribute__((always_inline)) int crypto_has_ablkcipher(const char *alg_name, u32 type,
12294 u32 mask)
12295{
12296 return crypto_has_alg(alg_name, crypto_skcipher_type(type),
12297 crypto_skcipher_mask(mask));
12298}
12299static inline __attribute__((always_inline)) struct ablkcipher_tfm *crypto_ablkcipher_crt(
12300 struct crypto_ablkcipher *tfm)
12301{
12302 return &crypto_ablkcipher_tfm(tfm)->crt_u.ablkcipher;
12303}
12304static inline __attribute__((always_inline)) unsigned int crypto_ablkcipher_ivsize(
12305 struct crypto_ablkcipher *tfm)
12306{
12307 return crypto_ablkcipher_crt(tfm)->ivsize;
12308}
12309static inline __attribute__((always_inline)) unsigned int crypto_ablkcipher_blocksize(
12310 struct crypto_ablkcipher *tfm)
12311{
12312 return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
12313}
12314static inline __attribute__((always_inline)) unsigned int crypto_ablkcipher_alignmask(
12315 struct crypto_ablkcipher *tfm)
12316{
12317 return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
12318}
12319static inline __attribute__((always_inline)) u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
12320{
12321 return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
12322}
12323static inline __attribute__((always_inline)) void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
12324 u32 flags)
12325{
12326 crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
12327}
12328static inline __attribute__((always_inline)) void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
12329 u32 flags)
12330{
12331 crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
12332}
12333static inline __attribute__((always_inline)) int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
12334 const u8 *key, unsigned int keylen)
12335{
12336 struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
12337 return crt->setkey(crt->base, key, keylen);
12338}
12339static inline __attribute__((always_inline)) struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
12340 struct ablkcipher_request *req)
12341{
12342 return __crypto_ablkcipher_cast(req->base.tfm);
12343}
12344static inline __attribute__((always_inline)) int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
12345{
12346 struct ablkcipher_tfm *crt =
12347 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
12348 return crt->encrypt(req);
12349}
12350static inline __attribute__((always_inline)) int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
12351{
12352 struct ablkcipher_tfm *crt =
12353 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
12354 return crt->decrypt(req);
12355}
12356static inline __attribute__((always_inline)) unsigned int crypto_ablkcipher_reqsize(
12357 struct crypto_ablkcipher *tfm)
12358{
12359 return crypto_ablkcipher_crt(tfm)->reqsize;
12360}
12361static inline __attribute__((always_inline)) void ablkcipher_request_set_tfm(
12362 struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
12363{
12364 req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
12365}
12366static inline __attribute__((always_inline)) struct ablkcipher_request *ablkcipher_request_cast(
12367 struct crypto_async_request *req)
12368{
12369 return ({ const typeof( ((struct ablkcipher_request *)0)->base ) *__mptr = (req); (struct ablkcipher_request *)( (char *)__mptr - __builtin_offsetof(struct ablkcipher_request,base) );});
12370}
/*
 * Allocate an ablkcipher request plus the transform's per-request
 * context (crypto_ablkcipher_reqsize()).  Returns NULL on allocation
 * failure.  The conditional below is the preprocessed expansion of
 * likely(req) with ftrace branch profiling; left byte-identical.
 */
12371static inline __attribute__((always_inline)) struct ablkcipher_request *ablkcipher_request_alloc(
12372 struct crypto_ablkcipher *tfm, gfp_t gfp)
12373{
12374 struct ablkcipher_request *req;
12375 req = kmalloc(sizeof(struct ablkcipher_request) +
12376 crypto_ablkcipher_reqsize(tfm), gfp);
12377 if (__builtin_constant_p((((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 693, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 693, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 693, }; ______r = !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 693, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
12378 ablkcipher_request_set_tfm(req, tfm);
12379 return req;
12380}
12381static inline __attribute__((always_inline)) void ablkcipher_request_free(struct ablkcipher_request *req)
12382{
12383 kzfree(req);
12384}
12385static inline __attribute__((always_inline)) void ablkcipher_request_set_callback(
12386 struct ablkcipher_request *req,
12387 u32 flags, crypto_completion_t complete, void *data)
12388{
12389 req->base.complete = complete;
12390 req->base.data = data;
12391 req->base.flags = flags;
12392}
12393static inline __attribute__((always_inline)) void ablkcipher_request_set_crypt(
12394 struct ablkcipher_request *req,
12395 struct scatterlist *src, struct scatterlist *dst,
12396 unsigned int nbytes, void *iv)
12397{
12398 req->src = src;
12399 req->dst = dst;
12400 req->nbytes = nbytes;
12401 req->info = iv;
12402}
12403static inline __attribute__((always_inline)) struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
12404{
12405 return (struct crypto_aead *)tfm;
12406}
12407struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
12408static inline __attribute__((always_inline)) struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
12409{
12410 return &tfm->base;
12411}
12412static inline __attribute__((always_inline)) void crypto_free_aead(struct crypto_aead *tfm)
12413{
12414 crypto_free_tfm(crypto_aead_tfm(tfm));
12415}
12416static inline __attribute__((always_inline)) struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
12417{
12418 return &crypto_aead_tfm(tfm)->crt_u.aead;
12419}
12420static inline __attribute__((always_inline)) unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
12421{
12422 return crypto_aead_crt(tfm)->ivsize;
12423}
12424static inline __attribute__((always_inline)) unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
12425{
12426 return crypto_aead_crt(tfm)->authsize;
12427}
12428static inline __attribute__((always_inline)) unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
12429{
12430 return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
12431}
12432static inline __attribute__((always_inline)) unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
12433{
12434 return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
12435}
12436static inline __attribute__((always_inline)) u32 crypto_aead_get_flags(struct crypto_aead *tfm)
12437{
12438 return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
12439}
12440static inline __attribute__((always_inline)) void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
12441{
12442 crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
12443}
12444static inline __attribute__((always_inline)) void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
12445{
12446 crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
12447}
12448static inline __attribute__((always_inline)) int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
12449 unsigned int keylen)
12450{
12451 struct aead_tfm *crt = crypto_aead_crt(tfm);
12452 return crt->setkey(crt->base, key, keylen);
12453}
12454int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
12455static inline __attribute__((always_inline)) struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
12456{
12457 return __crypto_aead_cast(req->base.tfm);
12458}
12459static inline __attribute__((always_inline)) int crypto_aead_encrypt(struct aead_request *req)
12460{
12461 return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
12462}
12463static inline __attribute__((always_inline)) int crypto_aead_decrypt(struct aead_request *req)
12464{
12465 return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
12466}
12467static inline __attribute__((always_inline)) unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
12468{
12469 return crypto_aead_crt(tfm)->reqsize;
12470}
12471static inline __attribute__((always_inline)) void aead_request_set_tfm(struct aead_request *req,
12472 struct crypto_aead *tfm)
12473{
12474 req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
12475}
/*
 * Allocate an AEAD request plus the transform's per-request context.
 * Returns NULL on allocation failure.  The conditional below is the
 * preprocessed expansion of likely(req) with ftrace branch profiling;
 * left byte-identical.
 */
12476static inline __attribute__((always_inline)) struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
12477 gfp_t gfp)
12478{
12479 struct aead_request *req;
12480 req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
12481 if (__builtin_constant_p((((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 824, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 824, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 824, }; ______r = !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 824, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
12482 aead_request_set_tfm(req, tfm);
12483 return req;
12484}
12485static inline __attribute__((always_inline)) void aead_request_free(struct aead_request *req)
12486{
12487 kzfree(req);
12488}
12489static inline __attribute__((always_inline)) void aead_request_set_callback(struct aead_request *req,
12490 u32 flags,
12491 crypto_completion_t complete,
12492 void *data)
12493{
12494 req->base.complete = complete;
12495 req->base.data = data;
12496 req->base.flags = flags;
12497}
12498static inline __attribute__((always_inline)) void aead_request_set_crypt(struct aead_request *req,
12499 struct scatterlist *src,
12500 struct scatterlist *dst,
12501 unsigned int cryptlen, u8 *iv)
12502{
12503 req->src = src;
12504 req->dst = dst;
12505 req->cryptlen = cryptlen;
12506 req->iv = iv;
12507}
12508static inline __attribute__((always_inline)) void aead_request_set_assoc(struct aead_request *req,
12509 struct scatterlist *assoc,
12510 unsigned int assoclen)
12511{
12512 req->assoc = assoc;
12513 req->assoclen = assoclen;
12514}
/* Unchecked cast; crypto_blkcipher is a thin wrapper around crypto_tfm. */
static inline __attribute__((always_inline)) struct crypto_blkcipher *__crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_blkcipher *)tfm;
}
/*
 * Checked cast to a blkcipher handle.  The do/while below is the
 * preprocessed expansion of BUG_ON(crypto_tfm_alg_type(tfm) !=
 * CRYPTO_ALG_TYPE_BLKCIPHER) with ftrace branch profiling; left
 * byte-identical.
 */
12520static inline __attribute__((always_inline)) struct crypto_blkcipher *crypto_blkcipher_cast(
12521 struct crypto_tfm *tfm)
12522{
12523 do { if (__builtin_constant_p((((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000004) ? !!(crypto_tfm_alg_type(tfm) != 0x00000004) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 873, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000004), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000004) ? !!(crypto_tfm_alg_type(tfm) != 0x00000004) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 873, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000004), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 873, }; ______r = !!(((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000004) ? !!(crypto_tfm_alg_type(tfm) != 0x00000004) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 873, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000004), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/crypto.h"), "i" (873), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
12524 return __crypto_blkcipher_cast(tfm);
12525}
12526static inline __attribute__((always_inline)) struct crypto_blkcipher *crypto_alloc_blkcipher(
12527 const char *alg_name, u32 type, u32 mask)
12528{
12529 type &= ~0x0000000f;
12530 type |= 0x00000004;
12531 mask |= 0x0000000f;
12532 return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
12533}
12534static inline __attribute__((always_inline)) struct crypto_tfm *crypto_blkcipher_tfm(
12535 struct crypto_blkcipher *tfm)
12536{
12537 return &tfm->base;
12538}
12539static inline __attribute__((always_inline)) void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
12540{
12541 crypto_free_tfm(crypto_blkcipher_tfm(tfm));
12542}
12543static inline __attribute__((always_inline)) int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
12544{
12545 type &= ~0x0000000f;
12546 type |= 0x00000004;
12547 mask |= 0x0000000f;
12548 return crypto_has_alg(alg_name, type, mask);
12549}
12550static inline __attribute__((always_inline)) const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
12551{
12552 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
12553}
12554static inline __attribute__((always_inline)) struct blkcipher_tfm *crypto_blkcipher_crt(
12555 struct crypto_blkcipher *tfm)
12556{
12557 return &crypto_blkcipher_tfm(tfm)->crt_u.blkcipher;
12558}
12559static inline __attribute__((always_inline)) struct blkcipher_alg *crypto_blkcipher_alg(
12560 struct crypto_blkcipher *tfm)
12561{
12562 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_u.blkcipher;
12563}
12564static inline __attribute__((always_inline)) unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
12565{
12566 return crypto_blkcipher_alg(tfm)->ivsize;
12567}
12568static inline __attribute__((always_inline)) unsigned int crypto_blkcipher_blocksize(
12569 struct crypto_blkcipher *tfm)
12570{
12571 return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
12572}
12573static inline __attribute__((always_inline)) unsigned int crypto_blkcipher_alignmask(
12574 struct crypto_blkcipher *tfm)
12575{
12576 return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
12577}
12578static inline __attribute__((always_inline)) u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
12579{
12580 return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
12581}
12582static inline __attribute__((always_inline)) void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
12583 u32 flags)
12584{
12585 crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
12586}
12587static inline __attribute__((always_inline)) void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
12588 u32 flags)
12589{
12590 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
12591}
12592static inline __attribute__((always_inline)) int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
12593 const u8 *key, unsigned int keylen)
12594{
12595 return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
12596 key, keylen);
12597}
12598static inline __attribute__((always_inline)) int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
12599 struct scatterlist *dst,
12600 struct scatterlist *src,
12601 unsigned int nbytes)
12602{
12603 desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
12604 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
12605}
12606static inline __attribute__((always_inline)) int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
12607 struct scatterlist *dst,
12608 struct scatterlist *src,
12609 unsigned int nbytes)
12610{
12611 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
12612}
12613static inline __attribute__((always_inline)) int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
12614 struct scatterlist *dst,
12615 struct scatterlist *src,
12616 unsigned int nbytes)
12617{
12618 desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
12619 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
12620}
12621static inline __attribute__((always_inline)) int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
12622 struct scatterlist *dst,
12623 struct scatterlist *src,
12624 unsigned int nbytes)
12625{
12626 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
12627}
12628static inline __attribute__((always_inline)) void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
12629 const u8 *src, unsigned int len)
12630{
12631 __builtin_memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
12632}
12633static inline __attribute__((always_inline)) void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
12634 u8 *dst, unsigned int len)
12635{
12636 __builtin_memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
12637}
/* Unchecked cast; crypto_cipher is a thin wrapper around crypto_tfm. */
static inline __attribute__((always_inline)) struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_cipher *)tfm;
}
/*
 * Checked cast to a single-block cipher handle.  The do/while below is
 * the preprocessed expansion of BUG_ON(crypto_tfm_alg_type(tfm) !=
 * CRYPTO_ALG_TYPE_CIPHER) with ftrace branch profiling; left
 * byte-identical.
 */
12642static inline __attribute__((always_inline)) struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
12643{
12644 do { if (__builtin_constant_p((((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000001) ? !!(crypto_tfm_alg_type(tfm) != 0x00000001) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1018, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000001), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000001) ? !!(crypto_tfm_alg_type(tfm) != 0x00000001) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1018, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000001), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1018, }; ______r = !!(((__builtin_constant_p(crypto_tfm_alg_type(tfm) != 0x00000001) ? !!(crypto_tfm_alg_type(tfm) != 0x00000001) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1018, }; ______r = __builtin_expect(!!(crypto_tfm_alg_type(tfm) != 0x00000001), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/crypto.h"), "i" (1018), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
12645 return __crypto_cipher_cast(tfm);
12646}
12647static inline __attribute__((always_inline)) struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
12648 u32 type, u32 mask)
12649{
12650 type &= ~0x0000000f;
12651 type |= 0x00000001;
12652 mask |= 0x0000000f;
12653 return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
12654}
/* Return the base crypto_tfm embedded in a cipher handle. */
static inline __attribute__((always_inline)) struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
 return &tfm->base;
}
/* Release a cipher tfm via the generic tfm destructor. */
static inline __attribute__((always_inline)) void crypto_free_cipher(struct crypto_cipher *tfm)
{
 crypto_free_tfm(crypto_cipher_tfm(tfm));
}
/* Nonzero if a cipher algorithm with this name is available; applies the
 * same type/mask fixup as crypto_alloc_cipher(). */
static inline __attribute__((always_inline)) int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
 type &= ~0x0000000f;
 type |= 0x00000001;
 mask |= 0x0000000f;
 return crypto_has_alg(alg_name, type, mask);
}
/* Return the cipher ops table stored in the tfm's crt_u union. */
static inline __attribute__((always_inline)) struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
 return &crypto_cipher_tfm(tfm)->crt_u.cipher;
}
/* Block size of the underlying cipher algorithm, in bytes. */
static inline __attribute__((always_inline)) unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
 return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}
/* Alignment mask the algorithm requires for its data buffers. */
static inline __attribute__((always_inline)) unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
 return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}
/* Read the tfm's flag word. */
static inline __attribute__((always_inline)) u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
 return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}
/* OR the given bits into the tfm's flag word. */
static inline __attribute__((always_inline)) void crypto_cipher_set_flags(struct crypto_cipher *tfm,
       u32 flags)
{
 crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}
/* Clear the given bits in the tfm's flag word. */
static inline __attribute__((always_inline)) void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
         u32 flags)
{
 crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}
/* Install a key on the cipher; dispatches to the per-tfm setkey op and
 * returns its status (0 on success). */
static inline __attribute__((always_inline)) int crypto_cipher_setkey(struct crypto_cipher *tfm,
           const u8 *key, unsigned int keylen)
{
 return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
        key, keylen);
}
/* Encrypt exactly one block from src into dst via the tfm op table. */
static inline __attribute__((always_inline)) void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
         u8 *dst, const u8 *src)
{
 crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
      dst, src);
}
/* Decrypt exactly one block from src into dst via the tfm op table. */
static inline __attribute__((always_inline)) void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
         u8 *dst, const u8 *src)
{
 crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
      dst, src);
}
/* Unchecked downcast: crypto_hash embeds a crypto_tfm as its base. */
static inline __attribute__((always_inline)) struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
{
 return (struct crypto_hash *)tfm;
}
/* Checked downcast to crypto_hash.  The do/while below is the
 * preprocessor expansion of BUG_ON((crypto_tfm_alg_type(tfm) ^
 * CRYPTO_ALG_TYPE_HASH) & CRYPTO_ALG_TYPE_HASH_MASK) from
 * include/linux/crypto.h:1112, with the ftrace branch-profiling
 * instrumentation (the _ftrace_annotated_branch/_ftrace_branch
 * statics) and the x86 ud2-based bug_table asm expanded inline. */
static inline __attribute__((always_inline)) struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
{
 do { if (__builtin_constant_p((((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1112, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1112, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/linux/crypto.h"
 , .line =
 1112
 , }; ______r = !!(((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1112, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000008) & 0x0000000e), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" (
 "include/linux/crypto.h"
 ), "i" (
 1112
 ), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0)
 ;
 return __crypto_hash_cast(tfm);
}
12732static inline __attribute__((always_inline)) struct crypto_hash *crypto_alloc_hash(const char *alg_name,
12733 u32 type, u32 mask)
12734{
12735 type &= ~0x0000000f;
12736 mask &= ~0x0000000f;
12737 type |= 0x00000008;
12738 mask |= 0x0000000e;
12739 return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
12740}
/* Return the base crypto_tfm embedded in a hash handle. */
static inline __attribute__((always_inline)) struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
{
 return &tfm->base;
}
/* Release a hash tfm via the generic tfm destructor. */
static inline __attribute__((always_inline)) void crypto_free_hash(struct crypto_hash *tfm)
{
 crypto_free_tfm(crypto_hash_tfm(tfm));
}
/* Nonzero if a hash algorithm with this name is available; applies the
 * same type/mask fixup as crypto_alloc_hash(). */
static inline __attribute__((always_inline)) int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
{
 type &= ~0x0000000f;
 mask &= ~0x0000000f;
 type |= 0x00000008;
 mask |= 0x0000000e;
 return crypto_has_alg(alg_name, type, mask);
}
/* Return the hash ops table stored in the tfm's crt_u union. */
static inline __attribute__((always_inline)) struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
{
 return &crypto_hash_tfm(tfm)->crt_u.hash;
}
/* Block size of the underlying hash algorithm, in bytes. */
static inline __attribute__((always_inline)) unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
{
 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
}
/* Alignment mask the hash algorithm requires for data buffers. */
static inline __attribute__((always_inline)) unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
{
 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
}
/* Digest size, read from the per-tfm hash ops table. */
static inline __attribute__((always_inline)) unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
{
 return crypto_hash_crt(tfm)->digestsize;
}
/* Read the tfm's flag word. */
static inline __attribute__((always_inline)) u32 crypto_hash_get_flags(struct crypto_hash *tfm)
{
 return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
}
/* OR the given bits into the tfm's flag word. */
static inline __attribute__((always_inline)) void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
{
 crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
}
/* Clear the given bits in the tfm's flag word. */
static inline __attribute__((always_inline)) void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
{
 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
}
/* Start a hash computation for the tfm referenced by desc. */
static inline __attribute__((always_inline)) int crypto_hash_init(struct hash_desc *desc)
{
 return crypto_hash_crt(desc->tfm)->init(desc);
}
/* Feed nbytes of scatterlist data into an in-progress hash. */
static inline __attribute__((always_inline)) int crypto_hash_update(struct hash_desc *desc,
        struct scatterlist *sg,
        unsigned int nbytes)
{
 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
}
/* Finish the hash and write the digest to out. */
static inline __attribute__((always_inline)) int crypto_hash_final(struct hash_desc *desc, u8 *out)
{
 return crypto_hash_crt(desc->tfm)->final(desc, out);
}
/* One-shot init+update+final over a scatterlist. */
static inline __attribute__((always_inline)) int crypto_hash_digest(struct hash_desc *desc,
        struct scatterlist *sg,
        unsigned int nbytes, u8 *out)
{
 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
}
/* Install a key on a keyed hash (e.g. HMAC); returns the op's status. */
static inline __attribute__((always_inline)) int crypto_hash_setkey(struct crypto_hash *hash,
        const u8 *key, unsigned int keylen)
{
 return crypto_hash_crt(hash)->setkey(hash, key, keylen);
}
/* Unchecked downcast: crypto_comp embeds a crypto_tfm as its base. */
static inline __attribute__((always_inline)) struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
 return (struct crypto_comp *)tfm;
}
/* Checked downcast to crypto_comp.  The do/while below is the expansion
 * of BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
 * CRYPTO_ALG_TYPE_MASK) from include/linux/crypto.h:1220, with ftrace
 * branch profiling and the x86 ud2 bug_table asm expanded inline. */
static inline __attribute__((always_inline)) struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
 do { if (__builtin_constant_p((((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1220, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1220, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/linux/crypto.h"
 , .line =
 1220
 , }; ______r = !!(((__builtin_constant_p((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) ? !!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/crypto.h", .line = 1220, }; ______r = __builtin_expect(!!((crypto_tfm_alg_type(tfm) ^ 0x00000002) & 0x0000000f), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" (
 "include/linux/crypto.h"
 ), "i" (
 1220
 ), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0)
 ;
 return __crypto_comp_cast(tfm);
}
12828static inline __attribute__((always_inline)) struct crypto_comp *crypto_alloc_comp(const char *alg_name,
12829 u32 type, u32 mask)
12830{
12831 type &= ~0x0000000f;
12832 type |= 0x00000002;
12833 mask |= 0x0000000f;
12834 return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
12835}
/* Return the base crypto_tfm embedded in a compression handle. */
static inline __attribute__((always_inline)) struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
 return &tfm->base;
}
/* Release a compression tfm via the generic tfm destructor. */
static inline __attribute__((always_inline)) void crypto_free_comp(struct crypto_comp *tfm)
{
 crypto_free_tfm(crypto_comp_tfm(tfm));
}
/* Nonzero if a compression algorithm with this name is available;
 * applies the same type/mask fixup as crypto_alloc_comp(). */
static inline __attribute__((always_inline)) int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
 type &= ~0x0000000f;
 type |= 0x00000002;
 mask |= 0x0000000f;
 return crypto_has_alg(alg_name, type, mask);
}
/* Algorithm name of the compression tfm. */
static inline __attribute__((always_inline)) const char *crypto_comp_name(struct crypto_comp *tfm)
{
 return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}
/* Return the compression ops table stored in the tfm's crt_u union. */
static inline __attribute__((always_inline)) struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
 return &crypto_comp_tfm(tfm)->crt_u.compress;
}
/* Compress slen bytes from src into dst; *dlen carries the output
 * capacity in and the produced length out.  Returns the op's status. */
static inline __attribute__((always_inline)) int crypto_comp_compress(struct crypto_comp *tfm,
           const u8 *src, unsigned int slen,
           u8 *dst, unsigned int *dlen)
{
 return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
        src, slen, dst, dlen);
}
/* Inverse of crypto_comp_compress(); same in/out convention for dlen. */
static inline __attribute__((always_inline)) int crypto_comp_decompress(struct crypto_comp *tfm,
      const u8 *src, unsigned int slen,
      u8 *dst, unsigned int *dlen)
{
 return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
          src, slen, dst, dlen);
}
12873struct module;
12874struct rtattr;
12875struct seq_file;
/* Frontend descriptor for one class of crypto transform: sizing and
 * init hooks plus the type/mask bits used during lookup/allocation. */
struct crypto_type {
 unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
 unsigned int (*extsize)(struct crypto_alg *alg);
 int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
 int (*init_tfm)(struct crypto_tfm *tfm);
 void (*show)(struct seq_file *m, struct crypto_alg *alg);
 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
 unsigned int type;
 unsigned int maskclear;
 unsigned int maskset;
 unsigned int tfmsize;
};
/* An algorithm instantiated from a template; the trailing flexible
 * array holds instance-private context, aligned for any scalar. */
struct crypto_instance {
 struct crypto_alg alg;
 struct crypto_template *tmpl;
 struct hlist_node list;
 void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
};
/* A registered template (e.g. a mode wrapper) with its constructor/
 * destructor hooks and the list of instances built from it. */
struct crypto_template {
 struct list_head list;
 struct hlist_head instances;
 struct module *module;
 struct crypto_instance *(*alloc)(struct rtattr **tb);
 void (*free)(struct crypto_instance *inst);
 int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
 char name[64];
};
/* Link from an instance to an underlying algorithm it consumes. */
struct crypto_spawn {
 struct list_head list;
 struct crypto_alg *alg;
 struct crypto_instance *inst;
 const struct crypto_type *frontend;
 u32 mask;
};
/* Bounded request queue; backlog marks where backlogged entries start. */
struct crypto_queue {
 struct list_head list;
 struct list_head *backlog;
 unsigned int qlen;
 unsigned int max_qlen;
};
/* Cursor into a scatterlist: current entry plus byte offset within it. */
struct scatter_walk {
 struct scatterlist *sg;
 unsigned int offset;
};
/* Iteration state for walking a block-cipher request's src/dst
 * scatterlists; src/dst are viewed either as page+offset (phys) or as
 * mapped addresses (virt). */
struct blkcipher_walk {
 union {
  struct {
   struct page *page;
   unsigned long offset;
  } phys;
  struct {
   u8 *page;
   u8 *addr;
  } virt;
 } src, dst;
 struct scatter_walk in;
 unsigned int nbytes;
 struct scatter_walk out;
 unsigned int total;
 void *page;
 u8 *buffer;
 u8 *iv;
 int flags;
 unsigned int blocksize;
};
/* Async-blkcipher variant of the walk state; bounce buffers are kept on
 * the buffers list until __ablkcipher_walk_complete() runs. */
struct ablkcipher_walk {
 struct {
  struct page *page;
  unsigned int offset;
 } src, dst;
 struct scatter_walk in;
 unsigned int nbytes;
 struct scatter_walk out;
 unsigned int total;
 struct list_head buffers;
 u8 *iv_buffer;
 u8 *iv;
 int flags;
 unsigned int blocksize;
};
12956extern const struct crypto_type crypto_ablkcipher_type;
12957extern const struct crypto_type crypto_aead_type;
12958extern const struct crypto_type crypto_blkcipher_type;
12959void crypto_mod_put(struct crypto_alg *alg);
12960int crypto_register_template(struct crypto_template *tmpl);
12961void crypto_unregister_template(struct crypto_template *tmpl);
12962struct crypto_template *crypto_lookup_template(const char *name);
12963int crypto_register_instance(struct crypto_template *tmpl,
12964 struct crypto_instance *inst);
12965int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
12966 struct crypto_instance *inst, u32 mask);
12967int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
12968 struct crypto_instance *inst,
12969 const struct crypto_type *frontend);
12970void crypto_drop_spawn(struct crypto_spawn *spawn);
12971struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
12972 u32 mask);
12973void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
/* Associate a spawn with the instance that will own it. */
static inline __attribute__((always_inline)) void crypto_set_spawn(struct crypto_spawn *spawn,
        struct crypto_instance *inst)
{
 spawn->inst = inst;
}
12979struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
12980int crypto_check_attr_type(struct rtattr **tb, u32 type);
12981const char *crypto_attr_alg_name(struct rtattr *rta);
12982struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
12983 const struct crypto_type *frontend,
12984 u32 type, u32 mask);
/* Resolve an algorithm from an rtattr with no frontend constraint;
 * thin wrapper over crypto_attr_alg2() passing a NULL frontend. */
static inline __attribute__((always_inline)) struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
       u32 type, u32 mask)
{
 return crypto_attr_alg2(rta, ((void *)0), type, mask);
}
12990int crypto_attr_u32(struct rtattr *rta, u32 *num);
12991void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
12992 unsigned int head);
12993struct crypto_instance *crypto_alloc_instance(const char *name,
12994 struct crypto_alg *alg);
12995void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
12996int crypto_enqueue_request(struct crypto_queue *queue,
12997 struct crypto_async_request *request);
12998void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
12999struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
13000int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
13001void crypto_inc(u8 *a, unsigned int size);
13002void crypto_xor(u8 *dst, const u8 *src, unsigned int size);
13003int blkcipher_walk_done(struct blkcipher_desc *desc,
13004 struct blkcipher_walk *walk, int err);
13005int blkcipher_walk_virt(struct blkcipher_desc *desc,
13006 struct blkcipher_walk *walk);
13007int blkcipher_walk_phys(struct blkcipher_desc *desc,
13008 struct blkcipher_walk *walk);
13009int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
13010 struct blkcipher_walk *walk,
13011 unsigned int blocksize);
13012int ablkcipher_walk_done(struct ablkcipher_request *req,
13013 struct ablkcipher_walk *walk, int err);
13014int ablkcipher_walk_phys(struct ablkcipher_request *req,
13015 struct ablkcipher_walk *walk);
13016void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
/* Context pointer rounded up to the algorithm's alignment mask.  The
 * expression is the expansion of PTR_ALIGN(crypto_tfm_ctx(tfm),
 * crypto_tfm_alg_alignmask(tfm) + 1). */
static inline __attribute__((always_inline)) void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
 return ((typeof(crypto_tfm_ctx(tfm)))(((((unsigned long)(crypto_tfm_ctx(tfm)))) + ((typeof(((unsigned long)(crypto_tfm_ctx(tfm)))))(((crypto_tfm_alg_alignmask(tfm) + 1))) - 1)) & ~((typeof(((unsigned long)(crypto_tfm_ctx(tfm)))))(((crypto_tfm_alg_alignmask(tfm) + 1))) - 1)))
 ;
}
/* Recover the crypto_instance that embeds this tfm's algorithm; the
 * expression is the expansion of container_of(tfm->__crt_alg,
 * struct crypto_instance, alg). */
static inline __attribute__((always_inline)) struct crypto_instance *crypto_tfm_alg_instance(
 struct crypto_tfm *tfm)
{
 return ({ const typeof( ((struct crypto_instance *)0)->alg ) *__mptr = (tfm->__crt_alg); (struct crypto_instance *)( (char *)__mptr - __builtin_offsetof(struct crypto_instance,alg) );});
}
/* Instance-private context area (the flexible array at the tail). */
static inline __attribute__((always_inline)) void *crypto_instance_ctx(struct crypto_instance *inst)
{
 return inst->__ctx;
}
/* The ablkcipher algorithm ops stored in the tfm's algorithm union. */
static inline __attribute__((always_inline)) struct ablkcipher_alg *crypto_ablkcipher_alg(
 struct crypto_ablkcipher *tfm)
{
 return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_u.ablkcipher;
}
/* Per-tfm context of an ablkcipher handle. */
static inline __attribute__((always_inline)) void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
 return crypto_tfm_ctx(&tfm->base);
}
/* Same, rounded up to the algorithm's alignment requirement. */
static inline __attribute__((always_inline)) void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
 return crypto_tfm_ctx_aligned(&tfm->base);
}
/* The AEAD algorithm ops stored in the tfm's algorithm union. */
static inline __attribute__((always_inline)) struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
{
 return &crypto_aead_tfm(tfm)->__crt_alg->cra_u.aead;
}
/* Per-tfm context of an AEAD handle. */
static inline __attribute__((always_inline)) void *crypto_aead_ctx(struct crypto_aead *tfm)
{
 return crypto_tfm_ctx(&tfm->base);
}
/* Instance that the AEAD tfm's algorithm was built from. */
static inline __attribute__((always_inline)) struct crypto_instance *crypto_aead_alg_instance(
 struct crypto_aead *aead)
{
 return crypto_tfm_alg_instance(&aead->base);
}
/* Instantiate the spawned algorithm as a sync blkcipher tfm
 * (type 0x4, exact class match). */
static inline __attribute__((always_inline)) struct crypto_blkcipher *crypto_spawn_blkcipher(
 struct crypto_spawn *spawn)
{
 u32 type = 0x00000004;
 u32 mask = 0x0000000f;
 return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}
/* Per-tfm context of a blkcipher handle. */
static inline __attribute__((always_inline)) void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
 return crypto_tfm_ctx(&tfm->base);
}
/* Same, rounded up to the algorithm's alignment requirement. */
static inline __attribute__((always_inline)) void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
 return crypto_tfm_ctx_aligned(&tfm->base);
}
/* Instantiate the spawned algorithm as a single-block cipher tfm
 * (type 0x1, exact class match). */
static inline __attribute__((always_inline)) struct crypto_cipher *crypto_spawn_cipher(
 struct crypto_spawn *spawn)
{
 u32 type = 0x00000001;
 u32 mask = 0x0000000f;
 return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}
/* The cipher algorithm ops stored in the tfm's algorithm union. */
static inline __attribute__((always_inline)) struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
 return &crypto_cipher_tfm(tfm)->__crt_alg->cra_u.cipher;
}
/* Instantiate the spawned algorithm as a hash tfm (type 0x8,
 * hash-class mask 0xe). */
static inline __attribute__((always_inline)) struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn)
{
 u32 type = 0x00000008;
 u32 mask = 0x0000000e;
 return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask));
}
/* Per-tfm context of a hash handle. */
static inline __attribute__((always_inline)) void *crypto_hash_ctx(struct crypto_hash *tfm)
{
 return crypto_tfm_ctx(&tfm->base);
}
/* Same, rounded up to the algorithm's alignment requirement. */
static inline __attribute__((always_inline)) void *crypto_hash_ctx_aligned(struct crypto_hash *tfm)
{
 return crypto_tfm_ctx_aligned(&tfm->base);
}
13097static inline __attribute__((always_inline)) void blkcipher_walk_init(struct blkcipher_walk *walk,
13098 struct scatterlist *dst,
13099 struct scatterlist *src,
13100 unsigned int nbytes)
13101{
13102 walk->in.sg = src;
13103 walk->out.sg = dst;
13104 walk->total = nbytes;
13105}
13106static inline __attribute__((always_inline)) void ablkcipher_walk_init(struct ablkcipher_walk *walk,
13107 struct scatterlist *dst,
13108 struct scatterlist *src,
13109 unsigned int nbytes)
13110{
13111 walk->in.sg = src;
13112 walk->out.sg = dst;
13113 walk->total = nbytes;
13114 INIT_LIST_HEAD(&walk->buffers);
13115}
/* Tear down an ablkcipher walk.  The condition is the preprocessor
 * expansion of if (unlikely(!list_empty(&walk->buffers))) from
 * include/crypto/algapi.h:322, with ftrace branch profiling expanded
 * inline; the slow path frees the accumulated bounce buffers. */
static inline __attribute__((always_inline)) void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
 if (__builtin_constant_p((((__builtin_constant_p(!list_empty(&walk->buffers)) ? !!(!list_empty(&walk->buffers)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/algapi.h", .line = 322, }; ______r = __builtin_expect(!!(!list_empty(&walk->buffers)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!list_empty(&walk->buffers)) ? !!(!list_empty(&walk->buffers)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/algapi.h", .line = 322, }; ______r = __builtin_expect(!!(!list_empty(&walk->buffers)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/algapi.h", .line = 322, }; ______r = !!(((__builtin_constant_p(!list_empty(&walk->buffers)) ? !!(!list_empty(&walk->buffers)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/algapi.h", .line = 322, }; ______r = __builtin_expect(!!(!list_empty(&walk->buffers)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
  __ablkcipher_walk_complete(walk);
}
/* First backlogged request on the queue, or NULL when the backlog
 * pointer still sits at the list head (no backlog).  The ({...}) is the
 * expansion of container_of(queue->backlog, struct
 * crypto_async_request, list). */
static inline __attribute__((always_inline)) struct crypto_async_request *crypto_get_backlog(
 struct crypto_queue *queue)
{
 return queue->backlog == &queue->list ? ((void *)0) :
        ({ const typeof( ((struct crypto_async_request *)0)->list ) *__mptr = (queue->backlog); (struct crypto_async_request *)( (char *)__mptr - __builtin_offsetof(struct crypto_async_request,list) );});
}
/* Queue an ablkcipher request via the generic request queue. */
static inline __attribute__((always_inline)) int ablkcipher_enqueue_request(struct crypto_queue *queue,
         struct ablkcipher_request *request)
{
 return crypto_enqueue_request(queue, &request->base);
}
/* Dequeue the next request and cast it back to ablkcipher form. */
static inline __attribute__((always_inline)) struct ablkcipher_request *ablkcipher_dequeue_request(
 struct crypto_queue *queue)
{
 return ablkcipher_request_cast(crypto_dequeue_request(queue));
}
/* Request-private context area (flexible array at the tail). */
static inline __attribute__((always_inline)) void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
 return req->__ctx;
}
/* Nonzero if this tfm has a request pending on the queue. */
static inline __attribute__((always_inline)) int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
       struct crypto_ablkcipher *tfm)
{
 return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}
/* Request-private context area of an AEAD request. */
static inline __attribute__((always_inline)) void *aead_request_ctx(struct aead_request *req)
{
 return req->__ctx;
}
/* Invoke the request's completion callback with the given status. */
static inline __attribute__((always_inline)) void aead_request_complete(struct aead_request *req, int err)
{
 req->base.complete(&req->base, err);
}
/* Flags carried on the base async request. */
static inline __attribute__((always_inline)) u32 aead_request_flags(struct aead_request *req)
{
 return req->base.flags;
}
/* Resolve the algorithm named by template attribute tb[1]. */
static inline __attribute__((always_inline)) struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
        u32 type, u32 mask)
{
 return crypto_attr_alg(tb[1], type, mask);
}
13163static inline __attribute__((always_inline)) int crypto_requires_sync(u32 type, u32 mask)
13164{
13165 return (type ^ 0x00000080) & mask & 0x00000080;
13166}
/* Expanded AES key schedule: encrypt and decrypt round keys sized for
 * the maximum of 15 rounds x 16 bytes, plus the original key length. */
struct crypto_aes_ctx {
 u32 key_enc[((15 * 16) / sizeof(u32))];
 u32 key_dec[((15 * 16) / sizeof(u32))];
 u32 key_length;
};
13172extern const u32 crypto_ft_tab[4][256];
13173extern const u32 crypto_fl_tab[4][256];
13174extern const u32 crypto_it_tab[4][256];
13175extern const u32 crypto_il_tab[4][256];
13176int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
13177 unsigned int key_len);
13178int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
13179 unsigned int key_len);
13180struct crypto_ahash;
/* Fields shared by ahash and shash algorithm descriptors, wrapped
 * around the generic crypto_alg base. */
struct hash_alg_common {
 unsigned int digestsize;
 unsigned int statesize;
 struct crypto_alg base;
};
/* An asynchronous hash request: input scatterlist, byte count, and the
 * result buffer; __ctx is the request-private area. */
struct ahash_request {
 struct crypto_async_request base;
 unsigned int nbytes;
 struct scatterlist *src;
 u8 *result;
 void *priv;
 void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
};
/* Async hash algorithm: operation entry points plus the common hash
 * descriptor fields. */
struct ahash_alg {
 int (*init)(struct ahash_request *req);
 int (*update)(struct ahash_request *req);
 int (*final)(struct ahash_request *req);
 int (*finup)(struct ahash_request *req);
 int (*digest)(struct ahash_request *req);
 int (*export)(struct ahash_request *req, void *out);
 int (*import)(struct ahash_request *req, const void *in);
 int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
        unsigned int keylen);
 struct hash_alg_common halg;
};
/* Per-operation state for a synchronous hash; __ctx holds descsize
 * bytes of algorithm state. */
struct shash_desc {
 struct crypto_shash *tfm;
 u32 flags;
 void *__ctx[] __attribute__ ((__aligned__(__alignof__(unsigned long long))));
};
/* Synchronous hash algorithm: operation entry points, descriptor size,
 * and digest/state sizes aligned to overlay hash_alg_common. */
struct shash_alg {
 int (*init)(struct shash_desc *desc);
 int (*update)(struct shash_desc *desc, const u8 *data,
        unsigned int len);
 int (*final)(struct shash_desc *desc, u8 *out);
 int (*finup)(struct shash_desc *desc, const u8 *data,
       unsigned int len, u8 *out);
 int (*digest)(struct shash_desc *desc, const u8 *data,
        unsigned int len, u8 *out);
 int (*export)(struct shash_desc *desc, void *out);
 int (*import)(struct shash_desc *desc, const void *in);
 int (*setkey)(struct crypto_shash *tfm, const u8 *key,
        unsigned int keylen);
 unsigned int descsize;
 unsigned int digestsize
  __attribute__ ((aligned(__alignof__(struct hash_alg_common))));
 unsigned int statesize;
 struct crypto_alg base;
};
/* Async hash transform handle: per-tfm copies of the op pointers, the
 * request context size, and the embedded base tfm. */
struct crypto_ahash {
 int (*init)(struct ahash_request *req);
 int (*update)(struct ahash_request *req);
 int (*final)(struct ahash_request *req);
 int (*finup)(struct ahash_request *req);
 int (*digest)(struct ahash_request *req);
 int (*export)(struct ahash_request *req, void *out);
 int (*import)(struct ahash_request *req, const void *in);
 int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
        unsigned int keylen);
 unsigned int reqsize;
 struct crypto_tfm base;
};
/* Synchronous hash transform handle. */
struct crypto_shash {
 unsigned int descsize;
 struct crypto_tfm base;
};
/* Downcast a base tfm to its enclosing crypto_ahash; the ({...}) is the
 * expansion of container_of(tfm, struct crypto_ahash, base). */
static inline __attribute__((always_inline)) struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
 return ({ const typeof( ((struct crypto_ahash *)0)->base ) *__mptr = (tfm); (struct crypto_ahash *)( (char *)__mptr - __builtin_offsetof(struct crypto_ahash,base) );});
}
13251struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
13252 u32 mask);
/* Return the base crypto_tfm embedded in an ahash handle. */
static inline __attribute__((always_inline)) struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{
 return &tfm->base;
}
/* Destroy an ahash tfm; passes both the frontend handle and the base
 * tfm to the generic destructor. */
static inline __attribute__((always_inline)) void crypto_free_ahash(struct crypto_ahash *tfm)
{
 crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
}
/* Alignment mask required by the underlying algorithm. */
static inline __attribute__((always_inline)) unsigned int crypto_ahash_alignmask(
 struct crypto_ahash *tfm)
{
 return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
}
/* Recover hash_alg_common from the embedded crypto_alg base; the
 * ({...}) is the expansion of container_of(alg, struct
 * hash_alg_common, base). */
static inline __attribute__((always_inline)) struct hash_alg_common *__crypto_hash_alg_common(
 struct crypto_alg *alg)
{
 return ({ const typeof( ((struct hash_alg_common *)0)->base ) *__mptr = (alg); (struct hash_alg_common *)( (char *)__mptr - __builtin_offsetof(struct hash_alg_common,base) );});
}
/* Common hash descriptor of the algorithm behind an ahash tfm. */
static inline __attribute__((always_inline)) struct hash_alg_common *crypto_hash_alg_common(
 struct crypto_ahash *tfm)
{
 return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
}
/* Digest size of the underlying hash algorithm, in bytes. */
static inline __attribute__((always_inline)) unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
{
 return crypto_hash_alg_common(tfm)->digestsize;
}
/* Size of the exportable partial-hash state, in bytes. */
static inline __attribute__((always_inline)) unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
{
 return crypto_hash_alg_common(tfm)->statesize;
}
/* Read the tfm's flag word. */
static inline __attribute__((always_inline)) u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
{
 return crypto_tfm_get_flags(crypto_ahash_tfm(tfm));
}
/* OR the given bits into the tfm's flag word. */
static inline __attribute__((always_inline)) void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags)
{
 crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags);
}
/* Clear the given bits in the tfm's flag word. */
static inline __attribute__((always_inline)) void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
{
 crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
}
/* The ahash tfm a request is bound to. */
static inline __attribute__((always_inline)) struct crypto_ahash *crypto_ahash_reqtfm(
 struct ahash_request *req)
{
 return __crypto_ahash_cast(req->base.tfm);
}
/* Bytes of request-private context this tfm needs per request. */
static inline __attribute__((always_inline)) unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
{
 return tfm->reqsize;
}
/* Request-private context area (flexible array at the tail). */
static inline __attribute__((always_inline)) void *ahash_request_ctx(struct ahash_request *req)
{
 return req->__ctx;
}
13309int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
13310 unsigned int keylen);
13311int crypto_ahash_finup(struct ahash_request *req);
13312int crypto_ahash_final(struct ahash_request *req);
13313int crypto_ahash_digest(struct ahash_request *req);
/* These four entry points dispatch through the function pointers stored on
 * the crypto_ahash transform itself (export/import/init/update); the request
 * carries everything else.  Return value comes straight from the driver. */
static inline __attribute__((always_inline)) int crypto_ahash_export(struct ahash_request *req, void *out)
{
 return crypto_ahash_reqtfm(req)->export(req, out);
}
static inline __attribute__((always_inline)) int crypto_ahash_import(struct ahash_request *req, const void *in)
{
 return crypto_ahash_reqtfm(req)->import(req, in);
}
static inline __attribute__((always_inline)) int crypto_ahash_init(struct ahash_request *req)
{
 return crypto_ahash_reqtfm(req)->init(req);
}
static inline __attribute__((always_inline)) int crypto_ahash_update(struct ahash_request *req)
{
 return crypto_ahash_reqtfm(req)->update(req);
}
/* Bind a request to a transform by storing the base crypto_tfm pointer. */
static inline __attribute__((always_inline)) void ahash_request_set_tfm(struct ahash_request *req,
 struct crypto_ahash *tfm)
{
 req->base.tfm = crypto_ahash_tfm(tfm);
}
/* Allocate a request plus the transform's per-request context area.
 * The enormous condition below is the expansion of likely(req): the kernel's
 * CONFIG_PROFILE_ANNOTATED_BRANCHES instrumentation wraps the NULL check in
 * ftrace branch-statistics bookkeeping, but it still just tests whether
 * kmalloc() succeeded before binding the request to the tfm.
 * Returns NULL on allocation failure; caller frees with ahash_request_free(). */
static inline __attribute__((always_inline)) struct ahash_request *ahash_request_alloc(
 struct crypto_ahash *tfm, gfp_t gfp)
{
 struct ahash_request *req;
 req = kmalloc(sizeof(struct ahash_request) +
 crypto_ahash_reqsize(tfm), gfp);
 if (__builtin_constant_p((((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/hash.h", .line = 222, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/hash.h", .line = 222, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/hash.h", .line = 222, }; ______r = !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/hash.h", .line = 222, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
 ahash_request_set_tfm(req, tfm);
 return req;
}
/* kzfree() zeroes the buffer before freeing — requests may hold key/hash
 * material that must not linger in freed memory. */
static inline __attribute__((always_inline)) void ahash_request_free(struct ahash_request *req)
{
 kzfree(req);
}
/* container_of() expansion: map an embedded crypto_async_request back to the
 * enclosing ahash_request. */
static inline __attribute__((always_inline)) struct ahash_request *ahash_request_cast(
 struct crypto_async_request *req)
{
 return ({ const typeof( ((struct ahash_request *)0)->base ) *__mptr = (req); (struct ahash_request *)( (char *)__mptr - __builtin_offsetof(struct ahash_request,base) );});
}
13354static inline __attribute__((always_inline)) void ahash_request_set_callback(struct ahash_request *req,
13355 u32 flags,
13356 crypto_completion_t complete,
13357 void *data)
13358{
13359 req->base.complete = complete;
13360 req->base.data = data;
13361 req->base.flags = flags;
13362}
13363static inline __attribute__((always_inline)) void ahash_request_set_crypt(struct ahash_request *req,
13364 struct scatterlist *src, u8 *result,
13365 unsigned int nbytes)
13366{
13367 req->src = src;
13368 req->nbytes = nbytes;
13369 req->result = result;
13370}
13371struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
13372 u32 mask);
/* Synchronous-hash (shash) accessor family, mirroring the ahash helpers
 * above: geometry comes from the shash_alg descriptor, flags delegate to the
 * embedded crypto_tfm. */
static inline __attribute__((always_inline)) struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
{
 return &tfm->base;
}
static inline __attribute__((always_inline)) void crypto_free_shash(struct crypto_shash *tfm)
{
 crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
}
static inline __attribute__((always_inline)) unsigned int crypto_shash_alignmask(
 struct crypto_shash *tfm)
{
 return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
}
static inline __attribute__((always_inline)) unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
{
 return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
}
/* container_of() expansion: generic crypto_alg -> enclosing shash_alg. */
static inline __attribute__((always_inline)) struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
{
 return ({ const typeof( ((struct shash_alg *)0)->base ) *__mptr = (alg); (struct shash_alg *)( (char *)__mptr - __builtin_offsetof(struct shash_alg,base) );});
}
static inline __attribute__((always_inline)) struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
{
 return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
}
static inline __attribute__((always_inline)) unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
{
 return crypto_shash_alg(tfm)->digestsize;
}
static inline __attribute__((always_inline)) unsigned int crypto_shash_statesize(struct crypto_shash *tfm)
{
 return crypto_shash_alg(tfm)->statesize;
}
static inline __attribute__((always_inline)) u32 crypto_shash_get_flags(struct crypto_shash *tfm)
{
 return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
}
static inline __attribute__((always_inline)) void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags)
{
 crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags);
}
static inline __attribute__((always_inline)) void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
{
 crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
}
/* Bytes of per-operation state a shash_desc must provide for this tfm. */
static inline __attribute__((always_inline)) unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
{
 return tfm->descsize;
}
/* Driver-private area trailing the shash_desc. */
static inline __attribute__((always_inline)) void *shash_desc_ctx(struct shash_desc *desc)
{
 return desc->__ctx;
}
13426int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
13427 unsigned int keylen);
13428int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
13429 unsigned int len, u8 *out);
13430static inline __attribute__((always_inline)) int crypto_shash_export(struct shash_desc *desc, void *out)
13431{
13432 return crypto_shash_alg(desc->tfm)->export(desc, out);
13433}
13434static inline __attribute__((always_inline)) int crypto_shash_import(struct shash_desc *desc, const void *in)
13435{
13436 return crypto_shash_alg(desc->tfm)->import(desc, in);
13437}
13438static inline __attribute__((always_inline)) int crypto_shash_init(struct shash_desc *desc)
13439{
13440 return crypto_shash_alg(desc->tfm)->init(desc);
13441}
13442int crypto_shash_update(struct shash_desc *desc, const u8 *data,
13443 unsigned int len);
13444int crypto_shash_final(struct shash_desc *desc, u8 *out);
13445int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
13446 unsigned int len, u8 *out);
/* cryptd wrapper: an ablkcipher whose work is deferred to the cryptd thread.
 * The wrapper adds no state, so the cast below is layout-safe. */
struct cryptd_ablkcipher {
 struct crypto_ablkcipher base;
};
static inline __attribute__((always_inline)) struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
 struct crypto_ablkcipher *tfm)
{
 return (struct cryptd_ablkcipher *)tfm;
}
13455struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
13456 u32 type, u32 mask);
13457struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
13458void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
/* cryptd wrapper around an ahash transform; zero-overhead cast as above. */
struct cryptd_ahash {
 struct crypto_ahash base;
};
static inline __attribute__((always_inline)) struct cryptd_ahash *__cryptd_ahash_cast(
 struct crypto_ahash *tfm)
{
 return (struct cryptd_ahash *)tfm;
}
13467struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
13468 u32 type, u32 mask);
13469struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
13470struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
13471void cryptd_free_ahash(struct cryptd_ahash *tfm);
/* cryptd wrapper around an AEAD transform; zero-overhead cast as above. */
struct cryptd_aead {
 struct crypto_aead base;
};
static inline __attribute__((always_inline)) struct cryptd_aead *__cryptd_aead_cast(
 struct crypto_aead *tfm)
{
 return (struct cryptd_aead *)tfm;
}
13480struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
13481 u32 type, u32 mask);
13482struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
13483void cryptd_free_aead(struct cryptd_aead *tfm);
struct sched_param {
 int sched_priority;
};
struct task_struct;
/* Userspace view of capabilities: header (ABI version + target pid) ... */
typedef struct __user_cap_header_struct {
 __u32 version;
 int pid;
} *cap_user_header_t;
/* ... and per-word capability data as passed to capget()/capset(). */
typedef struct __user_cap_data_struct {
 __u32 effective;
 __u32 permitted;
 __u32 inheritable;
} *cap_user_data_t;
/* On-disk file-capability xattr layout (two 32-bit words per set). */
struct vfs_cap_data {
 __le32 magic_etc;
 struct {
  __le32 permitted;
  __le32 inheritable;
 } data[2];
};
extern int file_caps_enabled;
/* In-kernel capability set: 64 capability bits as two 32-bit words. */
typedef struct kernel_cap_struct {
 __u32 cap[2];
} kernel_cap_t;
struct cpu_vfs_cap_data {
 __u32 magic_etc;
 kernel_cap_t permitted;
 kernel_cap_t inheritable;
};
struct dentry;
struct user_namespace;
struct user_namespace *current_user_ns(void);
extern const kernel_cap_t __cap_empty_set;
extern const kernel_cap_t __cap_full_set;
extern const kernel_cap_t __cap_init_eff_set;
/* Word-wise capability-set algebra.  Each do{}while(0) loop is the expansion
 * of the kernel's CAP_BOP_ALL macro iterating over the 2 x 32-bit words. */
/* Set union: a | b. */
static inline __attribute__((always_inline)) kernel_cap_t cap_combine(const kernel_cap_t a,
 const kernel_cap_t b)
{
 kernel_cap_t dest;
 do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] | b.cap[__capi]; } } while (0);
 return dest;
}
/* Set intersection: a & b. */
static inline __attribute__((always_inline)) kernel_cap_t cap_intersect(const kernel_cap_t a,
 const kernel_cap_t b)
{
 kernel_cap_t dest;
 do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] & b.cap[__capi]; } } while (0);
 return dest;
}
/* Set difference: a & ~drop. */
static inline __attribute__((always_inline)) kernel_cap_t cap_drop(const kernel_cap_t a,
 const kernel_cap_t drop)
{
 kernel_cap_t dest;
 do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] &~ drop.cap[__capi]; } } while (0);
 return dest;
}
/* Bitwise complement of a set. */
static inline __attribute__((always_inline)) kernel_cap_t cap_invert(const kernel_cap_t c)
{
 kernel_cap_t dest;
 do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = ~ c.cap[__capi]; } } while (0);
 return dest;
}
/* 1 if no capability bit is set.  The condition is an ftrace branch-profiler
 * expansion of a plain (a.cap[__capi] != 0) test. */
static inline __attribute__((always_inline)) int cap_isclear(const kernel_cap_t a)
{
 unsigned __capi;
 for (__capi = 0; __capi < 2; ++__capi) {
 if (__builtin_constant_p(((a.cap[__capi] != 0))) ? !!((a.cap[__capi] != 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/capability.h", .line = 486, }; ______r = !!((a.cap[__capi] != 0)); ______f.miss_hit[______r]++; ______r; }))
 return 0;
 }
 return 1;
}
/* a is a subset of set iff dropping set's bits from a leaves nothing. */
static inline __attribute__((always_inline)) int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
{
 kernel_cap_t dest;
 dest = cap_drop(a, set);
 return cap_isclear(dest);
}
/* The literal initializers below are the expansion of CAP_FS_SET /
 * CAP_NFSD_SET: a fixed mask of filesystem-related capability bits spread
 * over the two 32-bit words. */
static inline __attribute__((always_inline)) int cap_is_fs_cap(int cap)
{
 const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
 return !!((1 << ((cap) & 31)) & __cap_fs_set.cap[((cap) >> 5)]);
}
static inline __attribute__((always_inline)) kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
{
 const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
 return cap_drop(a, __cap_fs_set);
}
/* Raise the fs-set bits in a, limited to those present in 'permitted'. */
static inline __attribute__((always_inline)) kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
 const kernel_cap_t permitted)
{
 const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
 return cap_combine(a,
 cap_intersect(permitted, __cap_fs_set));
}
/* NOTE: the local here is (misleadingly) still named __cap_fs_set, but it
 * holds the nfsd mask — bit 9 of the fs set is replaced by bit 24. */
static inline __attribute__((always_inline)) kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
{
 const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } });
 return cap_drop(a, __cap_fs_set);
}
static inline __attribute__((always_inline)) kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
 const kernel_cap_t permitted)
{
 const kernel_cap_t __cap_nfsd_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } });
 return cap_combine(a,
 cap_intersect(permitted, __cap_nfsd_set));
}
13590extern bool has_capability(struct task_struct *t, int cap);
13591extern bool has_ns_capability(struct task_struct *t,
13592 struct user_namespace *ns, int cap);
13593extern bool has_capability_noaudit(struct task_struct *t, int cap);
13594extern bool capable(int cap);
13595extern bool ns_capable(struct user_namespace *ns, int cap);
13596extern bool task_ns_capable(struct task_struct *t, int cap);
13597extern bool nsown_capable(int cap);
13598extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
/* Red-black tree node.  rb_parent_color packs the parent pointer and the
 * node colour into one word: the alignment attribute guarantees the low two
 * bits of a node address are free, so the colour lives in bit 0 (see the
 * &3 / ~1 masking in rb_set_parent()/rb_set_color() below). */
struct rb_node
{
 unsigned long rb_parent_color;
 struct rb_node *rb_right;
 struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
struct rb_root
{
 struct rb_node *rb_node;
};
/* Replace the parent pointer while preserving the two low colour bits. */
static inline __attribute__((always_inline)) void rb_set_parent(struct rb_node *rb, struct rb_node *p)
{
 rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p;
}
/* Replace the colour (bit 0) while preserving the parent pointer bits. */
static inline __attribute__((always_inline)) void rb_set_color(struct rb_node *rb, int color)
{
 rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
}
/* Reset a node to the detached state: no children, parent = itself. */
static inline __attribute__((always_inline)) void rb_init_node(struct rb_node *rb)
{
 rb->rb_parent_color = 0;
 rb->rb_right = ((void *)0);
 rb->rb_left = ((void *)0);
 (rb_set_parent(rb, rb));
}
13624extern void rb_insert_color(struct rb_node *, struct rb_root *);
13625extern void rb_erase(struct rb_node *, struct rb_root *);
13626typedef void (*rb_augment_f)(struct rb_node *node, void *data);
13627extern void rb_augment_insert(struct rb_node *node,
13628 rb_augment_f func, void *data);
13629extern struct rb_node *rb_augment_erase_begin(struct rb_node *node);
13630extern void rb_augment_erase_end(struct rb_node *node,
13631 rb_augment_f func, void *data);
13632extern struct rb_node *rb_next(const struct rb_node *);
13633extern struct rb_node *rb_prev(const struct rb_node *);
13634extern struct rb_node *rb_first(const struct rb_root *);
13635extern struct rb_node *rb_last(const struct rb_root *);
13636extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
13637 struct rb_root *root);
/* Link a new leaf under 'parent' at the slot *rb_link found by the caller's
 * search; colour bits start as 0.  rb_insert_color() must follow to rebalance. */
static inline __attribute__((always_inline)) void rb_link_node(struct rb_node * node, struct rb_node * parent,
 struct rb_node ** rb_link)
{
 node->rb_parent_color = (unsigned long )parent;
 node->rb_left = node->rb_right = ((void *)0);
 *rb_link = node;
}
/* Priority-tree (radix/heap hybrid) node types.  The 'raw' variant carries
 * only the links; the full node adds the [start,last] interval. */
struct raw_prio_tree_node {
 struct prio_tree_node *left;
 struct prio_tree_node *right;
 struct prio_tree_node *parent;
};
struct prio_tree_node {
 struct prio_tree_node *left;
 struct prio_tree_node *right;
 struct prio_tree_node *parent;
 unsigned long start;
 unsigned long last;
};
struct prio_tree_root {
 struct prio_tree_node *prio_tree_node;
 unsigned short index_bits;
 unsigned short raw;
};
/* Iterator state for walking all nodes overlapping [r_index, h_index]. */
struct prio_tree_iter {
 struct prio_tree_node *cur;
 unsigned long mask;
 unsigned long value;
 int size_level;
 struct prio_tree_root *root;
 unsigned long r_index;
 unsigned long h_index;
};
/* Prepare an iterator over [r_index, h_index]; cur=NULL means not started. */
static inline __attribute__((always_inline)) void prio_tree_iter_init(struct prio_tree_iter *iter,
 struct prio_tree_root *root, unsigned long r_index, unsigned long h_index)
{
 iter->root = root;
 iter->r_index = r_index;
 iter->h_index = h_index;
 iter->cur = ((void *)0);
}
static inline __attribute__((always_inline)) int prio_tree_empty(const struct prio_tree_root *root)
{
 return root->prio_tree_node == ((void *)0);
}
/* In this structure, self-referencing links encode "empty"/"root". */
static inline __attribute__((always_inline)) int prio_tree_root(const struct prio_tree_node *node)
{
 return node->parent == node;
}
static inline __attribute__((always_inline)) int prio_tree_left_empty(const struct prio_tree_node *node)
{
 return node->left == node;
}
static inline __attribute__((always_inline)) int prio_tree_right_empty(const struct prio_tree_node *node)
{
 return node->right == node;
}
13695struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root,
13696 struct prio_tree_node *old, struct prio_tree_node *node);
13697struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
13698 struct prio_tree_node *node);
13699void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node);
13700struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter);
enum page_debug_flags {
 PAGE_DEBUG_FLAG_POISON,
};
struct address_space;
/* One descriptor per physical page frame.  The unions overlay fields whose
 * use depends on what the page currently is (mapped file page, SLUB slab
 * page, compound-page tail, ...). */
struct page {
 unsigned long flags;
 atomic_t _count;
 union {
  atomic_t _mapcount;
  struct {
   u16 inuse;
   u16 objects;
  };
 };
 union {
  struct {
   unsigned long private;
   struct address_space *mapping;
  };
  struct kmem_cache *slab;
  struct page *first_page;
 };
 union {
  unsigned long index;
  void *freelist;
 };
 struct list_head lru;
};
typedef unsigned long vm_flags_t;
/* nommu-style region descriptor (kept in the rbtree keyed by vm_start). */
struct vm_region {
 struct rb_node vm_rb;
 vm_flags_t vm_flags;
 unsigned long vm_start;
 unsigned long vm_end;
 unsigned long vm_top;
 unsigned long vm_pgoff;
 struct file *vm_file;
 int vm_usage;
 bool vm_icache_flushed : 1;
};
/* One virtual memory area: [vm_start, vm_end) with uniform protection and
 * backing.  Linked both into the mm's list (vm_next/vm_prev) and rbtree. */
struct vm_area_struct {
 struct mm_struct * vm_mm;
 unsigned long vm_start;
 unsigned long vm_end;
 struct vm_area_struct *vm_next, *vm_prev;
 pgprot_t vm_page_prot;
 unsigned long vm_flags;
 struct rb_node vm_rb;
 union {
  struct {
   struct list_head list;
   void *parent;
   struct vm_area_struct *head;
  } vm_set;
  struct raw_prio_tree_node prio_tree_node;
 } shared;
 struct list_head anon_vma_chain;
 struct anon_vma *anon_vma;
 const struct vm_operations_struct *vm_ops;
 unsigned long vm_pgoff;
 struct file * vm_file;
 void * vm_private_data;
};
/* Bookkeeping used while producing a core dump: every thread of the dumped
 * process is chained through core_thread; core_state coordinates them. */
struct core_thread {
 struct task_struct *task;
 struct core_thread *next;
};
struct core_state {
 atomic_t nr_threads;
 struct core_thread dumper;
 struct completion startup;
};
/* Per-mm resident-set counters, indexed by the enum below. */
enum {
 MM_FILEPAGES,
 MM_ANONPAGES,
 MM_SWAPENTS,
 NR_MM_COUNTERS
};
struct mm_rss_stat {
 atomic_long_t count[NR_MM_COUNTERS];
};
/* The address space of a process: VMA list + rbtree, page tables (pgd),
 * reference counts (mm_users = tasks using it, mm_count = total refs),
 * accounting, and the segment boundaries set up by the binary loader. */
struct mm_struct {
 struct vm_area_struct * mmap;
 struct rb_root mm_rb;
 struct vm_area_struct * mmap_cache;
 unsigned long (*get_unmapped_area) (struct file *filp,
  unsigned long addr, unsigned long len,
  unsigned long pgoff, unsigned long flags);
 void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 unsigned long mmap_base;
 unsigned long task_size;
 unsigned long cached_hole_size;
 unsigned long free_area_cache;
 pgd_t * pgd;
 atomic_t mm_users;
 atomic_t mm_count;
 int map_count;
 spinlock_t page_table_lock;
 struct rw_semaphore mmap_sem;
 struct list_head mmlist;
 unsigned long hiwater_rss;
 unsigned long hiwater_vm;
 unsigned long total_vm, locked_vm, shared_vm, exec_vm;
 unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
 unsigned long start_code, end_code, start_data, end_data;
 unsigned long start_brk, brk, start_stack;
 unsigned long arg_start, arg_end, env_start, env_end;
 unsigned long saved_auxv[(2*(2 + 19 + 1))];
 struct mm_rss_stat rss_stat;
 struct linux_binfmt *binfmt;
 cpumask_var_t cpu_vm_mask_var;
 mm_context_t context;
 unsigned int faultstamp;
 unsigned int token_priority;
 unsigned int last_interval;
 atomic_t oom_disable_count;
 unsigned long flags;
 struct core_state *core_state;
 spinlock_t ioctx_lock;
 struct hlist_head ioctx_list;
 struct file *exe_file;
 unsigned long num_exe_file_vmas;
 struct mmu_notifier_mm *mmu_notifier_mm;
 pgtable_t pmd_huge_pte;
};
/* No-op in this configuration (cpumask storage is not off-stack here). */
static inline __attribute__((always_inline)) void mm_init_cpumask(struct mm_struct *mm)
{
}
/* CPUs on which this mm may have been loaded (TLB shootdown targets). */
static inline __attribute__((always_inline)) cpumask_t *mm_cpumask(struct mm_struct *mm)
{
 return mm->cpu_vm_mask_var;
}
typedef unsigned long cputime_t;
typedef u64 cputime64_t;
/* Legacy (16-bit uid) SysV IPC permission record, userspace ABI. */
struct ipc_perm
{
 __kernel_key_t key;
 __kernel_uid_t uid;
 __kernel_gid_t gid;
 __kernel_uid_t cuid;
 __kernel_gid_t cgid;
 __kernel_mode_t mode;
 unsigned short seq;
};
/* 32-bit-uid replacement; __pad/__unused fields preserve the fixed ABI size. */
struct ipc64_perm {
 __kernel_key_t key;
 __kernel_uid32_t uid;
 __kernel_gid32_t gid;
 __kernel_uid32_t cuid;
 __kernel_gid32_t cgid;
 __kernel_mode_t mode;
 unsigned char __pad1[4 - sizeof(__kernel_mode_t)];
 unsigned short seq;
 unsigned short __pad2;
 unsigned long __unused1;
 unsigned long __unused2;
};
/* Argument-passing shim for the old single-entry ipc() syscall multiplexer. */
struct ipc_kludge {
 struct msgbuf *msgp;
 long msgtyp;
};
/* In-kernel counterpart of ipc_perm, with lock, liveness and LSM blob. */
struct kern_ipc_perm
{
 spinlock_t lock;
 int deleted;
 int id;
 key_t key;
 uid_t uid;
 gid_t gid;
 uid_t cuid;
 gid_t cgid;
 mode_t mode;
 unsigned long seq;
 void *security;
};
/* Legacy semctl(IPC_STAT) structure (old ABI). */
struct semid_ds {
 struct ipc_perm sem_perm;
 __kernel_time_t sem_otime;
 __kernel_time_t sem_ctime;
 struct sem *sem_base;
 struct sem_queue *sem_pending;
 struct sem_queue **sem_pending_last;
 struct sem_undo *undo;
 unsigned short sem_nsems;
};
/* Modern semctl structure with 32-bit ids and reserved padding. */
struct semid64_ds {
 struct ipc64_perm sem_perm;
 __kernel_time_t sem_otime;
 unsigned long __unused1;
 __kernel_time_t sem_ctime;
 unsigned long __unused2;
 unsigned long sem_nsems;
 unsigned long __unused3;
 unsigned long __unused4;
};
/* One semop() operation: which semaphore, the delta, and flags. */
struct sembuf {
 unsigned short sem_num;
 short sem_op;
 short sem_flg;
};
/* Fourth argument of semctl(); which member applies depends on the cmd. */
union semun {
 int val;
 struct semid_ds *buf;
 unsigned short *array;
 struct seminfo *__buf;
 void *__pad;
};
struct seminfo {
 int semmap;
 int semmni;
 int semmns;
 int semmnu;
 int semmsl;
 int semopm;
 int semume;
 int semusz;
 int semvmx;
 int semaem;
};
struct task_struct;
/* In-kernel state of a single semaphore within a set. */
struct sem {
 int semval;
 int sempid;
 struct list_head sem_pending;
};
/* A whole semaphore set; sem_perm is cache-line aligned because the lock
 * embedded in it is the hot word. */
struct sem_array {
 struct kern_ipc_perm __attribute__((__aligned__((1 << (6)))))
  sem_perm;
 time_t sem_otime;
 time_t sem_ctime;
 struct sem *sem_base;
 struct list_head sem_pending;
 struct list_head list_id;
 int sem_nsems;
 int complex_count;
};
/* A sleeping semop() caller waiting for its operations to succeed. */
struct sem_queue {
 struct list_head simple_list;
 struct list_head list;
 struct task_struct *sleeper;
 struct sem_undo *undo;
 int pid;
 int status;
 struct sembuf *sops;
 int nsops;
 int alter;
};
/* Per-process SEM_UNDO adjustments, replayed when the process exits. */
struct sem_undo {
 struct list_head list_proc;
 struct rcu_head rcu;
 struct sem_undo_list *ulp;
 struct list_head list_id;
 int semid;
 short * semadj;
};
struct sem_undo_list {
 atomic_t refcnt;
 spinlock_t lock;
 struct list_head list_proc;
};
struct sysv_sem {
 struct sem_undo_list *undo_list;
};
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);
struct siginfo;
typedef unsigned long old_sigset_t;
/* 64 signals packed into an array of 32-bit words (64/32 == 2 words). */
typedef struct {
 unsigned long sig[(64 / 32)];
} sigset_t;
typedef void __signalfn_t(int);
typedef __signalfn_t *__sighandler_t;
typedef void __restorefn_t(void);
typedef __restorefn_t *__sigrestore_t;
extern void do_notify_resume(struct pt_regs *, void *, __u32);
/* Pre-rt_sigaction layout (mask is a single word). */
struct old_sigaction {
 __sighandler_t sa_handler;
 old_sigset_t sa_mask;
 unsigned long sa_flags;
 __sigrestore_t sa_restorer;
};
struct sigaction {
 __sighandler_t sa_handler;
 unsigned long sa_flags;
 __sigrestore_t sa_restorer;
 sigset_t sa_mask;
};
struct k_sigaction {
 struct sigaction sa;
};
/* Alternate signal stack descriptor (sigaltstack(2)). */
typedef struct sigaltstack {
 void *ss_sp;
 int ss_flags;
 size_t ss_size;
} stack_t;
/* Signal-set bit helpers.  Each operation has two variants: an x86 bit-test
 * instruction (__gen_*, used for non-constant signal numbers) and a plain
 * shift/mask form (__const_*, foldable when _sig is a compile-time constant).
 * Signal numbers are 1-based, hence the pervasive "_sig - 1". */
static inline __attribute__((always_inline)) void __gen_sigaddset(sigset_t *set, int _sig)
{
 asm("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
}
static inline __attribute__((always_inline)) void __const_sigaddset(sigset_t *set, int _sig)
{
 unsigned long sig = _sig - 1;
 set->sig[sig / 32] |= 1 << (sig % 32);
}
static inline __attribute__((always_inline)) void __gen_sigdelset(sigset_t *set, int _sig)
{
 asm("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc");
}
static inline __attribute__((always_inline)) void __const_sigdelset(sigset_t *set, int _sig)
{
 unsigned long sig = _sig - 1;
 set->sig[sig / 32] &= ~(1 << (sig % 32));
}
static inline __attribute__((always_inline)) int __const_sigismember(sigset_t *set, int _sig)
{
 unsigned long sig = _sig - 1;
 return 1 & (set->sig[sig / 32] >> (sig % 32));
}
/* btl + sbb turns the tested bit (carry flag) into 0 or -1 in ret. */
static inline __attribute__((always_inline)) int __gen_sigismember(sigset_t *set, int _sig)
{
 int ret;
 asm("btl %2,%1\n\tsbbl %0,%0"
  : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
 return ret;
}
/* Index of the lowest set bit (bsf); result undefined for word == 0. */
static inline __attribute__((always_inline)) int sigfindinword(unsigned long word)
{
 asm("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc");
 return word;
}
struct pt_regs;
/* Payload value a signal can carry: either an int or a pointer. */
typedef union sigval {
 int sival_int;
 void *sival_ptr;
} sigval_t;
/* siginfo: fixed 3-int header plus a union whose active member depends on
 * si_signo/si_code; _pad fixes the overall size at 128 bytes. */
typedef struct siginfo {
 int si_signo;
 int si_errno;
 int si_code;
 union {
  int _pad[((128 - (3 * sizeof(int))) / sizeof(int))];
  struct {
   __kernel_pid_t _pid;
   __kernel_uid32_t _uid;
  } _kill;
  struct {
   __kernel_timer_t _tid;
   int _overrun;
   char _pad[sizeof( __kernel_uid32_t) - sizeof(int)];
   sigval_t _sigval;
   int _sys_private;
  } _timer;
  struct {
   __kernel_pid_t _pid;
   __kernel_uid32_t _uid;
   sigval_t _sigval;
  } _rt;
  struct {
   __kernel_pid_t _pid;
   __kernel_uid32_t _uid;
   int _status;
   __kernel_clock_t _utime;
   __kernel_clock_t _stime;
  } _sigchld;
  struct {
   void *_addr;
   short _addr_lsb;
  } _sigfault;
  struct {
   long _band;
   int _fd;
  } _sigpoll;
 } _sifields;
} siginfo_t;
/* Asynchronous notification request (timers, aio): how to deliver and what. */
typedef struct sigevent {
 sigval_t sigev_value;
 int sigev_signo;
 int sigev_notify;
 union {
  int _pad[((64 - (sizeof(int) * 2 + sizeof(sigval_t))) / sizeof(int))];
  int _tid;
  struct {
   void (*_function)(sigval_t);
   void *_attribute;
  } _sigev_thread;
 } _sigev_un;
} sigevent_t;
14088struct siginfo;
14089void do_schedule_next_timer(struct siginfo *info);
/* Copy a siginfo.  Kernel-generated signals (si_code >= 0) only ever fill up
 * to the _sigchld member, so the else branch copies just that prefix; the
 * condition is an ftrace branch-profiler expansion of (from->si_code < 0). */
static inline __attribute__((always_inline)) void copy_siginfo(struct siginfo *to, struct siginfo *from)
{
 if (__builtin_constant_p(((from->si_code < 0))) ? !!((from->si_code < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/siginfo.h", .line = 289, }; ______r = !!((from->si_code < 0)); ______f.miss_hit[______r]++; ______r; }))
 __builtin_memcpy(to, from, sizeof(*to));
 else
 __builtin_memcpy(to, from, (3 * sizeof(int)) + sizeof(from->_sifields._sigchld));
}
14097extern int copy_siginfo_to_user(struct siginfo *to, struct siginfo *from);
14098struct task_struct;
14099extern int print_fatal_signals;
/* One queued real-time signal instance with its payload. */
struct sigqueue {
 struct list_head list;
 int flags;
 siginfo_t info;
 struct user_struct *user;
};
/* Pending-signal state: the queue plus the bitmask of pending signals. */
struct sigpending {
 struct list_head list;
 sigset_t signal;
};
/* The switch on (64/32) is resolved at compile time (== 2 here); the
 * undefined-function call in 'default' forces a link error if the word
 * count ever becomes unsupported. */
static inline __attribute__((always_inline)) int sigisemptyset(sigset_t *set)
{
 extern void _NSIG_WORDS_is_unsupported_size(void);
 switch ((64 / 32)) {
 case 4:
  return (set->sig[3] | set->sig[2] |
   set->sig[1] | set->sig[0]) == 0;
 case 2:
  return (set->sig[1] | set->sig[0]) == 0;
 case 1:
  return set->sig[0] == 0;
 default:
  _NSIG_WORDS_is_unsupported_size();
  return 0;
 }
}
/* Word-wise set operations (expansions of the _SIG_SET_OP macros).  The case
 * labels deliberately fall through so higher word counts also process the
 * lower words: r = a | b. */
static inline __attribute__((always_inline)) void sigorsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) | (b3)); r->sig[2] = ((a2) | (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) | (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) | (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
/* r = a & b. */
static inline __attribute__((always_inline)) void sigandsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & (b3)); r->sig[2] = ((a2) & (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
/* r = a & ~b. */
static inline __attribute__((always_inline)) void sigandnsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & ~(b3)); r->sig[2] = ((a2) & ~(b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & ~(b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & ~(b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
/* In-place complement of every word. */
static inline __attribute__((always_inline)) void signotset(sigset_t *set) { extern void _NSIG_WORDS_is_unsupported_size(void); switch ((64 / 32)) { case 4: set->sig[3] = (~(set->sig[3])); set->sig[2] = (~(set->sig[2])); case 2: set->sig[1] = (~(set->sig[1])); case 1: set->sig[0] = (~(set->sig[0])); break; default: _NSIG_WORDS_is_unsupported_size(); } }
/* Clear every word of *set. With (64 / 32) == 2 the case 2 label runs and
 * intentionally falls through into case 1; the default arm handles any
 * other word count with a bulk memset. */
static inline __attribute__((always_inline)) void sigemptyset(sigset_t *set)
{
 switch ((64 / 32)) {
 default:
  __builtin_memset(set, 0, sizeof(sigset_t));
  break;
 case 2: set->sig[1] = 0;
 case 1: set->sig[0] = 0;  /* fallthrough target */
  break;
 }
}
/* Set every bit of *set; same fall-through structure as sigemptyset,
 * filling words with -1 (all ones) instead of 0. */
static inline __attribute__((always_inline)) void sigfillset(sigset_t *set)
{
 switch ((64 / 32)) {
 default:
  __builtin_memset(set, -1, sizeof(sigset_t));
  break;
 case 2: set->sig[1] = -1;
 case 1: set->sig[0] = -1;  /* fallthrough target */
  break;
 }
}
14152static inline __attribute__((always_inline)) void sigaddsetmask(sigset_t *set, unsigned long mask)
14153{
14154 set->sig[0] |= mask;
14155}
14156static inline __attribute__((always_inline)) void sigdelsetmask(sigset_t *set, unsigned long mask)
14157{
14158 set->sig[0] &= ~mask;
14159}
14160static inline __attribute__((always_inline)) int sigtestsetmask(sigset_t *set, unsigned long mask)
14161{
14162 return (set->sig[0] & mask) != 0;
14163}
/* Initialize *set so its first word equals mask and all remaining words
 * are zero. (64 / 32) == 2, so the case 2 arm zeroes sig[1] and falls
 * through to the empty case 1. */
static inline __attribute__((always_inline)) void siginitset(sigset_t *set, unsigned long mask)
{
 set->sig[0] = mask;
 switch ((64 / 32)) {
 default:
  __builtin_memset(&set->sig[1], 0, sizeof(long)*((64 / 32)-1));
  break;
 case 2: set->sig[1] = 0;
 case 1: ;
 }
}
/* Like siginitset but with the mask inverted: first word = ~mask, all
 * remaining words set to all-ones. */
static inline __attribute__((always_inline)) void siginitsetinv(sigset_t *set, unsigned long mask)
{
 set->sig[0] = ~mask;
 switch ((64 / 32)) {
 default:
  __builtin_memset(&set->sig[1], -1, sizeof(long)*((64 / 32)-1));
  break;
 case 2: set->sig[1] = -1;
 case 1: ;
 }
}
/* Reset a sigpending structure: empty signal set, empty pending list. */
static inline __attribute__((always_inline)) void init_sigpending(struct sigpending *sig)
{
 sigemptyset(&sig->signal);
 INIT_LIST_HEAD(&sig->list);
}
extern void flush_sigqueue(struct sigpending *queue);
/* Return 1 when sig is within the supported signal-number range
 * (0..64 inclusive), 0 otherwise. The comparison itself yields the
 * required 0/1 int, so no ternary is needed. */
static inline __attribute__((always_inline)) int valid_signal(unsigned long sig)
{
 return sig <= 64;
}
/* Forward declarations and extern prototypes for the core signal API
 * (flattened from kernel headers by the preprocessor). */
struct timespec;
struct pt_regs;
extern int next_signal(struct sigpending *pending, sigset_t *mask);
extern int do_send_sig_info(int sig, struct siginfo *info,
 struct task_struct *p, bool group);
extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
 siginfo_t *info);
extern long do_sigpending(void *, unsigned long);
extern int do_sigtimedwait(const sigset_t *, siginfo_t *,
 const struct timespec *);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;
extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
extern void exit_signals(struct task_struct *tsk);
extern struct kmem_cache *sighand_cachep;
int unhandled_signal(struct task_struct *tsk, int sig);
void signals_init(void);
/* Kinds of pid attachment a task can have; PIDTYPE_MAX sizes arrays. */
enum pid_type
{
 PIDTYPE_PID,
 PIDTYPE_PGID,
 PIDTYPE_SID,
 PIDTYPE_MAX
};
/* One per-namespace view of a pid: numeric value plus its namespace. */
struct upid {
 int nr;
 struct pid_namespace *ns;
 struct hlist_node pid_chain;
};
/* Reference-counted pid object; numbers[] is used as a flexible-length
 * tail (declared [1], indexed up to 'level' by the accessors below). */
struct pid
{
 atomic_t count;
 unsigned int level;
 struct hlist_head tasks[PIDTYPE_MAX];
 struct rcu_head rcu;
 struct upid numbers[1];
};
extern struct pid init_struct_pid;
/* Link node tying a task into one of a pid's per-type hash lists. */
struct pid_link
{
 struct hlist_node node;
 struct pid *pid;
};
/* Take a reference on *pid (atomic_inc of pid->count) when pid is
 * non-NULL, and return it unchanged. The unwieldy conditional is the
 * expanded ftrace branch-profiling wrapper around a plain `if (pid)`;
 * it records hit/miss counts in a static _ftrace_branch record. */
static inline __attribute__((always_inline)) struct pid *get_pid(struct pid *pid)
{
 if (__builtin_constant_p(((pid))) ? !!((pid)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/pid.h", .line = 77, }; ______r = !!((pid)); ______f.miss_hit[______r]++; ______r; }))
  atomic_inc(&pid->count);
 return pid;
}
/* Extern pid lookup/lifetime API (definitions live in kernel/pid.c). */
extern void put_pid(struct pid *pid);
extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);
extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
extern void attach_pid(struct task_struct *task, enum pid_type type,
 struct pid *pid);
extern void detach_pid(struct task_struct *task, enum pid_type);
extern void change_pid(struct task_struct *task, enum pid_type,
 struct pid *pid);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
 enum pid_type);
struct pid_namespace;
extern struct pid_namespace init_pid_ns;
extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns);
extern struct pid *find_vpid(int nr);
extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
extern struct pid *alloc_pid(struct pid_namespace *ns);
extern void free_pid(struct pid *pid);
/* Return the namespace of pid's deepest level (numbers[pid->level].ns),
 * or NULL for a NULL pid. The conditional is the expanded ftrace
 * branch-profiling wrapper around `if (pid)`. */
static inline __attribute__((always_inline)) struct pid_namespace *ns_of_pid(struct pid *pid)
{
 struct pid_namespace *ns = ((void *)0);
 if (__builtin_constant_p(((pid))) ? !!((pid)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/pid.h", .line = 138, }; ______r = !!((pid)); ______f.miss_hit[______r]++; ______r; }))
  ns = pid->numbers[pid->level].ns;
 return ns;
}
14275static inline __attribute__((always_inline)) bool is_child_reaper(struct pid *pid)
14276{
14277 return pid->numbers[pid->level].nr == 1;
14278}
/* Return the global (level 0) numeric pid, or 0 for a NULL pid. The
 * conditional is the expanded ftrace branch-profiling wrapper around
 * `if (pid)`. */
static inline __attribute__((always_inline)) pid_t pid_nr(struct pid *pid)
{
 pid_t nr = 0;
 if (__builtin_constant_p(((pid))) ? !!((pid)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/pid.h", .line = 168, }; ______r = !!((pid)); ______f.miss_hit[______r]++; ______r; }))
  nr = pid->numbers[0].nr;
 return nr;
}
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
pid_t pid_vnr(struct pid *pid);
/* Approximate per-cpu counter: 'count' is the global approximation,
 * 'counters' the per-cpu deltas, 'lock' guards folding them together. */
struct percpu_counter {
 spinlock_t lock;
 s64 count;
 struct list_head list;
 s32 *counters;
};
extern int percpu_counter_batch;
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 struct lock_class_key *key);
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
14302static inline __attribute__((always_inline)) void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
14303{
14304 __percpu_counter_add(fbc, amount, percpu_counter_batch);
14305}
14306static inline __attribute__((always_inline)) s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
14307{
14308 s64 ret = __percpu_counter_sum(fbc);
14309 return ret < 0 ? 0 : ret;
14310}
14311static inline __attribute__((always_inline)) s64 percpu_counter_sum(struct percpu_counter *fbc)
14312{
14313 return __percpu_counter_sum(fbc);
14314}
/* Cheap, possibly stale read of the global approximation. */
static inline __attribute__((always_inline)) s64 percpu_counter_read(struct percpu_counter *fbc)
{
 return fbc->count;
}
/* Cheap read clamped to be non-negative. The empty asm is a compiler
 * barrier preventing the load of fbc->count from being re-fetched or
 * moved across it; the big conditional is the expanded ftrace
 * branch-profiling wrapper around `if (ret >= 0)`. */
static inline __attribute__((always_inline)) s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
 s64 ret = fbc->count;
 __asm__ __volatile__("": : :"memory");
 if (__builtin_constant_p(((ret >= 0))) ? !!((ret >= 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/percpu_counter.h", .line = 76, }; ______r = !!((ret >= 0)); ______f.miss_hit[______r]++; ______r; }))
  return ret;
 return 0;
}
14327static inline __attribute__((always_inline)) int percpu_counter_initialized(struct percpu_counter *fbc)
14328{
14329 return (fbc->counters != ((void *)0));
14330}
/* Increment by one via the batched add path. */
static inline __attribute__((always_inline)) void percpu_counter_inc(struct percpu_counter *fbc)
{
 percpu_counter_add(fbc, 1);
}
/* Decrement by one via the batched add path. */
static inline __attribute__((always_inline)) void percpu_counter_dec(struct percpu_counter *fbc)
{
 percpu_counter_add(fbc, -1);
}
14339static inline __attribute__((always_inline)) void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
14340{
14341 percpu_counter_add(fbc, -amount);
14342}
/* Floating-proportions machinery (lib/proportions.c). A descriptor has
 * two prop_global halves indexed by 'index'. */
struct prop_global {
 int shift;
 struct percpu_counter events;
};
struct prop_descriptor {
 int index;
 struct prop_global pg[2];
 struct mutex mutex;
};
int prop_descriptor_init(struct prop_descriptor *pd, int shift);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);
/* Per-cpu local proportion state. */
struct prop_local_percpu {
 struct percpu_counter events;
 int shift;
 unsigned long period;
 spinlock_t lock;
};
int prop_local_init_percpu(struct prop_local_percpu *pl);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
 long *numerator, long *denominator);
/* Call __prop_inc_percpu() with local interrupts disabled. The two
 * do/while(0) statements are the expanded local_irq_save()/
 * local_irq_restore() macros (arch flag save/restore plus lockdep
 * trace_hardirqs_* bookkeeping and ftrace branch profiling); their
 * exact ordering around the call is what provides the irq-safe
 * critical section, so the code is left byte-identical. */
static inline __attribute__((always_inline))
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
 unsigned long flags;
 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
 __prop_inc_percpu(pd, pl);
 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/proportions.h", .line = 77, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
}
void __prop_inc_percpu_max(struct prop_descriptor *pd,
 struct prop_local_percpu *pl, long frac);
/* Single-threaded (non-percpu) local proportion state. */
struct prop_local_single {
 unsigned long events;
 unsigned long period;
 int shift;
 spinlock_t lock;
};
int prop_local_init_single(struct prop_local_single *pl);
void prop_local_destroy_single(struct prop_local_single *pl);
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
 long *numerator, long *denominator);
/* Call __prop_inc_single() with local interrupts disabled; same
 * expanded local_irq_save()/restore() pattern as prop_inc_percpu()
 * above, left byte-identical for the same reason. */
static inline __attribute__((always_inline))
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
 unsigned long flags;
 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
 __prop_inc_single(pd, pl);
 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/proportions.h", .line = 129, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
}
/* seccomp state: a single mode field. */
typedef struct { int mode; } seccomp_t;
extern void __secure_computing(int);
/* Invoke __secure_computing() when thread-info flag bit 8 (the seccomp
 * TIF flag in this build) is set for the current task. The nested
 * conditionals are the expanded ftrace annotated-branch profiling of
 * what is logically `if (unlikely(test_thread_flag(...)))`. */
static inline __attribute__((always_inline)) void secure_computing(int this_syscall)
{
 if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 8)) ? !!(test_ti_thread_flag(current_thread_info(), 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seccomp.h", .line = 15, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 8)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 8)) ? !!(test_ti_thread_flag(current_thread_info(), 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seccomp.h", .line = 15, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 8)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/seccomp.h", .line = 15, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 8)) ? !!(test_ti_thread_flag(current_thread_info(), 8)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/seccomp.h", .line = 15, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 8)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
  __secure_computing(this_syscall);
}
extern long prctl_get_seccomp(void);
extern long prctl_set_seccomp(unsigned long);
/* Insert 'new' between 'prev' and 'next' for RCU traversal. The ({...})
 * statement expression is the expanded rcu_assign_pointer(): a compiler
 * barrier followed by the store to prev->next, so 'new' is fully
 * initialised before readers can see it. Statement order is the whole
 * point here, so the code is left byte-identical. */
static inline __attribute__((always_inline)) void __list_add_rcu(struct list_head *new,
 struct list_head *prev, struct list_head *next)
{
 new->next = next;
 new->prev = prev;
 ({ if (__builtin_constant_p(((!__builtin_constant_p((new)) || (((new)) != ((void *)0))))) ? !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 38, }; ______r = !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct list_head **)(&(prev)->next))))) = (typeof(*(new)) *)((new)); });
 next->prev = new;
}
14411static inline __attribute__((always_inline)) void list_add_rcu(struct list_head *new, struct list_head *head)
14412{
14413 __list_add_rcu(new, head, head->next);
14414}
14415static inline __attribute__((always_inline)) void list_add_tail_rcu(struct list_head *new,
14416 struct list_head *head)
14417{
14418 __list_add_rcu(new, head->prev, head);
14419}
/* Unlink entry for RCU: only prev is poisoned (with the expanded
 * LIST_POISON2 constant 0x00200200); entry->next is left intact so
 * concurrent readers already on the entry can still walk forward. */
static inline __attribute__((always_inline)) void list_del_rcu(struct list_head *entry)
{
 __list_del(entry->prev, entry->next);
 entry->prev = ((void *) 0x00200200 + (0x0UL));
}
/* Remove n from its hlist (if hashed) and mark it unhashed by clearing
 * pprev; the conditional is the ftrace-profiled `if (!hlist_unhashed(n))`. */
static inline __attribute__((always_inline)) void hlist_del_init_rcu(struct hlist_node *n)
{
 if (__builtin_constant_p(((!hlist_unhashed(n)))) ? !!((!hlist_unhashed(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 137, }; ______r = !!((!hlist_unhashed(n))); ______f.miss_hit[______r]++; ______r; })) {
  __hlist_del(n);
  n->pprev = ((void *)0);
 }
}
/* Replace 'old' with 'new' in an RCU-protected list; the ({...}) block
 * is the expanded rcu_assign_pointer() publishing new via its
 * predecessor's next pointer, after which old->prev is poisoned. */
static inline __attribute__((always_inline)) void list_replace_rcu(struct list_head *old,
 struct list_head *new)
{
 new->next = old->next;
 new->prev = old->prev;
 ({ if (__builtin_constant_p(((!__builtin_constant_p((new)) || (((new)) != ((void *)0))))) ? !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 156, }; ______r = !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct list_head **)(&(new->prev)->next))))) = (typeof(*(new)) *)((new)); });
 new->next->prev = new;
 old->prev = ((void *) 0x00200200 + (0x0UL));
}
/* Splice 'list' onto 'head' for RCU readers: the caller-supplied sync()
 * (e.g. a grace-period wait) runs between emptying 'list' and
 * publishing its first element onto 'head'. Ordering is critical, so
 * the code is left byte-identical. */
static inline __attribute__((always_inline)) void list_splice_init_rcu(struct list_head *list,
 struct list_head *head,
 void (*sync)(void))
{
 struct list_head *first = list->next;
 struct list_head *last = list->prev;
 struct list_head *at = head->next;
 if (__builtin_constant_p(((list_empty(head)))) ? !!((list_empty(head))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 186, }; ______r = !!((list_empty(head))); ______f.miss_hit[______r]++; ______r; }))
  return;
 INIT_LIST_HEAD(list);
 sync();
 last->next = at;
 ({ if (__builtin_constant_p(((!__builtin_constant_p((first)) || (((first)) != ((void *)0))))) ? !!((!__builtin_constant_p((first)) || (((first)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 211, }; ______r = !!((!__builtin_constant_p((first)) || (((first)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct list_head **)(&(head)->next))))) = (typeof(*(first)) *)((first)); });
 first->prev = head;
 at->prev = last;
}
/* RCU hlist removal: unlink and poison pprev (expanded LIST_POISON2);
 * n->next is preserved for concurrent readers. */
static inline __attribute__((always_inline)) void hlist_del_rcu(struct hlist_node *n)
{
 __hlist_del(n);
 n->pprev = ((void *) 0x00200200 + (0x0UL));
}
/* Replace 'old' with 'new' in an RCU hlist; the ({...}) block is the
 * expanded rcu_assign_pointer() store through old's pprev slot. */
static inline __attribute__((always_inline)) void hlist_replace_rcu(struct hlist_node *old,
 struct hlist_node *new)
{
 struct hlist_node *next = old->next;
 new->next = next;
 new->pprev = old->pprev;
 ({ if (__builtin_constant_p(((!__builtin_constant_p((new)) || (((new)) != ((void *)0))))) ? !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 329, }; ______r = !!((!__builtin_constant_p((new)) || (((new)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); ((*(struct hlist_node **)new->pprev)) = (typeof(*(new)) *)((new)); });
 if (__builtin_constant_p(((next))) ? !!((next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 330, }; ______r = !!((next)); ______f.miss_hit[______r]++; ______r; }))
  new->next->pprev = &new->next;
 old->pprev = ((void *) 0x00200200 + (0x0UL));
}
/* RCU hlist insert at head: node is fully linked before the expanded
 * rcu_assign_pointer() publishes it via h->first. */
static inline __attribute__((always_inline)) void hlist_add_head_rcu(struct hlist_node *n,
 struct hlist_head *h)
{
 struct hlist_node *first = h->first;
 n->next = first;
 n->pprev = &h->first;
 ({ if (__builtin_constant_p(((!__builtin_constant_p((n)) || (((n)) != ((void *)0))))) ? !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 368, }; ______r = !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct hlist_node **)(&(h)->first))))) = (typeof(*(n)) *)((n)); });
 if (__builtin_constant_p(((first))) ? !!((first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 369, }; ______r = !!((first)); ______f.miss_hit[______r]++; ______r; }))
  first->pprev = &n->next;
}
/* RCU hlist insert immediately before 'next'; publication is the store
 * through n->pprev (which aliases next's old predecessor slot). */
static inline __attribute__((always_inline)) void hlist_add_before_rcu(struct hlist_node *n,
 struct hlist_node *next)
{
 n->pprev = next->pprev;
 n->next = next;
 ({ if (__builtin_constant_p(((!__builtin_constant_p((n)) || (((n)) != ((void *)0))))) ? !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 396, }; ______r = !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct hlist_node **)((n)->pprev))))) = (typeof(*(n)) *)((n)); });
 next->pprev = &n->next;
}
/* RCU hlist insert immediately after 'prev'; publication is the store
 * to prev->next, then the successor's pprev is fixed up if present. */
static inline __attribute__((always_inline)) void hlist_add_after_rcu(struct hlist_node *prev,
 struct hlist_node *n)
{
 n->next = prev->next;
 n->pprev = &prev->next;
 ({ if (__builtin_constant_p(((!__builtin_constant_p((n)) || (((n)) != ((void *)0))))) ? !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 423, }; ______r = !!((!__builtin_constant_p((n)) || (((n)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); (((*((struct hlist_node **)(&(prev)->next))))) = (typeof(*(n)) *)((n)); });
 if (__builtin_constant_p(((n->next))) ? !!((n->next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist.h", .line = 424, }; ______r = !!((n->next)); ______f.miss_hit[______r]++; ______r; }))
  n->next->pprev = &n->next;
}
/* Priority-sorted list head; exactly one of rawlock/spinlock is set by
 * the init helpers below, the other is NULL. */
struct plist_head {
 struct list_head node_list;
 raw_spinlock_t *rawlock;
 spinlock_t *spinlock;
};
/* A node on a plist: numeric priority plus membership in both the
 * priority list and the full node list. */
struct plist_node {
 int prio;
 struct list_head prio_list;
 struct list_head node_list;
};
14510static inline __attribute__((always_inline)) void
14511plist_head_init(struct plist_head *head, spinlock_t *lock)
14512{
14513 INIT_LIST_HEAD(&head->node_list);
14514 head->spinlock = lock;
14515 head->rawlock = ((void *)0);
14516}
14517static inline __attribute__((always_inline)) void
14518plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
14519{
14520 INIT_LIST_HEAD(&head->node_list);
14521 head->rawlock = lock;
14522 head->spinlock = ((void *)0);
14523}
14524static inline __attribute__((always_inline)) void plist_node_init(struct plist_node *node, int prio)
14525{
14526 node->prio = prio;
14527 INIT_LIST_HEAD(&node->prio_list);
14528 INIT_LIST_HEAD(&node->node_list);
14529}
extern void plist_add(struct plist_node *node, struct plist_head *head);
extern void plist_del(struct plist_node *node, struct plist_head *head);
/* True when the head's node_list has no entries. */
static inline __attribute__((always_inline)) int plist_head_empty(const struct plist_head *head)
{
 return list_empty(&head->node_list);
}
/* True when the node is not currently on any plist. */
static inline __attribute__((always_inline)) int plist_node_empty(const struct plist_node *node)
{
 return list_empty(&node->node_list);
}
/* First (highest-priority) node; the ({...}) is the expanded
 * container_of() over the node_list member. Assumes a non-empty list. */
static inline __attribute__((always_inline)) struct plist_node *plist_first(const struct plist_head *head)
{
 return ({ const typeof( ((struct plist_node *)0)->node_list ) *__mptr = (head->node_list.next); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct plist_node,node_list) );})
 ;
}
/* Last (lowest-priority) node; expanded container_of() over node_list.
 * Assumes a non-empty list. */
static inline __attribute__((always_inline)) struct plist_node *plist_last(const struct plist_head *head)
{
 return ({ const typeof( ((struct plist_node *)0)->node_list ) *__mptr = (head->node_list.prev); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct plist_node,node_list) );})
 ;
}
extern int max_lock_depth;
/* Priority-inheriting rt_mutex; the name/file/line/magic fields are the
 * debug variant's bookkeeping (CONFIG_DEBUG_RT_MUTEXES in this build). */
struct rt_mutex {
 raw_spinlock_t wait_lock;
 struct plist_head wait_list;
 struct task_struct *owner;
 int save_state;
 const char *name, *file;
 int line;
 void *magic;
};
struct rt_mutex_waiter;
struct hrtimer_sleeper;
 extern int rt_mutex_debug_check_no_locks_freed(const void *from,
 unsigned long len);
 extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
 extern void rt_mutex_debug_task_free(struct task_struct *tsk);
14566static inline __attribute__((always_inline)) int rt_mutex_is_locked(struct rt_mutex *lock)
14567{
14568 return lock->owner != ((void *)0);
14569}
/* rt_mutex public API. */
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
extern void rt_mutex_destroy(struct rt_mutex *lock);
extern void rt_mutex_lock(struct rt_mutex *lock);
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
 int detect_deadlock);
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
 struct hrtimer_sleeper *timeout,
 int detect_deadlock);
extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
/* Resource-usage accounting record (getrusage(2) layout). */
struct rusage {
 struct timeval ru_utime;
 struct timeval ru_stime;
 long ru_maxrss;
 long ru_ixrss;
 long ru_idrss;
 long ru_isrss;
 long ru_minflt;
 long ru_majflt;
 long ru_nswap;
 long ru_inblock;
 long ru_oublock;
 long ru_msgsnd;
 long ru_msgrcv;
 long ru_nsignals;
 long ru_nvcsw;
 long ru_nivcsw;
};
/* Soft/hard resource limits, native-word and fixed 64-bit variants. */
struct rlimit {
 unsigned long rlim_cur;
 unsigned long rlim_max;
};
struct rlimit64 {
 __u64 rlim_cur;
 __u64 rlim_max;
};
struct task_struct;
int getrusage(struct task_struct *p, int who, struct rusage *ru);
int do_prlimit(struct task_struct *tsk, unsigned int resource,
 struct rlimit *new_rlim, struct rlimit *old_rlim);
/* Node of an rbtree-backed timer queue, keyed by expiry time. */
struct timerqueue_node {
 struct rb_node node;
 ktime_t expires;
};
/* Queue head: the rbtree plus a cached pointer to the earliest node. */
struct timerqueue_head {
 struct rb_root head;
 struct timerqueue_node *next;
};
extern void timerqueue_add(struct timerqueue_head *head,
 struct timerqueue_node *node);
extern void timerqueue_del(struct timerqueue_head *head,
 struct timerqueue_node *node);
extern struct timerqueue_node *timerqueue_iterate_next(
 struct timerqueue_node *node);
14624static inline __attribute__((always_inline))
14625struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
14626{
14627 return head->next;
14628}
14629static inline __attribute__((always_inline)) void timerqueue_init(struct timerqueue_node *node)
14630{
14631 rb_init_node(&node->node);
14632}
14633static inline __attribute__((always_inline)) void timerqueue_init_head(struct timerqueue_head *head)
14634{
14635 head->head = (struct rb_root) { ((void *)0), };
14636 head->next = ((void *)0);
14637}
struct hrtimer_clock_base;
struct hrtimer_cpu_base;
/* Start-mode flags: bit 0 selects relative vs absolute expiry, bit 1
 * pins the timer to the current CPU (see the *_PINNED combinations). */
enum hrtimer_mode {
 HRTIMER_MODE_ABS = 0x0,
 HRTIMER_MODE_REL = 0x1,
 HRTIMER_MODE_PINNED = 0x02,
 HRTIMER_MODE_ABS_PINNED = 0x02,
 HRTIMER_MODE_REL_PINNED = 0x03,
};
/* Callback return value: whether to re-arm the timer. */
enum hrtimer_restart {
 HRTIMER_NORESTART,
 HRTIMER_RESTART,
};
/* High-resolution timer. node.expires is the hard deadline;
 * _softexpires the earliest acceptable firing time (the two are kept
 * in sync by the set/add helpers below). start_pid/start_site/
 * start_comm are debug/tracing bookkeeping in this configuration. */
struct hrtimer {
 struct timerqueue_node node;
 ktime_t _softexpires;
 enum hrtimer_restart (*function)(struct hrtimer *);
 struct hrtimer_clock_base *base;
 unsigned long state;
 int start_pid;
 void *start_site;
 char start_comm[16];
};
/* Helper pairing a timer with the task it should wake. */
struct hrtimer_sleeper {
 struct hrtimer timer;
 struct task_struct *task;
};
/* Per-clock state within a CPU base: active queue, resolution and the
 * clock's read function. */
struct hrtimer_clock_base {
 struct hrtimer_cpu_base *cpu_base;
 int index;
 clockid_t clockid;
 struct timerqueue_head active;
 ktime_t resolution;
 ktime_t (*get_time)(void);
 ktime_t softirq_time;
 ktime_t offset;
};
enum hrtimer_base_type {
 HRTIMER_BASE_MONOTONIC,
 HRTIMER_BASE_REALTIME,
 HRTIMER_BASE_BOOTTIME,
 HRTIMER_MAX_CLOCK_BASES,
};
/* Per-CPU timer state: one clock_base per enum hrtimer_base_type, plus
 * high-resolution-mode status and hang-detection statistics. */
struct hrtimer_cpu_base {
 raw_spinlock_t lock;
 unsigned long active_bases;
 ktime_t expires_next;
 int hres_active;
 int hang_detected;
 unsigned long nr_events;
 unsigned long nr_retries;
 unsigned long nr_hangs;
 ktime_t max_hang_time;
 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
};
14693static inline __attribute__((always_inline)) void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
14694{
14695 timer->node.expires = time;
14696 timer->_softexpires = time;
14697}
/* Soft expiry at 'time', hard expiry 'delta' later (overflow-safe via
 * ktime_add_safe). */
static inline __attribute__((always_inline)) void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
{
 timer->_softexpires = time;
 timer->node.expires = ktime_add_safe(time, delta);
}
/* Same as above with the slack given in nanoseconds. */
static inline __attribute__((always_inline)) void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
{
 timer->_softexpires = time;
 timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
}
14708static inline __attribute__((always_inline)) void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
14709{
14710 timer->node.expires.tv64 = tv64;
14711 timer->_softexpires.tv64 = tv64;
14712}
/* Push both expiry fields forward by 'time', saturating on overflow
 * via ktime_add_safe. */
static inline __attribute__((always_inline)) void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
 timer->node.expires = ktime_add_safe(timer->node.expires, time);
 timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
}
/* Push both expiry fields forward by 'ns' nanoseconds; the ({...})
 * expressions are the expanded ktime_add_ns for the scalar-tv64 ktime
 * representation (plain addition, no overflow check). */
static inline __attribute__((always_inline)) void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
{
 timer->node.expires = ({ (ktime_t){ .tv64 = (timer->node.expires).tv64 + (ns) }; });
 timer->_softexpires = ({ (ktime_t){ .tv64 = (timer->_softexpires).tv64 + (ns) }; });
}
14723static inline __attribute__((always_inline)) ktime_t hrtimer_get_expires(const struct hrtimer *timer)
14724{
14725 return timer->node.expires;
14726}
14727static inline __attribute__((always_inline)) ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
14728{
14729 return timer->_softexpires;
14730}
14731static inline __attribute__((always_inline)) s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
14732{
14733 return timer->node.expires.tv64;
14734}
14735static inline __attribute__((always_inline)) s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
14736{
14737 return timer->_softexpires.tv64;
14738}
14739static inline __attribute__((always_inline)) s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
14740{
14741 return ((timer->node.expires).tv64);
14742}
14743static inline __attribute__((always_inline)) ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
14744{
14745 return ({ (ktime_t){ .tv64 = (timer->node.expires).tv64 - (timer->base->get_time()).tv64 }; });
14746}
struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev);
/* Current time of the timer's clock base. */
static inline __attribute__((always_inline)) ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
 return timer->base->get_time();
}
/* Non-zero while this CPU is running in high-resolution timer mode. */
static inline __attribute__((always_inline)) int hrtimer_is_hres_active(struct hrtimer *timer)
{
 return timer->base->cpu_base->hres_active;
}
extern void hrtimer_peek_ahead_timers(void);
extern void clock_was_set(void);
extern void timerfd_clock_was_set(void);
extern void hrtimers_resume(void);
extern ktime_t ktime_get(void);
extern ktime_t ktime_get_real(void);
extern ktime_t ktime_get_boottime(void);
extern ktime_t ktime_get_monotonic_offset(void);
/* Per-CPU tick device (DEFINE_PER_CPU expansion). */
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tick_device) tick_cpu_device;
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
    enum hrtimer_mode mode);
/* With DEBUG_OBJECTS off, the on-stack variants are plain wrappers/no-ops. */
static inline __attribute__((always_inline)) void hrtimer_init_on_stack(struct hrtimer *timer,
     clockid_t which_clock,
     enum hrtimer_mode mode)
{
 hrtimer_init(timer, which_clock, mode);
}
static inline __attribute__((always_inline)) void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
    const enum hrtimer_mode mode);
extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
   unsigned long range_ns, const enum hrtimer_mode mode);
extern int
__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
    unsigned long delta_ns,
    const enum hrtimer_mode mode, int wakeup);
extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
/*
 * Queue a timer using its pre-set window: armed at the soft expiry with
 * a slack of (hard - soft) nanoseconds (ktime_sub/ktime_to_ns expanded).
 */
static inline __attribute__((always_inline)) int hrtimer_start_expires(struct hrtimer *timer,
   enum hrtimer_mode mode)
{
 unsigned long delta;
 ktime_t soft, hard;
 soft = hrtimer_get_softexpires(timer);
 hard = hrtimer_get_expires(timer);
 delta = ((({ (ktime_t){ .tv64 = (hard).tv64 - (soft).tv64 }; })).tv64);
 return hrtimer_start_range_ns(timer, soft, delta, mode);
}
/* Re-arm a timer at its previously programmed absolute expiry. */
static inline __attribute__((always_inline)) int hrtimer_restart(struct hrtimer *timer)
{
 return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
extern ktime_t hrtimer_get_next_event(void);
/*
 * Timer state tests.  The symbolic HRTIMER_STATE_* masks were expanded
 * by the preprocessor (0x01 presumably ENQUEUED, 0x02 CALLBACK — confirm
 * against the original hrtimer.h before relying on the meaning).
 */
static inline __attribute__((always_inline)) int hrtimer_active(const struct hrtimer *timer)
{
 return timer->state != 0x00;
}
static inline __attribute__((always_inline)) int hrtimer_is_queued(struct hrtimer *timer)
{
 return timer->state & 0x01;
}
static inline __attribute__((always_inline)) int hrtimer_callback_running(struct hrtimer *timer)
{
 return timer->state & 0x02;
}
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
/* Forward the timer past the current base time; returns number of overruns. */
static inline __attribute__((always_inline)) u64 hrtimer_forward_now(struct hrtimer *timer,
          ktime_t interval)
{
 return hrtimer_forward(timer, timer->base->get_time(), interval);
}
extern long hrtimer_nanosleep(struct timespec *rqtp,
         struct timespec *rmtp,
         const enum hrtimer_mode mode,
         const clockid_t clockid);
extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
     struct task_struct *tsk);
extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
     const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
  unsigned long delta, const enum hrtimer_mode mode, int clock);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
extern void hrtimer_run_queues(void);
extern void hrtimer_run_pending(void);
/* __init-section initializer (attribute soup is the expanded __init macro). */
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) hrtimers_init(void);
extern u64 ktime_divns(const ktime_t kt, s64 div);
extern void sysrq_timer_list_show(void);
/* Per-task I/O accounting counters (bytes and syscall counts). */
struct task_io_accounting {
 u64 rchar;
 u64 wchar;
 u64 syscr;
 u64 syscw;
 u64 read_bytes;
 u64 write_bytes;
 u64 cancelled_write_bytes;
};
/* One latencytop record: a backtrace plus occurrence count and timings. */
struct latency_record {
 unsigned long backtrace[12];
 unsigned int count;
 unsigned long time;
 unsigned long max;
};
struct task_struct;
extern int latencytop_enabled;
void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
/*
 * Record a scheduling latency if latencytop is enabled.  The condition is
 * the fully expanded likely()/branch-profiling instrumentation; it boils
 * down to: if (latencytop_enabled) __account_scheduler_latency(...).
 */
static inline __attribute__((always_inline)) void
account_scheduler_latency(struct task_struct *task, int usecs, int inter)
{
 if (__builtin_constant_p((((__builtin_constant_p(latencytop_enabled) ? !!(latencytop_enabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/latencytop.h", .line = 33, }; ______r = __builtin_expect(!!(latencytop_enabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(latencytop_enabled) ? !!(latencytop_enabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/latencytop.h", .line = 33, }; ______r = __builtin_expect(!!(latencytop_enabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/latencytop.h", .line = 33, }; ______r = !!(((__builtin_constant_p(latencytop_enabled) ? !!(latencytop_enabled) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/latencytop.h", .line = 33, }; ______r = __builtin_expect(!!(latencytop_enabled), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
 __account_scheduler_latency(task, usecs, inter);
}
void clear_all_latency_tracing(struct task_struct *p);
typedef int32_t key_serial_t;
typedef uint32_t key_perm_t;
struct key;
struct selinux_audit_rule;
struct audit_context;
struct kern_ipc_perm;
/* SELinux compiled out in this configuration: always reports disabled. */
static inline __attribute__((always_inline)) bool selinux_is_enabled(void)
{
 return false;
}
struct user_struct;
struct cred;
struct inode;
/* Supplementary group list; small_block holds up to 32 gids inline. */
struct group_info {
 atomic_t usage;
 int ngroups;
 int nblocks;
 gid_t small_block[32];
 gid_t *blocks[0];
};
/* Take a reference on @gi and return it. */
static inline __attribute__((always_inline)) struct group_info *get_group_info(struct group_info *gi)
{
 atomic_inc(&gi->usage);
 return gi;
}
extern struct group_info *groups_alloc(int);
extern struct group_info init_groups;
extern void groups_free(struct group_info *);
extern int set_current_groups(struct group_info *);
extern int set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, gid_t);
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);
/* Task credentials: uids/gids, capability sets and security state. */
struct cred {
 atomic_t usage;
 uid_t uid;
 gid_t gid;
 uid_t suid;
 gid_t sgid;
 uid_t euid;
 gid_t egid;
 uid_t fsuid;
 gid_t fsgid;
 unsigned securebits;
 kernel_cap_t cap_inheritable;
 kernel_cap_t cap_permitted;
 kernel_cap_t cap_effective;
 kernel_cap_t cap_bset;
 void *security;
 struct user_struct *user;
 struct user_namespace *user_ns;
 struct group_info *group_info;
 struct rcu_head rcu;
};
extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
extern int copy_creds(struct task_struct *, unsigned long);
extern const struct cred *get_task_cred(struct task_struct *);
extern struct cred *cred_alloc_blank(void);
extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
extern int commit_creds(struct cred *);
extern void abort_creds(struct cred *);
extern const struct cred *override_creds(const struct cred *);
extern void revert_creds(const struct cred *);
extern struct cred *prepare_kernel_cred(struct task_struct *);
extern int change_create_files_as(struct cred *, struct inode *);
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) cred_init(void);
/* Credential validation hooks: empty — DEBUG_CREDENTIALS presumably off. */
static inline __attribute__((always_inline)) void validate_creds(const struct cred *cred)
{
}
static inline __attribute__((always_inline)) void validate_creds_for_do_exit(struct task_struct *tsk)
{
}
static inline __attribute__((always_inline)) void validate_process_creds(void)
{
}
/* Take a reference on a mutable cred. */
static inline __attribute__((always_inline)) struct cred *get_new_cred(struct cred *cred)
{
 atomic_inc(&cred->usage);
 return cred;
}
/* Take a reference on a const cred (casts away const for the refcount). */
static inline __attribute__((always_inline)) const struct cred *get_cred(const struct cred *cred)
{
 struct cred *nonconst_cred = (struct cred *) cred;
 validate_creds(cred);
 return get_new_cred(nonconst_cred);
}
/*
 * Drop a reference; frees via __put_cred() when the count hits zero.
 * The condition is the expanded branch-profiling wrapper around
 * atomic_dec_and_test() — it evaluates the decrement exactly once.
 */
static inline __attribute__((always_inline)) void put_cred(const struct cred *_cred)
{
 struct cred *cred = (struct cred *) _cred;
 validate_creds(cred);
 if (__builtin_constant_p(((atomic_dec_and_test(&(cred)->usage)))) ? !!((atomic_dec_and_test(&(cred)->usage))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/cred.h", .line = 261, }; ______r = !!((atomic_dec_and_test(&(cred)->usage))); ______f.miss_hit[______r]++; ______r; }))
 __put_cred(cred);
}
extern struct user_namespace init_user_ns;
/* Forward declarations and externs from the top of include/linux/sched.h. */
struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
extern unsigned long avenrun[];
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
extern unsigned long total_forks;
extern int nr_threads;
extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) process_counts;
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);
extern void calc_global_load(unsigned long ticks);
extern unsigned long get_parent_ip(unsigned long addr);
struct seq_file;
struct cfs_rq;
struct task_group;
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
/*
 * Expanded BUILD_BUG_ON for the TASK_STATE_TO_CHAR_STR table: the array
 * size goes negative (a compile error) if the string length does not
 * match ilog2(512)+1; the ?: chain is the fully expanded ilog2() macro.
 */
extern char ___assert_task_state[1 - 2*!!(
 sizeof("RSDTtZXxKW")-1 != ( __builtin_constant_p(512) ? ( (512) < 1 ? ____ilog2_NaN() : (512) & (1ULL << 63) ? 63 : (512) & (1ULL << 62) ? 62 : (512) & (1ULL << 61) ? 61 : (512) & (1ULL << 60) ? 60 : (512) & (1ULL << 59) ? 59 : (512) & (1ULL << 58) ? 58 : (512) & (1ULL << 57) ? 57 : (512) & (1ULL << 56) ? 56 : (512) & (1ULL << 55) ? 55 : (512) & (1ULL << 54) ? 54 : (512) & (1ULL << 53) ? 53 : (512) & (1ULL << 52) ? 52 : (512) & (1ULL << 51) ? 51 : (512) & (1ULL << 50) ? 50 : (512) & (1ULL << 49) ? 49 : (512) & (1ULL << 48) ? 48 : (512) & (1ULL << 47) ? 47 : (512) & (1ULL << 46) ? 46 : (512) & (1ULL << 45) ? 45 : (512) & (1ULL << 44) ? 44 : (512) & (1ULL << 43) ? 43 : (512) & (1ULL << 42) ? 42 : (512) & (1ULL << 41) ? 41 : (512) & (1ULL << 40) ? 40 : (512) & (1ULL << 39) ? 39 : (512) & (1ULL << 38) ? 38 : (512) & (1ULL << 37) ? 37 : (512) & (1ULL << 36) ? 36 : (512) & (1ULL << 35) ? 35 : (512) & (1ULL << 34) ? 34 : (512) & (1ULL << 33) ? 33 : (512) & (1ULL << 32) ? 32 : (512) & (1ULL << 31) ? 31 : (512) & (1ULL << 30) ? 30 : (512) & (1ULL << 29) ? 29 : (512) & (1ULL << 28) ? 28 : (512) & (1ULL << 27) ? 27 : (512) & (1ULL << 26) ? 26 : (512) & (1ULL << 25) ? 25 : (512) & (1ULL << 24) ? 24 : (512) & (1ULL << 23) ? 23 : (512) & (1ULL << 22) ? 22 : (512) & (1ULL << 21) ? 21 : (512) & (1ULL << 20) ? 20 : (512) & (1ULL << 19) ? 19 : (512) & (1ULL << 18) ? 18 : (512) & (1ULL << 17) ? 17 : (512) & (1ULL << 16) ? 16 : (512) & (1ULL << 15) ? 15 : (512) & (1ULL << 14) ? 14 : (512) & (1ULL << 13) ? 13 : (512) & (1ULL << 12) ? 12 : (512) & (1ULL << 11) ? 11 : (512) & (1ULL << 10) ? 10 : (512) & (1ULL << 9) ? 9 : (512) & (1ULL << 8) ? 8 : (512) & (1ULL << 7) ? 7 : (512) & (1ULL << 6) ? 6 : (512) & (1ULL << 5) ? 5 : (512) & (1ULL << 4) ? 4 : (512) & (1ULL << 3) ? 3 : (512) & (1ULL << 2) ? 2 : (512) & (1ULL << 1) ? 1 : (512) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(512) <= 4) ? __ilog2_u32(512) : __ilog2_u64(512) )+1)];
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
struct task_struct;
extern void sched_init(void);
extern void sched_init_smp(void);
extern __attribute__((regparm(0))) void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
extern int runqueue_is_locked(int cpu);
extern cpumask_var_t nohz_cpu_mask;
/* NO_HZ balancing stub for this configuration. */
static inline __attribute__((always_inline)) void select_nohz_load_balancer(int stop_tick) { }
extern void show_state_filter(unsigned long state_filter);
/* Dump the state of all tasks (no state filter). */
static inline __attribute__((always_inline)) void show_state(void)
{
 show_state_filter(0);
}
extern void show_regs(struct pt_regs *);
extern void show_stack(struct task_struct *task, unsigned long *sp);
void io_schedule(void);
long io_schedule_timeout(long timeout);
extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
extern void sched_show_task(struct task_struct *p);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
     void *buffer,
     size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
void lockup_detector_init(void);
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
      void *buffer,
      size_t *lenp, loff_t *ppos);
extern char __sched_text_start[], __sched_text_end[];
extern int in_sched_functions(unsigned long addr);
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
 __attribute__((regparm(0))) void schedule(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
struct nsproxy;
struct user_namespace;
extern int sysctl_max_map_count;
typedef unsigned long aio_context_t;
/* AIO opcodes for iocb.aio_lio_opcode (userspace ABI values). */
enum {
 IOCB_CMD_PREAD = 0,
 IOCB_CMD_PWRITE = 1,
 IOCB_CMD_FSYNC = 2,
 IOCB_CMD_FDSYNC = 3,
 IOCB_CMD_NOOP = 6,
 IOCB_CMD_PREADV = 7,
 IOCB_CMD_PWRITEV = 8,
};
/* Completion record returned by io_getevents(). */
struct io_event {
 __u64 data;
 __u64 obj;
 __s64 res;
 __s64 res2;
};
/* Userspace AIO control block (fixed-width fields: this is ABI). */
struct iocb {
 __u64 aio_data;
 __u32 aio_key, aio_reserved1;
 __u16 aio_lio_opcode;
 __s16 aio_reqprio;
 __u32 aio_fildes;
 __u64 aio_buf;
 __u64 aio_nbytes;
 __s64 aio_offset;
 __u64 aio_reserved2;
 __u32 aio_flags;
 __u32 aio_resfd;
};
/* Scatter/gather segment: user-space base pointer plus length. */
struct iovec
{
 void *iov_base;
 __kernel_size_t iov_len;
};
/* Kernel-space counterpart of iovec. */
struct kvec {
 void *iov_base;
 size_t iov_len;
};
/* Total number of bytes described by the first @nr_segs segments of @iov. */
static inline __attribute__((always_inline)) size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
 const struct iovec *stop = iov + nr_segs;
 size_t total = 0;
 while (iov < stop)
  total += (iov++)->iov_len;
 return total;
}
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
struct kioctx;
/* In-kernel AIO request state; one per outstanding iocb. */
struct kiocb {
 struct list_head ki_run_list;
 unsigned long ki_flags;
 int ki_users;
 unsigned ki_key;
 struct file *ki_filp;
 struct kioctx *ki_ctx;
 int (*ki_cancel)(struct kiocb *, struct io_event *);
 ssize_t (*ki_retry)(struct kiocb *);
 void (*ki_dtor)(struct kiocb *);
 union {
  void *user;
  struct task_struct *tsk;
 } ki_obj;
 __u64 ki_user_data;
 loff_t ki_pos;
 void *private;
 unsigned short ki_opcode;
 size_t ki_nbytes;
 char *ki_buf;
 size_t ki_left;
 struct iovec ki_inline_vec;
 struct iovec *ki_iovec;
 unsigned long ki_nr_segs;
 unsigned long ki_cur_seg;
 struct list_head ki_list;
 struct eventfd_ctx *ki_eventfd;
};
/* Header of the user-mapped AIO completion ring; io_events follow inline. */
struct aio_ring {
 unsigned id;
 unsigned nr;
 unsigned head;
 unsigned tail;
 unsigned magic;
 unsigned compat_features;
 unsigned incompat_features;
 unsigned header_length;
 struct io_event io_events[0];
};
/* Kernel bookkeeping for the mmap'ed ring pages. */
struct aio_ring_info {
 unsigned long mmap_base;
 unsigned long mmap_size;
 struct page **ring_pages;
 spinlock_t ring_lock;
 long nr_pages;
 unsigned nr, tail;
 struct page *internal_pages[8];
};
/* One AIO context as created by io_setup(). */
struct kioctx {
 atomic_t users;
 int dead;
 struct mm_struct *mm;
 unsigned long user_id;
 struct hlist_node list;
 wait_queue_head_t wait;
 spinlock_t ctx_lock;
 int reqs_active;
 struct list_head active_reqs;
 struct list_head run_list;
 unsigned max_reqs;
 struct aio_ring_info ring_info;
 struct delayed_work wq;
 struct rcu_head rcu_head;
};
extern unsigned aio_max_size;
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
extern int aio_put_req(struct kiocb *iocb);
extern void kick_iocb(struct kiocb *iocb);
extern int aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
    struct iocb * *iocbpp, bool compat);
/* container_of() expansion: map a ki_list node back to its kiocb. */
static inline __attribute__((always_inline)) struct kiocb *list_kiocb(struct list_head *h)
{
 return ({ const typeof( ((struct kiocb *)0)->ki_list ) *__mptr = (h); (struct kiocb *)( (char *)__mptr - __builtin_offsetof(struct kiocb,ki_list) );});
}
extern unsigned long aio_nr;
extern unsigned long aio_max_nr;
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
         unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
     unsigned long len, unsigned long pgoff,
     unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);
/* Shared signal handlers for a thread group. */
struct sighand_struct {
 atomic_t count;
 struct k_sigaction action[64];
 spinlock_t siglock;
 wait_queue_head_t signalfd_wqh;
};
/* BSD process-accounting snapshot. */
struct pacct_struct {
 int ac_flag;
 long ac_exitcode;
 unsigned long ac_mem;
 cputime_t ac_utime, ac_stime;
 unsigned long ac_minflt, ac_majflt;
};
/* ITIMER_VIRTUAL/ITIMER_PROF state with error accumulation. */
struct cpu_itimer {
 cputime_t expires;
 cputime_t incr;
 u32 error;
 u32 incr_error;
};
/* Aggregated CPU time: user, system and raw scheduler runtime. */
struct task_cputime {
 cputime_t utime;
 cputime_t stime;
 unsigned long long sum_exec_runtime;
};
/* Thread-group CPU timer sampling state. */
struct thread_group_cputimer {
 struct task_cputime cputime;
 int running;
 spinlock_t lock;
};
struct autogroup;
/* Per-thread-group signal state, timers, rlimits and accounting. */
struct signal_struct {
 atomic_t sigcnt;
 atomic_t live;
 int nr_threads;
 wait_queue_head_t wait_chldexit;
 struct task_struct *curr_target;
 struct sigpending shared_pending;
 int group_exit_code;
 int notify_count;
 struct task_struct *group_exit_task;
 int group_stop_count;
 unsigned int flags;
 struct list_head posix_timers;
 struct hrtimer real_timer;
 struct pid *leader_pid;
 ktime_t it_real_incr;
 struct cpu_itimer it[2];
 struct thread_group_cputimer cputimer;
 struct task_cputime cputime_expires;
 struct list_head cpu_timers[3];
 struct pid *tty_old_pgrp;
 int leader;
 struct tty_struct *tty;
 struct autogroup *autogroup;
 cputime_t utime, stime, cutime, cstime;
 cputime_t gtime;
 cputime_t cgtime;
 cputime_t prev_utime, prev_stime;
 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 unsigned long inblock, oublock, cinblock, coublock;
 unsigned long maxrss, cmaxrss;
 struct task_io_accounting ioac;
 unsigned long long sum_sched_runtime;
 struct rlimit rlim[16];
 struct pacct_struct pacct;
 struct taskstats *stats;
 unsigned audit_tty;
 struct tty_audit_buf *tty_audit_buf;
 struct rw_semaphore threadgroup_fork_lock;
 int oom_adj;
 int oom_score_adj;
 int oom_score_adj_min;
 struct mutex cred_guard_mutex;
};
15256static inline __attribute__((always_inline)) int signal_group_exit(const struct signal_struct *sig)
15257{
15258 return (sig->flags & 0x00000004) ||
15259 (sig->group_exit_task != ((void *)0));
15260}
/* Per-uid resource accounting. */
struct user_struct {
 atomic_t __count;
 atomic_t processes;
 atomic_t files;
 atomic_t sigpending;
 atomic_t inotify_watches;
 atomic_t inotify_devs;
 atomic_t fanotify_listeners;
 atomic_long_t epoll_watches;
 unsigned long mq_bytes;
 unsigned long locked_shm;
 struct hlist_node uidhash_node;
 uid_t uid;
 struct user_namespace *user_ns;
 atomic_long_t locked_vm;
};
extern int uids_sysfs_init(void);
extern struct user_struct *find_user(uid_t);
extern struct user_struct root_user;
struct backing_dev_info;
struct reclaim_state;
/* schedstats bookkeeping for one task. */
struct sched_info {
 unsigned long pcount;
 unsigned long long run_delay;
 unsigned long long last_arrival,
      last_queued;
};
/* Per-task delay accounting (block I/O, swap-in, memory reclaim). */
struct task_delay_info {
 spinlock_t lock;
 unsigned int flags;
 struct timespec blkio_start, blkio_end;
 u64 blkio_delay;
 u64 swapin_delay;
 u32 blkio_count;
 u32 swapin_count;
 struct timespec freepages_start, freepages_end;
 u64 freepages_delay;
 u32 freepages_count;
};
/* Sched-info collection is compiled in unconditionally here. */
static inline __attribute__((always_inline)) int sched_info_on(void)
{
 return 1;
}
enum cpu_idle_type {
 CPU_IDLE,
 CPU_NOT_IDLE,
 CPU_NEWLY_IDLE,
 CPU_MAX_IDLE_TYPES
};
enum powersavings_balance_level {
 POWERSAVINGS_BALANCE_NONE = 0,
 POWERSAVINGS_BALANCE_BASIC,
 POWERSAVINGS_BALANCE_WAKEUP,
 MAX_POWERSAVINGS_BALANCE_LEVELS
};
extern int sched_mc_power_savings, sched_smt_power_savings;
/*
 * Select SD_* balance flags for the MC domain.  Conditions are the
 * expanded branch-profiling wrappers; the flag values (0x0100, 0x1000,
 * 0x0002) are preprocessed SD_* constants — check sched.h for the names.
 */
static inline __attribute__((always_inline)) int sd_balance_for_mc_power(void)
{
 if (__builtin_constant_p(((sched_smt_power_savings))) ? !!((sched_smt_power_savings)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 864, }; ______r = !!((sched_smt_power_savings)); ______f.miss_hit[______r]++; ______r; }))
 return 0x0100;
 if (__builtin_constant_p(((!sched_mc_power_savings))) ? !!((!sched_mc_power_savings)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 867, }; ______r = !!((!sched_mc_power_savings)); ______f.miss_hit[______r]++; ______r; }))
 return 0x1000;
 return 0;
}
/* Same selection for the package (CPU) domain. */
static inline __attribute__((always_inline)) int sd_balance_for_package_power(void)
{
 if (__builtin_constant_p(((sched_mc_power_savings | sched_smt_power_savings))) ? !!((sched_mc_power_savings | sched_smt_power_savings)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 875, }; ______r = !!((sched_mc_power_savings | sched_smt_power_savings)); ______f.miss_hit[______r]++; ______r; }))
 return 0x0100;
 return 0x1000;
}
extern int __attribute__((weak)) arch_sd_sibiling_asym_packing(void);
/* Extra SD flags when any power-savings policy is active. */
static inline __attribute__((always_inline)) int sd_power_saving_flags(void)
{
 if (__builtin_constant_p(((sched_mc_power_savings | sched_smt_power_savings))) ? !!((sched_mc_power_savings | sched_smt_power_savings)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 891, }; ______r = !!((sched_mc_power_savings | sched_smt_power_savings)); ______f.miss_hit[______r]++; ______r; }))
 return 0x0002;
 return 0;
}
/* Shared CPU-power data for a scheduling group. */
struct sched_group_power {
 atomic_t ref;
 unsigned int power, power_orig;
};
/* One group in a scheduling domain; cpumask[] is a flexible trailing array. */
struct sched_group {
 struct sched_group *next;
 atomic_t ref;
 unsigned int group_weight;
 struct sched_group_power *sgp;
 unsigned long cpumask[0];
};
/* to_cpumask() expansion: view the trailing bitmap as a struct cpumask. */
static inline __attribute__((always_inline)) struct cpumask *sched_group_cpus(struct sched_group *sg)
{
 return ((struct cpumask *)(1 ? (sg->cpumask) : (void *)sizeof(__check_is_bitmap(sg->cpumask))));
}
struct sched_domain_attr {
 int relax_domain_level;
};
extern int sched_domain_level_max;
/* One level of the scheduling-domain hierarchy, with balance statistics. */
struct sched_domain {
 struct sched_domain *parent;
 struct sched_domain *child;
 struct sched_group *groups;
 unsigned long min_interval;
 unsigned long max_interval;
 unsigned int busy_factor;
 unsigned int imbalance_pct;
 unsigned int cache_nice_tries;
 unsigned int busy_idx;
 unsigned int idle_idx;
 unsigned int newidle_idx;
 unsigned int wake_idx;
 unsigned int forkexec_idx;
 unsigned int smt_gain;
 int flags;
 int level;
 unsigned long last_balance;
 unsigned int balance_interval;
 unsigned int nr_balance_failed;
 u64 last_update;
 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
 unsigned int alb_count;
 unsigned int alb_failed;
 unsigned int alb_pushed;
 unsigned int sbe_count;
 unsigned int sbe_balanced;
 unsigned int sbe_pushed;
 unsigned int sbf_count;
 unsigned int sbf_balanced;
 unsigned int sbf_pushed;
 unsigned int ttwu_wake_remote;
 unsigned int ttwu_move_affine;
 unsigned int ttwu_move_balance;
 char *name;
 union {
  void *private;
  struct rcu_head rcu;
 };
 unsigned int span_weight;
 unsigned long span[0];
};
/* View the domain's trailing span bitmap as a struct cpumask. */
static inline __attribute__((always_inline)) struct cpumask *sched_domain_span(struct sched_domain *sd)
{
 return ((struct cpumask *)(1 ? (sd->span) : (void *)sizeof(__check_is_bitmap(sd->span))));
}
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
        struct sched_domain_attr *dattr_new);
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
/* 1 if @sd has a parent domain with @flag set (branch-profiled condition). */
static inline __attribute__((always_inline)) int test_sd_parent(struct sched_domain *sd, int flag)
{
 if (__builtin_constant_p(((sd->parent && (sd->parent->flags & flag)))) ? !!((sd->parent && (sd->parent->flags & flag))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1029, }; ______r = !!((sd->parent && (sd->parent->flags & flag))); ______f.miss_hit[______r]++; ______r; }))
 return 1;
 return 0;
}
unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
15422struct io_context;
15423static inline __attribute__((always_inline)) void prefetch_stack(struct task_struct *t) { }
15424struct audit_context;
15425struct mempolicy;
15426struct pipe_inode_info;
15427struct uts_namespace;
15428struct rq;
15429struct sched_domain;
/* Hook table for one scheduling policy class.  Classes are linked via
 * ->next and the core scheduler dispatches through these callbacks. */
struct sched_class {
 const struct sched_class *next;
 /* Run-queue maintenance for this class on @rq. */
 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 void (*yield_task) (struct rq *rq);
 bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 /* Context-switch selection: choose next task / retire previous one. */
 struct task_struct * (*pick_next_task) (struct rq *rq);
 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 void (*post_schedule) (struct rq *this_rq);
 void (*task_waking) (struct task_struct *task);
 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 void (*set_cpus_allowed)(struct task_struct *p,
     const struct cpumask *newmask);
 void (*rq_online)(struct rq *rq);
 void (*rq_offline)(struct rq *rq);
 void (*set_curr_task) (struct rq *rq);
 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 void (*task_fork) (struct task_struct *p);
 /* Notifications for class / priority transitions of a task. */
 void (*switched_from) (struct rq *this_rq, struct task_struct *task);
 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
        int oldprio);
 unsigned int (*get_rr_interval) (struct rq *rq,
      struct task_struct *task);
 void (*task_move_group) (struct task_struct *p, int on_rq);
};
/* Scheduler load weight plus its cached inverse (inv_weight) used to
 * avoid divisions in weight arithmetic. */
struct load_weight {
 unsigned long weight, inv_weight;
};
/* Per-entity schedstat accumulators: wait/sleep/block timing plus
 * migration and wakeup event counters. */
struct sched_statistics {
 /* Run-queue wait timing. */
 u64 wait_start;
 u64 wait_max;
 u64 wait_count;
 u64 wait_sum;
 u64 iowait_count;
 u64 iowait_sum;
 /* Sleep / block timing. */
 u64 sleep_start;
 u64 sleep_max;
 s64 sum_sleep_runtime;
 u64 block_start;
 u64 block_max;
 u64 exec_max;
 u64 slice_max;
 /* Migration outcome counters. */
 u64 nr_migrations_cold;
 u64 nr_failed_migrations_affine;
 u64 nr_failed_migrations_running;
 u64 nr_failed_migrations_hot;
 u64 nr_forced_migrations;
 /* Wakeup classification counters. */
 u64 nr_wakeups;
 u64 nr_wakeups_sync;
 u64 nr_wakeups_migrate;
 u64 nr_wakeups_local;
 u64 nr_wakeups_remote;
 u64 nr_wakeups_affine;
 u64 nr_wakeups_affine_attempts;
 u64 nr_wakeups_passive;
 u64 nr_wakeups_idle;
};
/* CFS scheduling entity: run-queue linkage, runtime accounting and
 * (for group scheduling) the parent/child queue pointers. */
struct sched_entity {
 struct load_weight load;
 struct rb_node run_node; /* node in the class's rbtree */
 struct list_head group_node;
 unsigned int on_rq;
 /* Runtime accounting, in scheduler clock units. */
 u64 exec_start;
 u64 sum_exec_runtime;
 u64 vruntime;
 u64 prev_sum_exec_runtime;
 u64 nr_migrations;
 struct sched_statistics statistics;
 /* Group-scheduling hierarchy: owning entity and queues. */
 struct sched_entity *parent;
 struct cfs_rq *cfs_rq;
 struct cfs_rq *my_q;
};
/* Scheduling entity for the realtime class. */
struct sched_rt_entity {
 struct list_head run_list;
 unsigned long timeout;
 unsigned int time_slice;
 int nr_cpus_allowed;
 struct sched_rt_entity *back;
};
15513struct rcu_node;
/* Index space for task_struct::perf_event_ctxp[]; perf_nr_task_contexts
 * is the array length, perf_invalid_context a sentinel. */
enum perf_event_task_context {
 perf_invalid_context = -1,
 perf_hw_context = 0,
 perf_sw_context,
 perf_nr_task_contexts,
};
/* Per-task (thread) control block, preprocessed from include/linux/sched.h.
 * Field presence and order are config-dependent; layout must not change. */
struct task_struct {
 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped (kernel convention) */
 void *stack; /* base of the thread_info/kernel-stack allocation */
 atomic_t usage; /* reference count; see put_task_struct() */
 unsigned int flags;
 unsigned int ptrace;
 struct task_struct *wake_entry;
 int on_cpu;
 int on_rq;
 /* Scheduler state. */
 int prio, static_prio, normal_prio;
 unsigned int rt_priority;
 const struct sched_class *sched_class;
 struct sched_entity se;
 struct sched_rt_entity rt;
 struct hlist_head preempt_notifiers;
 unsigned char fpu_counter;
 unsigned int btrace_seq;
 unsigned int policy;
 cpumask_t cpus_allowed;
 /* Preemptible-RCU read-side bookkeeping; see rcu_copy_process(). */
 int rcu_read_lock_nesting;
 char rcu_read_unlock_special;
 int rcu_boosted;
 struct list_head rcu_node_entry;
 struct rcu_node *rcu_blocked_node;
 struct rt_mutex *rcu_boost_mutex;
 struct sched_info sched_info;
 struct list_head tasks;
 struct plist_node pushable_tasks;
 struct mm_struct *mm, *active_mm;
 /* Exit / signal-on-exit state. */
 int exit_state;
 int exit_code, exit_signal;
 int pdeath_signal;
 unsigned int group_stop;
 unsigned int personality;
 unsigned did_exec:1;
 unsigned in_execve:1;
 unsigned in_iowait:1;
 unsigned sched_reset_on_fork:1;
 unsigned sched_contributes_to_load:1;
 pid_t pid;
 pid_t tgid;
 unsigned long stack_canary;
 /* Process tree linkage. */
 struct task_struct *real_parent;
 struct task_struct *parent;
 struct list_head children;
 struct list_head sibling;
 struct task_struct *group_leader;
 struct list_head ptraced;
 struct list_head ptrace_entry;
 struct pid_link pids[PIDTYPE_MAX]; /* PID/PGID/SID links; see task_pid() & co. */
 struct list_head thread_group;
 struct completion *vfork_done;
 int *set_child_tid;
 int *clear_child_tid;
 /* CPU-time accounting. */
 cputime_t utime, stime, utimescaled, stimescaled;
 cputime_t gtime;
 cputime_t prev_utime, prev_stime;
 unsigned long nvcsw, nivcsw;
 struct timespec start_time;
 struct timespec real_start_time;
 unsigned long min_flt, maj_flt;
 struct task_cputime cputime_expires;
 struct list_head cpu_timers[3];
 /* Credentials. */
 const struct cred *real_cred;
 const struct cred *cred;
 struct cred *replacement_session_keyring;
 char comm[16]; /* executable name, not NUL-guaranteed beyond 15 chars */
 int link_count, total_link_count;
 struct sysv_sem sysvsem;
 unsigned long last_switch_count;
 struct thread_struct thread; /* arch-specific CPU state */
 struct fs_struct *fs;
 struct files_struct *files;
 struct nsproxy *nsproxy;
 /* Signal handling. */
 struct signal_struct *signal;
 struct sighand_struct *sighand;
 sigset_t blocked, real_blocked;
 sigset_t saved_sigmask;
 struct sigpending pending;
 unsigned long sas_ss_sp; /* alternate signal stack; see on_sig_stack() */
 size_t sas_ss_size;
 int (*notifier)(void *priv);
 void *notifier_data;
 sigset_t *notifier_mask;
 struct audit_context *audit_context;
 uid_t loginuid;
 unsigned int sessionid;
 seccomp_t seccomp;
 u32 parent_exec_id;
 u32 self_exec_id;
 spinlock_t alloc_lock; /* see task_lock()/task_unlock() */
 struct irqaction *irqaction;
 /* PI-mutex and blocked-on tracking. */
 raw_spinlock_t pi_lock;
 struct plist_head pi_waiters;
 struct rt_mutex_waiter *pi_blocked_on;
 struct mutex_waiter *blocked_on;
 /* irq-flags tracing state. */
 unsigned int irq_events;
 unsigned long hardirq_enable_ip;
 unsigned long hardirq_disable_ip;
 unsigned int hardirq_enable_event;
 unsigned int hardirq_disable_event;
 int hardirqs_enabled;
 int hardirq_context;
 unsigned long softirq_disable_ip;
 unsigned long softirq_enable_ip;
 unsigned int softirq_disable_event;
 unsigned int softirq_enable_event;
 int softirqs_enabled;
 int softirq_context;
 /* lockdep state. */
 u64 curr_chain_key;
 int lockdep_depth;
 unsigned int lockdep_recursion;
 struct held_lock held_locks[48UL];
 gfp_t lockdep_reclaim_gfp;
 void *journal_info;
 struct bio_list *bio_list;
 struct blk_plug *plug;
 struct reclaim_state *reclaim_state;
 struct backing_dev_info *backing_dev_info;
 struct io_context *io_context;
 unsigned long ptrace_message;
 siginfo_t *last_siginfo;
 struct task_io_accounting ioac;
 u64 acct_rss_mem1;
 u64 acct_vm_mem1;
 cputime_t acct_timexpd;
 /* cgroups / futex / perf. */
 struct css_set *cgroups;
 struct list_head cg_list;
 struct robust_list_head *robust_list;
 struct list_head pi_state_list;
 struct futex_pi_state *pi_state_cache;
 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
 struct mutex perf_event_mutex;
 struct list_head perf_event_list;
 atomic_t fs_excl;
 struct rcu_head rcu;
 struct pipe_inode_info *splice_pipe;
 struct task_delay_info *delays;
 struct prop_local_single dirties;
 int latency_record_count;
 struct latency_record latency_record[32];
 unsigned long timer_slack_ns;
 unsigned long default_timer_slack_ns;
 struct list_head *scm_work_list;
 /* ftrace function-graph tracing state. */
 int curr_ret_stack;
 struct ftrace_ret_stack *ret_stack;
 unsigned long long ftrace_timestamp;
 atomic_t trace_overrun;
 atomic_t tracing_graph_pause;
 unsigned long trace;
 unsigned long trace_recursion;
 atomic_t ptrace_bp_refcnt;
};
/* Return 1 when @prio is in the realtime range (prio < 100), else 0.
 * The condition is buried in expanded likely()/branch-profiling macros;
 * the underlying test is simply (prio < 100). */
static inline __attribute__((always_inline)) int rt_prio(int prio)
{
 if (__builtin_constant_p((((__builtin_constant_p(prio < 100) ? !!(prio < 100) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1599, }; ______r = __builtin_expect(!!(prio < 100), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(prio < 100) ? !!(prio < 100) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1599, }; ______r = __builtin_expect(!!(prio < 100), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1599, }; ______r = !!(((__builtin_constant_p(prio < 100) ? !!(prio < 100) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1599, }; ______r = __builtin_expect(!!(prio < 100), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
  return 1;
 return 0;
}
15679static inline __attribute__((always_inline)) int rt_task(struct task_struct *p)
15680{
15681 return rt_prio(p->prio);
15682}
15683static inline __attribute__((always_inline)) struct pid *task_pid(struct task_struct *task)
15684{
15685 return task->pids[PIDTYPE_PID].pid;
15686}
15687static inline __attribute__((always_inline)) struct pid *task_tgid(struct task_struct *task)
15688{
15689 return task->group_leader->pids[PIDTYPE_PID].pid;
15690}
15691static inline __attribute__((always_inline)) struct pid *task_pgrp(struct task_struct *task)
15692{
15693 return task->group_leader->pids[PIDTYPE_PGID].pid;
15694}
15695static inline __attribute__((always_inline)) struct pid *task_session(struct task_struct *task)
15696{
15697 return task->group_leader->pids[PIDTYPE_SID].pid;
15698}
15699struct pid_namespace;
15700pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
15701 struct pid_namespace *ns);
15702static inline __attribute__((always_inline)) pid_t task_pid_nr(struct task_struct *tsk)
15703{
15704 return tsk->pid;
15705}
15706static inline __attribute__((always_inline)) pid_t task_pid_nr_ns(struct task_struct *tsk,
15707 struct pid_namespace *ns)
15708{
15709 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
15710}
15711static inline __attribute__((always_inline)) pid_t task_pid_vnr(struct task_struct *tsk)
15712{
15713 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ((void *)0));
15714}
15715static inline __attribute__((always_inline)) pid_t task_tgid_nr(struct task_struct *tsk)
15716{
15717 return tsk->tgid;
15718}
15719pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
15720static inline __attribute__((always_inline)) pid_t task_tgid_vnr(struct task_struct *tsk)
15721{
15722 return pid_vnr(task_tgid(tsk));
15723}
15724static inline __attribute__((always_inline)) pid_t task_pgrp_nr_ns(struct task_struct *tsk,
15725 struct pid_namespace *ns)
15726{
15727 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
15728}
15729static inline __attribute__((always_inline)) pid_t task_pgrp_vnr(struct task_struct *tsk)
15730{
15731 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ((void *)0));
15732}
15733static inline __attribute__((always_inline)) pid_t task_session_nr_ns(struct task_struct *tsk,
15734 struct pid_namespace *ns)
15735{
15736 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
15737}
15738static inline __attribute__((always_inline)) pid_t task_session_vnr(struct task_struct *tsk)
15739{
15740 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ((void *)0));
15741}
15742static inline __attribute__((always_inline)) pid_t task_pgrp_nr(struct task_struct *tsk)
15743{
15744 return task_pgrp_nr_ns(tsk, &init_pid_ns);
15745}
15746static inline __attribute__((always_inline)) int pid_alive(struct task_struct *p)
15747{
15748 return p->pids[PIDTYPE_PID].pid != ((void *)0);
15749}
15750static inline __attribute__((always_inline)) int is_global_init(struct task_struct *tsk)
15751{
15752 return tsk->pid == 1;
15753}
15754extern int is_container_init(struct task_struct *tsk);
15755extern struct pid *cad_pid;
15756extern void free_task(struct task_struct *tsk);
15757extern void __put_task_struct(struct task_struct *t);
/* Drop one reference on @t; free it via __put_task_struct() when the usage
 * count reaches zero.  The atomic_dec_and_test() result is routed through
 * expanded ftrace branch-profiling machinery. */
static inline __attribute__((always_inline)) void put_task_struct(struct task_struct *t)
{
 if (__builtin_constant_p(((atomic_dec_and_test(&t->usage)))) ? !!((atomic_dec_and_test(&t->usage))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 1750, }; ______r = !!((atomic_dec_and_test(&t->usage))); ______f.miss_hit[______r]++; ______r; }))
  __put_task_struct(t);
}
15763extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
15764extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
15765extern void task_clear_group_stop_pending(struct task_struct *task);
15766static inline __attribute__((always_inline)) void rcu_copy_process(struct task_struct *p)
15767{
15768 p->rcu_read_lock_nesting = 0;
15769 p->rcu_read_unlock_special = 0;
15770 p->rcu_blocked_node = ((void *)0);
15771 p->rcu_boost_mutex = ((void *)0);
15772 INIT_LIST_HEAD(&p->rcu_node_entry);
15773}
15774extern void do_set_cpus_allowed(struct task_struct *p,
15775 const struct cpumask *new_mask);
15776extern int set_cpus_allowed_ptr(struct task_struct *p,
15777 const struct cpumask *new_mask);
15778static inline __attribute__((always_inline)) int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
15779{
15780 return set_cpus_allowed_ptr(p, &new_mask);
15781}
15782extern unsigned long long __attribute__((no_instrument_function)) sched_clock(void);
15783extern u64 cpu_clock(int cpu);
15784extern u64 local_clock(void);
15785extern u64 sched_clock_cpu(int cpu);
15786extern void sched_clock_init(void);
15787extern int sched_clock_stable;
15788extern void sched_clock_tick(void);
15789extern void sched_clock_idle_sleep_event(void);
15790extern void sched_clock_idle_wakeup_event(u64 delta_ns);
15791extern void enable_sched_clock_irqtime(void);
15792extern void disable_sched_clock_irqtime(void);
15793extern unsigned long long
15794task_sched_runtime(struct task_struct *task);
15795extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
15796extern void sched_exec(void);
15797extern void sched_clock_idle_sleep_event(void);
15798extern void sched_clock_idle_wakeup_event(u64 delta_ns);
15799extern void idle_task_exit(void);
/* Idle-CPU kick; compiled to a no-op in this configuration. */
static inline __attribute__((always_inline)) void wake_up_idle_cpu(int cpu)
{
}
15801extern unsigned int sysctl_sched_latency;
15802extern unsigned int sysctl_sched_min_granularity;
15803extern unsigned int sysctl_sched_wakeup_granularity;
15804extern unsigned int sysctl_sched_child_runs_first;
/* How scheduler tunables scale with the number of CPUs; selected by
 * sysctl_sched_tunable_scaling below. */
enum sched_tunable_scaling {
 SCHED_TUNABLESCALING_NONE,
 SCHED_TUNABLESCALING_LOG,
 SCHED_TUNABLESCALING_LINEAR,
 SCHED_TUNABLESCALING_END, /* count / upper bound, not a real mode */
};
15811extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
15812extern unsigned int sysctl_sched_migration_cost;
15813extern unsigned int sysctl_sched_nr_migrate;
15814extern unsigned int sysctl_sched_time_avg;
15815extern unsigned int sysctl_timer_migration;
15816extern unsigned int sysctl_sched_shares_window;
15817int sched_proc_update_handler(struct ctl_table *table, int write,
15818 void *buffer, size_t *length,
15819 loff_t *ppos);
15820static inline __attribute__((always_inline)) unsigned int get_sysctl_timer_migration(void)
15821{
15822 return sysctl_timer_migration;
15823}
15824extern unsigned int sysctl_sched_rt_period;
15825extern int sysctl_sched_rt_runtime;
15826int sched_rt_handler(struct ctl_table *table, int write,
15827 void *buffer, size_t *lenp,
15828 loff_t *ppos);
15829extern unsigned int sysctl_sched_autogroup_enabled;
15830extern void sched_autogroup_create_attach(struct task_struct *p);
15831extern void sched_autogroup_detach(struct task_struct *p);
15832extern void sched_autogroup_fork(struct signal_struct *sig);
15833extern void sched_autogroup_exit(struct signal_struct *sig);
15834extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
15835extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
15836extern int rt_mutex_getprio(struct task_struct *p);
15837extern void rt_mutex_setprio(struct task_struct *p, int prio);
15838extern void rt_mutex_adjust_pi(struct task_struct *p);
15839extern bool yield_to(struct task_struct *p, bool preempt);
15840extern void set_user_nice(struct task_struct *p, long nice);
15841extern int task_prio(const struct task_struct *p);
15842extern int task_nice(const struct task_struct *p);
15843extern int can_nice(const struct task_struct *p, const int nice);
15844extern int task_curr(const struct task_struct *p);
15845extern int idle_cpu(int cpu);
15846extern int sched_setscheduler(struct task_struct *, int,
15847 const struct sched_param *);
15848extern int sched_setscheduler_nocheck(struct task_struct *, int,
15849 const struct sched_param *);
15850extern struct task_struct *idle_task(int cpu);
15851extern struct task_struct *curr_task(int cpu);
15852extern void set_curr_task(int cpu, struct task_struct *p);
15853void yield(void);
15854extern struct exec_domain default_exec_domain;
/* The thread_info and the kernel stack share one 8 KiB ((1<<12)<<1)
 * allocation; the union overlays both views. */
union thread_union {
 struct thread_info thread_info;
 unsigned long stack[(((1UL) << 12) << 1)/sizeof(long)];
};
/* Non-zero when @addr is at the very end of the 8 KiB thread-stack area:
 * the offset-within-stack bits of (addr + wordsize - 1) are all zero. */
static inline __attribute__((always_inline)) int kstack_end(void *addr)
{
 return !(((unsigned long)addr+sizeof(void*)-1) & ((((1UL) << 12) << 1)-sizeof(void*)));
}
15863extern union thread_union init_thread_union;
15864extern struct task_struct init_task;
15865extern struct mm_struct init_mm;
15866extern struct pid_namespace init_pid_ns;
15867extern struct task_struct *find_task_by_vpid(pid_t nr);
15868extern struct task_struct *find_task_by_pid_ns(pid_t nr,
15869 struct pid_namespace *ns);
15870extern void __set_special_pids(struct pid *pid);
15871extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
15872static inline __attribute__((always_inline)) struct user_struct *get_uid(struct user_struct *u)
15873{
15874 atomic_inc(&u->__count);
15875 return u;
15876}
15877extern void free_uid(struct user_struct *);
15878extern void release_uids(struct user_namespace *ns);
15879extern void xtime_update(unsigned long ticks);
15880extern int wake_up_state(struct task_struct *tsk, unsigned int state);
15881extern int wake_up_process(struct task_struct *tsk);
15882extern void wake_up_new_task(struct task_struct *tsk);
15883 extern void kick_process(struct task_struct *tsk);
15884extern void sched_fork(struct task_struct *p);
15885extern void sched_dead(struct task_struct *p);
15886extern void proc_caches_init(void);
15887extern void flush_signals(struct task_struct *);
15888extern void __flush_signals(struct task_struct *);
15889extern void ignore_signals(struct task_struct *);
15890extern void flush_signal_handlers(struct task_struct *, int force_default);
15891extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
/* dequeue_signal() with tsk->sighand->siglock held and local irqs saved.
 * The first do/while is the expanded spin_lock_irqsave() macro (the
 * ({...}) merely type-checks @flags); do not reorder these statements. */
static inline __attribute__((always_inline)) int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
 unsigned long flags;
 int ret;
 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&tsk->sighand->siglock)); } while (0); } while (0);
 ret = dequeue_signal(tsk, mask, info);
 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 return ret;
}
15901extern void block_all_signals(int (*notifier)(void *priv), void *priv,
15902 sigset_t *mask);
15903extern void unblock_all_signals(void);
15904extern void release_task(struct task_struct * p);
15905extern int send_sig_info(int, struct siginfo *, struct task_struct *);
15906extern int force_sigsegv(int, struct task_struct *);
15907extern int force_sig_info(int, struct siginfo *, struct task_struct *);
15908extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
15909extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
15910extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
15911extern int kill_pgrp(struct pid *pid, int sig, int priv);
15912extern int kill_pid(struct pid *pid, int sig, int priv);
15913extern int kill_proc_info(int, struct siginfo *, pid_t);
15914extern int do_notify_parent(struct task_struct *, int);
15915extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
15916extern void force_sig(int, struct task_struct *);
15917extern int send_sig(int, struct task_struct *, int);
15918extern int zap_other_threads(struct task_struct *p);
15919extern struct sigqueue *sigqueue_alloc(void);
15920extern void sigqueue_free(struct sigqueue *);
15921extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
15922extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
15923extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
15924static inline __attribute__((always_inline)) int kill_cad_pid(int sig, int priv)
15925{
15926 return kill_pid(cad_pid, sig, priv);
15927}
/* Non-zero when @sp lies within the current task's alternate signal stack
 * [sas_ss_sp, sas_ss_sp + sas_ss_size].  The strict sp > sas_ss_sp check
 * first guards the unsigned subtraction against underflow. */
static inline __attribute__((always_inline)) int on_sig_stack(unsigned long sp)
{
 return sp > get_current()->sas_ss_sp &&
 sp - get_current()->sas_ss_sp <= get_current()->sas_ss_size;
}
15933static inline __attribute__((always_inline)) int sas_ss_flags(unsigned long sp)
15934{
15935 return (get_current()->sas_ss_size == 0 ? 2
15936 : on_sig_stack(sp) ? 1 : 0);
15937}
15938extern struct mm_struct * mm_alloc(void);
15939extern void __mmdrop(struct mm_struct *);
/* Drop one mm_count reference on @mm and free it via __mmdrop() when the
 * count hits zero.  The atomic_dec_and_test() result is wrapped in expanded
 * likely()/branch-profiling macros. */
static inline __attribute__((always_inline)) void mmdrop(struct mm_struct * mm)
{
 if (__builtin_constant_p((((__builtin_constant_p(atomic_dec_and_test(&mm->mm_count)) ? !!(atomic_dec_and_test(&mm->mm_count)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2204, }; ______r = __builtin_expect(!!(atomic_dec_and_test(&mm->mm_count)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(atomic_dec_and_test(&mm->mm_count)) ? !!(atomic_dec_and_test(&mm->mm_count)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2204, }; ______r = __builtin_expect(!!(atomic_dec_and_test(&mm->mm_count)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2204, }; ______r = !!(((__builtin_constant_p(atomic_dec_and_test(&mm->mm_count)) ? !!(atomic_dec_and_test(&mm->mm_count)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2204, }; ______r = __builtin_expect(!!(atomic_dec_and_test(&mm->mm_count)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
  __mmdrop(mm);
}
15945extern void mmput(struct mm_struct *);
15946extern struct mm_struct *get_task_mm(struct task_struct *task);
15947extern void mm_release(struct task_struct *, struct mm_struct *);
15948extern struct mm_struct *dup_mm(struct task_struct *tsk);
15949extern int copy_thread(unsigned long, unsigned long, unsigned long,
15950 struct task_struct *, struct pt_regs *);
15951extern void flush_thread(void);
15952extern void exit_thread(void);
15953extern void exit_files(struct task_struct *);
15954extern void __cleanup_sighand(struct sighand_struct *);
15955extern void exit_itimers(struct signal_struct *);
15956extern void flush_itimer_signals(void);
15957extern void do_group_exit(int);
15958extern void daemonize(const char *, ...);
15959extern int allow_signal(int);
15960extern int disallow_signal(int);
15961extern int do_execve(const char *,
15962 const char * const *,
15963 const char * const *, struct pt_regs *);
15964extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int *, int *);
15965struct task_struct *fork_idle(int);
15966extern void set_task_comm(struct task_struct *tsk, char *from);
15967extern char *get_task_comm(char *to, struct task_struct *tsk);
15968void scheduler_ipi(void);
15969extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
15970extern bool current_is_single_threaded(void);
15971static inline __attribute__((always_inline)) int get_nr_threads(struct task_struct *tsk)
15972{
15973 return tsk->signal->nr_threads;
15974}
15975static inline __attribute__((always_inline)) int has_group_leader_pid(struct task_struct *p)
15976{
15977 return p->pid == p->tgid;
15978}
15979static inline __attribute__((always_inline))
15980int same_thread_group(struct task_struct *p1, struct task_struct *p2)
15981{
15982 return p1->tgid == p2->tgid;
15983}
/* Next task on @p's thread_group list.  The expression is the expanded
 * list_entry_rcu()/container_of() pair: an rcu-safe load of
 * p->thread_group.next followed by the offset adjustment back to the
 * enclosing task_struct.  Left verbatim — the statement-expression nesting
 * is token-order sensitive. */
static inline __attribute__((always_inline)) struct task_struct *next_thread(const struct task_struct *p)
{
 return ({typeof (*p->thread_group.next) *__ptr = (typeof (*p->thread_group.next) *)p->thread_group.next; ({ const typeof( ((struct task_struct *)0)->thread_group ) *__mptr = ((typeof(p->thread_group.next))({ typeof(*(__ptr)) *_________p1 = (typeof(*(__ptr))* )(*(volatile typeof((__ptr)) *)&((__ptr))); do { } while (0); ; do { } while (0); ((typeof(*(__ptr)) *)(_________p1)); })); (struct task_struct *)( (char *)__mptr - __builtin_offsetof(struct task_struct,thread_group) );}); })
 ;
}
15989static inline __attribute__((always_inline)) int thread_group_empty(struct task_struct *p)
15990{
15991 return list_empty(&p->thread_group);
15992}
15993static inline __attribute__((always_inline)) int task_detached(struct task_struct *p)
15994{
15995 return p->exit_signal == -1;
15996}
15997static inline __attribute__((always_inline)) void task_lock(struct task_struct *p)
15998{
15999 spin_lock(&p->alloc_lock);
16000}
16001static inline __attribute__((always_inline)) void task_unlock(struct task_struct *p)
16002{
16003 spin_unlock(&p->alloc_lock);
16004}
16005extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
16006 unsigned long *flags);
16007static inline __attribute__((always_inline)) void unlock_task_sighand(struct task_struct *tsk,
16008 unsigned long *flags)
16009{
16010 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
16011}
16012static inline __attribute__((always_inline)) void threadgroup_fork_read_lock(struct task_struct *tsk)
16013{
16014 down_read(&tsk->signal->threadgroup_fork_lock);
16015}
16016static inline __attribute__((always_inline)) void threadgroup_fork_read_unlock(struct task_struct *tsk)
16017{
16018 up_read(&tsk->signal->threadgroup_fork_lock);
16019}
16020static inline __attribute__((always_inline)) void threadgroup_fork_write_lock(struct task_struct *tsk)
16021{
16022 down_write(&tsk->signal->threadgroup_fork_lock);
16023}
16024static inline __attribute__((always_inline)) void threadgroup_fork_write_unlock(struct task_struct *tsk)
16025{
16026 up_write(&tsk->signal->threadgroup_fork_lock);
16027}
16028static inline __attribute__((always_inline)) void setup_thread_stack(struct task_struct *p, struct task_struct *org)
16029{
16030 *((struct thread_info *)(p)->stack) = *((struct thread_info *)(org)->stack);
16031 ((struct thread_info *)(p)->stack)->task = p;
16032}
16033static inline __attribute__((always_inline)) unsigned long *end_of_stack(struct task_struct *p)
16034{
16035 return (unsigned long *)(((struct thread_info *)(p)->stack) + 1);
16036}
16037static inline __attribute__((always_inline)) int object_is_on_stack(void *obj)
16038{
16039 void *stack = ((get_current())->stack);
16040 return (obj >= stack) && (obj < (stack + (((1UL) << 12) << 1)));
16041}
16042extern void thread_info_cache_init(void);
16043static inline __attribute__((always_inline)) void set_tsk_thread_flag(struct task_struct *tsk, int flag)
16044{
16045 set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
16046}
16047static inline __attribute__((always_inline)) void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
16048{
16049 clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
16050}
16051static inline __attribute__((always_inline)) int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
16052{
16053 return test_and_set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
16054}
16055static inline __attribute__((always_inline)) int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
16056{
16057 return test_and_clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
16058}
16059static inline __attribute__((always_inline)) int test_tsk_thread_flag(struct task_struct *tsk, int flag)
16060{
16061 return test_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
16062}
/* Mark @tsk for rescheduling; flag 3 is TIF_NEED_RESCHED in the original
 * headers — presumably, the literal survives preprocessing. */
static inline __attribute__((always_inline)) void set_tsk_need_resched(struct task_struct *tsk)
{
 const int tif_need_resched = 3;
 set_tsk_thread_flag(tsk, tif_need_resched);
}
/* Clear the reschedule-pending flag (3) on @tsk. */
static inline __attribute__((always_inline)) void clear_tsk_need_resched(struct task_struct *tsk)
{
 const int tif_need_resched = 3;
 clear_tsk_thread_flag(tsk, tif_need_resched);
}
/* Non-zero when @tsk's reschedule-pending flag (3) is set; the test is
 * wrapped in an expanded unlikely()/branch-profiling macro. */
static inline __attribute__((always_inline)) int test_tsk_need_resched(struct task_struct *tsk)
{
 return (__builtin_constant_p(test_tsk_thread_flag(tsk,3)) ? !!(test_tsk_thread_flag(tsk,3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2458, }; ______r = __builtin_expect(!!(test_tsk_thread_flag(tsk,3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
}
/* Flag the current task (thread flag 2, the signal-pending flag used by
 * signal_pending()) and return -513 so the syscall gets restarted. */
static inline __attribute__((always_inline)) int restart_syscall(void)
{
 struct task_struct *me = get_current();
 set_tsk_thread_flag(me, 2);
 return -513;
}
/* Non-zero when @p's signal-pending thread flag (2) is set; wrapped in an
 * expanded unlikely()/branch-profiling macro. */
static inline __attribute__((always_inline)) int signal_pending(struct task_struct *p)
{
 return (__builtin_constant_p(test_tsk_thread_flag(p,2)) ? !!(test_tsk_thread_flag(p,2)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2469, }; ______r = __builtin_expect(!!(test_tsk_thread_flag(p,2)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
}
/* Non-zero when signal 9 (SIGKILL) is a member of @p's pending set; the
 * sigismember() call and the unlikely() wrapper are fully macro-expanded. */
static inline __attribute__((always_inline)) int __fatal_signal_pending(struct task_struct *p)
{
 return (__builtin_constant_p((__builtin_constant_p(9) ? __const_sigismember((&p->pending.signal), (9)) : __gen_sigismember((&p->pending.signal), (9)))) ? !!((__builtin_constant_p(9) ? __const_sigismember((&p->pending.signal), (9)) : __gen_sigismember((&p->pending.signal), (9)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2474, }; ______r = __builtin_expect(!!((__builtin_constant_p(9) ? __const_sigismember((&p->pending.signal), (9)) : __gen_sigismember((&p->pending.signal), (9)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
}
/* A fatal signal is pending iff some signal is pending at all AND SIGKILL
 * is in @p's pending set. Guard-clause form of the short-circuit &&:
 * the cheap flag test is still evaluated first, exactly as before. */
static inline __attribute__((always_inline)) int fatal_signal_pending(struct task_struct *p)
{
 if (!signal_pending(p))
  return 0;
 return __fatal_signal_pending(p);
}
/* Should a task sleeping in @state be woken by a pending signal?
 * - state without bits 1|128 (interruptible/wakekill — presumably
 *   TASK_INTERRUPTIBLE|TASK_WAKEKILL; confirm against sched.h): never.
 * - no signal pending: no.
 * - otherwise: yes if fully interruptible (bit 1) or the pending signal
 *   is fatal (SIGKILL).
 * The if-conditions are the preprocessed ftrace branch-profiling form. */
static inline __attribute__((always_inline)) int signal_pending_state(long state, struct task_struct *p)
{
 if (__builtin_constant_p(((!(state & (1 | 128))))) ? !!((!(state & (1 | 128)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2484, }; ______r = !!((!(state & (1 | 128)))); ______f.miss_hit[______r]++; ______r; }))
  return 0;
 if (__builtin_constant_p(((!signal_pending(p)))) ? !!((!signal_pending(p))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2486, }; ______r = !!((!signal_pending(p))); ______f.miss_hit[______r]++; ______r; }))
  return 0;
 return (state & 1) || __fatal_signal_pending(p);
}
/* Non-zero when the CURRENT thread's flag bit 3 (need-resched) is set —
 * same test as test_tsk_need_resched() but via current_thread_info(). */
static inline __attribute__((always_inline)) int need_resched(void)
{
 return (__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/sched.h", .line = 2494, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
}
/* Conditional-reschedule entry points; defined elsewhere in the kernel. */
extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
extern int __cond_resched_softirq(void);
16107static inline __attribute__((always_inline)) int spin_needbreak(spinlock_t *lock)
16108{
16109 return spin_is_contended(lock);
16110}
/* Thread-group CPU-time accounting helpers; defined elsewhere. */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
/* Initialise the signal struct's cputimer spinlock. The expansion below is
 * spin_lock_init(): spinlock_check() type-checks the argument and
 * __raw_spin_lock_init() registers the lock with a static lockdep class
 * key under the stringified lock name. */
static inline __attribute__((always_inline)) void thread_group_cputime_init(struct signal_struct *sig)
{
 do { spinlock_check(&sig->cputimer.lock); do { static struct lock_class_key __key; __raw_spin_lock_init((&(&sig->cputimer.lock)->rlock), "&(&sig->cputimer.lock)->rlock", &__key); } while (0); } while (0);
}
/* Signal-pending recomputation / wakeup helpers; defined elsewhere. */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void signal_wake_up(struct task_struct *t, int resume_stopped);
16120static inline __attribute__((always_inline)) unsigned int task_cpu(const struct task_struct *p)
16121{
16122 return ((struct thread_info *)(p)->stack)->cpu;
16123}
/* Scheduler placement/affinity and task-group (cgroup scheduling) API;
 * all defined elsewhere in the kernel. */
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
extern void normalize_rt_tasks(void);
extern struct task_group root_task_group;
extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);
extern int task_can_switch_user(struct user_struct *up,
    struct task_struct *tsk);
16136static inline __attribute__((always_inline)) void add_rchar(struct task_struct *tsk, ssize_t amt)
16137{
16138 tsk->ioac.rchar += amt;
16139}
16140static inline __attribute__((always_inline)) void add_wchar(struct task_struct *tsk, ssize_t amt)
16141{
16142 tsk->ioac.wchar += amt;
16143}
16144static inline __attribute__((always_inline)) void inc_syscr(struct task_struct *tsk)
16145{
16146 tsk->ioac.syscr++;
16147}
16148static inline __attribute__((always_inline)) void inc_syscw(struct task_struct *tsk)
16149{
16150 tsk->ioac.syscw++;
16151}
/* No-op stub: mm owner tracking is compiled out in this configuration
 * (presumably CONFIG_MM_OWNER=n — confirm against the kernel config). */
static inline __attribute__((always_inline)) void mm_update_next_owner(struct mm_struct *mm)
{
}
/* No-op stub, paired with mm_update_next_owner() above: owner tracking is
 * compiled out in this configuration. */
static inline __attribute__((always_inline)) void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
}
/* Current (soft) value of resource limit @limit for @tsk. The volatile
 * cast-and-dereference is the expansion of ACCESS_ONCE(): it forces a
 * single, untorn read of rlim_cur, which other threads may update. */
static inline __attribute__((always_inline)) unsigned long task_rlimit(const struct task_struct *tsk,
  unsigned int limit)
{
 return (*(volatile typeof(tsk->signal->rlim[limit].rlim_cur) *)&(tsk->signal->rlim[limit].rlim_cur));
}
/* Hard (maximum) value of resource limit @limit for @tsk, read once via
 * the ACCESS_ONCE() volatile-cast expansion, as in task_rlimit(). */
static inline __attribute__((always_inline)) unsigned long task_rlimit_max(const struct task_struct *tsk,
  unsigned int limit)
{
 return (*(volatile typeof(tsk->signal->rlim[limit].rlim_max) *)&(tsk->signal->rlim[limit].rlim_max));
}
/* Soft resource limit @limit of the current task. */
static inline __attribute__((always_inline)) unsigned long rlimit(unsigned int limit)
{
 struct task_struct *self = get_current();

 return task_rlimit(self, limit);
}
/* Hard resource limit @limit of the current task. */
static inline __attribute__((always_inline)) unsigned long rlimit_max(unsigned int limit)
{
 struct task_struct *self = get_current();

 return task_rlimit_max(self, limit);
}
/* Forward declarations used by the tracepoint prototypes below. */
struct irqaction;
struct softirq_action;
/* Preprocessed expansion of the "irq_handler_entry" tracepoint
 * (include/trace/events/irq.h line 54): trace_irq_handler_entry() walks
 * the registered probe list under rcu_read_lock_sched_notrace() when the
 * tracepoint's static-branch key is enabled, calling each probe with
 * (data, irq, action); the register/unregister wrappers attach/detach a
 * probe by name and check_trace_callback_type_* only type-checks @cb. */
extern struct tracepoint
 __tracepoint_irq_handler_entry
 ; static inline __attribute__((always_inline)) void
 trace_irq_handler_entry
 (int irq, struct irqaction *action) { if (__builtin_constant_p(((static_branch(&__tracepoint_irq_handler_entry.key)))) ? !!((static_branch(&__tracepoint_irq_handler_entry.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 54
 , }; ______r = !!((static_branch(&__tracepoint_irq_handler_entry.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 54
 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_irq_handler_entry)->funcs)) *_________p1 = (typeof(*((&__tracepoint_irq_handler_entry)->funcs))* )(*(volatile typeof(((&__tracepoint_irq_handler_entry)->funcs)) *)&(((&__tracepoint_irq_handler_entry)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_irq_handler_entry)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 54
 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, int irq, struct irqaction *action))(it_func))(__data, irq, action); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
 register_trace_irq_handler_entry
 (void (*probe)(void *__data, int irq, struct irqaction *action), void *data) { return tracepoint_probe_register("irq_handler_entry", (void *)probe, data); } static inline __attribute__((always_inline)) int
 unregister_trace_irq_handler_entry
 (void (*probe)(void *__data, int irq, struct irqaction *action), void *data) { return tracepoint_probe_unregister("irq_handler_entry", (void *)probe, data); } static inline __attribute__((always_inline)) void
 check_trace_callback_type_irq_handler_entry
 (void (*cb)(void *__data, int irq, struct irqaction *action)) { }
 ;
/* Preprocessed expansion of the "irq_handler_exit" tracepoint
 * (include/trace/events/irq.h line 85); same shape as irq_handler_entry
 * above, with the extra @ret argument (handler's return value) passed to
 * each probe. */
extern struct tracepoint
 __tracepoint_irq_handler_exit
 ; static inline __attribute__((always_inline)) void
 trace_irq_handler_exit
 (int irq, struct irqaction *action, int ret) { if (__builtin_constant_p(((static_branch(&__tracepoint_irq_handler_exit.key)))) ? !!((static_branch(&__tracepoint_irq_handler_exit.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 85
 , }; ______r = !!((static_branch(&__tracepoint_irq_handler_exit.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 85
 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_irq_handler_exit)->funcs)) *_________p1 = (typeof(*((&__tracepoint_irq_handler_exit)->funcs))* )(*(volatile typeof(((&__tracepoint_irq_handler_exit)->funcs)) *)&(((&__tracepoint_irq_handler_exit)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_irq_handler_exit)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 85
 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, int irq, struct irqaction *action, int ret))(it_func))(__data, irq, action, ret); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
 register_trace_irq_handler_exit
 (void (*probe)(void *__data, int irq, struct irqaction *action, int ret), void *data) { return tracepoint_probe_register("irq_handler_exit", (void *)probe, data); } static inline __attribute__((always_inline)) int
 unregister_trace_irq_handler_exit
 (void (*probe)(void *__data, int irq, struct irqaction *action, int ret), void *data) { return tracepoint_probe_unregister("irq_handler_exit", (void *)probe, data); } static inline __attribute__((always_inline)) void
 check_trace_callback_type_irq_handler_exit
 (void (*cb)(void *__data, int irq, struct irqaction *action, int ret)) { }
 ;
 ;
/* Preprocessed expansion of the "softirq_entry" tracepoint
 * (include/trace/events/irq.h line 117); probes receive the softirq
 * vector number @vec_nr. Structure mirrors the irq_handler tracepoints
 * above. */
extern struct tracepoint
 __tracepoint_softirq_entry
 ; static inline __attribute__((always_inline)) void
 trace_softirq_entry
 (unsigned int vec_nr) { if (__builtin_constant_p(((static_branch(&__tracepoint_softirq_entry.key)))) ? !!((static_branch(&__tracepoint_softirq_entry.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 117
 , }; ______r = !!((static_branch(&__tracepoint_softirq_entry.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 117
 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_softirq_entry)->funcs)) *_________p1 = (typeof(*((&__tracepoint_softirq_entry)->funcs))* )(*(volatile typeof(((&__tracepoint_softirq_entry)->funcs)) *)&(((&__tracepoint_softirq_entry)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_softirq_entry)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 117
 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, unsigned int vec_nr))(it_func))(__data, vec_nr); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
 register_trace_softirq_entry
 (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_register("softirq_entry", (void *)probe, data); } static inline __attribute__((always_inline)) int
 unregister_trace_softirq_entry
 (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_unregister("softirq_entry", (void *)probe, data); } static inline __attribute__((always_inline)) void
 check_trace_callback_type_softirq_entry
 (void (*cb)(void *__data, unsigned int vec_nr)) { }
 ;
/* Preprocessed expansion of the "softirq_exit" tracepoint
 * (include/trace/events/irq.h line 131); identical shape to
 * softirq_entry above. */
extern struct tracepoint
 __tracepoint_softirq_exit
 ; static inline __attribute__((always_inline)) void
 trace_softirq_exit
 (unsigned int vec_nr) { if (__builtin_constant_p(((static_branch(&__tracepoint_softirq_exit.key)))) ? !!((static_branch(&__tracepoint_softirq_exit.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 131
 , }; ______r = !!((static_branch(&__tracepoint_softirq_exit.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 131
 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_softirq_exit)->funcs)) *_________p1 = (typeof(*((&__tracepoint_softirq_exit)->funcs))* )(*(volatile typeof(((&__tracepoint_softirq_exit)->funcs)) *)&(((&__tracepoint_softirq_exit)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_softirq_exit)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 131
 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, unsigned int vec_nr))(it_func))(__data, vec_nr); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
 register_trace_softirq_exit
 (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_register("softirq_exit", (void *)probe, data); } static inline __attribute__((always_inline)) int
 unregister_trace_softirq_exit
 (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_unregister("softirq_exit", (void *)probe, data); } static inline __attribute__((always_inline)) void
 check_trace_callback_type_softirq_exit
 (void (*cb)(void *__data, unsigned int vec_nr)) { }
 ;
/* Preprocessed expansion of the "softirq_raise" tracepoint
 * (include/trace/events/irq.h line 145); fired from
 * __raise_softirq_irqoff() below with the raised vector number. */
extern struct tracepoint
 __tracepoint_softirq_raise
 ; static inline __attribute__((always_inline)) void
 trace_softirq_raise
 (unsigned int vec_nr) { if (__builtin_constant_p(((static_branch(&__tracepoint_softirq_raise.key)))) ? !!((static_branch(&__tracepoint_softirq_raise.key))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 145
 , }; ______r = !!((static_branch(&__tracepoint_softirq_raise.key))); ______f.miss_hit[______r]++; ______r; })) do { struct tracepoint_func *it_func_ptr; void *it_func; void *__data; if (__builtin_constant_p(((!(1)))) ? !!((!(1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 145
 , }; ______r = !!((!(1))); ______f.miss_hit[______r]++; ______r; })) return; rcu_read_lock_sched_notrace(); it_func_ptr = ({ typeof(*((&__tracepoint_softirq_raise)->funcs)) *_________p1 = (typeof(*((&__tracepoint_softirq_raise)->funcs))* )(*(volatile typeof(((&__tracepoint_softirq_raise)->funcs)) *)&(((&__tracepoint_softirq_raise)->funcs))); do { } while (0); ; do { } while (0); ((typeof(*((&__tracepoint_softirq_raise)->funcs)) *)(_________p1)); }); if (__builtin_constant_p(((it_func_ptr))) ? !!((it_func_ptr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "include/trace/events/irq.h"
 , .line =
 145
 , }; ______r = !!((it_func_ptr)); ______f.miss_hit[______r]++; ______r; })) { do { it_func = (it_func_ptr)->func; __data = (it_func_ptr)->data; ((void(*)(void *__data, unsigned int vec_nr))(it_func))(__data, vec_nr); } while ((++it_func_ptr)->func); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int
 register_trace_softirq_raise
 (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_register("softirq_raise", (void *)probe, data); } static inline __attribute__((always_inline)) int
 unregister_trace_softirq_raise
 (void (*probe)(void *__data, unsigned int vec_nr), void *data) { return tracepoint_probe_unregister("softirq_raise", (void *)probe, data); } static inline __attribute__((always_inline)) void
 check_trace_callback_type_softirq_raise
 (void (*cb)(void *__data, unsigned int vec_nr)) { }
 ;
/* Interrupt context kind: ordinary hard irq vs. nested handling. */
enum {
 IRQC_IS_HARDIRQ = 0,
 IRQC_IS_NESTED,
};
/* Signature of an interrupt handler: (irq number, dev_id cookie). */
typedef irqreturn_t (*irq_handler_t)(int, void *);
/* One registered handler on an interrupt line; aligned to 1<<6 = 64 bytes. */
struct irqaction {
 irq_handler_t handler; /* primary (hard-irq context) handler */
 unsigned long flags; /* request flags (IRQF_* — confirm) */
 void *dev_id; /* cookie handed back to the handler */
 struct irqaction *next; /* next action sharing this line */
 int irq; /* interrupt line number */
 irq_handler_t thread_fn; /* optional threaded handler */
 struct task_struct *thread; /* kthread executing thread_fn */
 unsigned long thread_flags;
 unsigned long thread_mask;
 const char *name; /* registration name */
 struct proc_dir_entry *dir; /* procfs entry for this action */
} __attribute__((__aligned__(1 << (6))));
/* Core irq registration API; result must be checked (warn_unused_result). */
extern irqreturn_t no_action(int cpl, void *dev_id);
extern int __attribute__((warn_unused_result))
request_threaded_irq(unsigned int irq, irq_handler_t handler,
       irq_handler_t thread_fn,
       unsigned long flags, const char *name, void *dev);
16322static inline __attribute__((always_inline)) int __attribute__((warn_unused_result))
16323request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
16324 const char *name, void *dev)
16325{
16326 return request_threaded_irq(irq, handler, ((void *)0), flags, name, dev);
16327}
/* Context-flexible and device-managed (devm_*) irq registration, plus
 * teardown entry points; all defined elsewhere. */
extern int __attribute__((warn_unused_result))
request_any_context_irq(unsigned int irq, irq_handler_t handler,
   unsigned long flags, const char *name, void *dev_id);
extern void exit_irq_thread(void);
extern void free_irq(unsigned int, void *);
struct device;
extern int __attribute__((warn_unused_result))
devm_request_threaded_irq(struct device *dev, unsigned int irq,
     irq_handler_t handler, irq_handler_t thread_fn,
     unsigned long irqflags, const char *devname,
     void *dev_id);
16339static inline __attribute__((always_inline)) int __attribute__((warn_unused_result))
16340devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
16341 unsigned long irqflags, const char *devname, void *dev_id)
16342{
16343 return devm_request_threaded_irq(dev, irq, handler, ((void *)0), irqflags,
16344 devname, dev_id);
16345}
/* Irq enable/disable, suspend/resume and CPU-affinity API; defined
 * elsewhere. */
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern int check_wakeup_irqs(void);
extern cpumask_var_t irq_default_affinity;
extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
/* Callback descriptor for affinity-change notifications on one irq. */
struct irq_affinity_notify {
 unsigned int irq; /* line being watched */
 struct kref kref; /* refcount; release() frees */
 struct work_struct work; /* deferred notify context */
 void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
 void (*release)(struct kref *ref);
};
/* Install (or clear, with NULL — confirm) the notifier for @irq. */
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
/* Wait for any queued affinity-notify work to finish by flushing the
 * shared workqueue. */
static inline __attribute__((always_inline)) void irq_run_affinity_notifiers(void)
{
 flush_scheduled_work();
}
/* Disable @irq without waiting for in-flight handlers, then disable local
 * interrupts (the do/while is local_irq_disable() with lockdep tracing). */
static inline __attribute__((always_inline)) void disable_irq_nosync_lockdep(unsigned int irq)
{
 disable_irq_nosync(irq);
 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
}
/* As disable_irq_nosync_lockdep(), but save the previous interrupt state
 * into *flags (expansion of local_irq_save(); the dummy-typeof dance just
 * type-checks that flags points at an unsigned long). */
static inline __attribute__((always_inline)) void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
 disable_irq_nosync(irq);
 do { do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); *flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
}
/* Disable @irq (synchronous variant — waits via disable_irq()), then
 * disable local interrupts with lockdep tracing. */
static inline __attribute__((always_inline)) void disable_irq_lockdep(unsigned int irq)
{
 disable_irq(irq);
 do { arch_local_irq_disable(); trace_hardirqs_off(); } while (0);
}
/* Re-enable local interrupts (lockdep-traced local_irq_enable()), then
 * re-enable the line @irq. */
static inline __attribute__((always_inline)) void enable_irq_lockdep(unsigned int irq)
{
 do { trace_hardirqs_on(); arch_local_irq_enable(); } while (0);
 enable_irq(irq);
}
/* Restore the interrupt state saved in *flags (lockdep-traced
 * local_irq_restore(): orders trace_hardirqs_on/off around the restore
 * depending on whether *flags had interrupts disabled), then re-enable
 * the line @irq. */
static inline __attribute__((always_inline)) void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(*flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(*flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 333, }; ______r = !!((({ ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(*flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(*flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(*flags); } while (0); } } while (0);
 enable_irq(irq);
}
/* Configure whether @irq may wake the system from suspend. */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
/* Mark @irq as a wakeup source (irq_set_irq_wake with on = 1). */
static inline __attribute__((always_inline)) int enable_irq_wake(unsigned int irq)
{
 const unsigned int on = 1;

 return irq_set_irq_wake(irq, on);
}
/* Clear @irq's wakeup-source status (irq_set_irq_wake with on = 0). */
static inline __attribute__((always_inline)) int disable_irq_wake(unsigned int irq)
{
 const unsigned int off = 0;

 return irq_set_irq_wake(irq, off);
}
extern bool force_irqthreads;
/* Softirq vector indices; NR_SOFTIRQS is the total count. */
enum
{
 HI_SOFTIRQ=0,
 TIMER_SOFTIRQ,
 NET_TX_SOFTIRQ,
 NET_RX_SOFTIRQ,
 BLOCK_SOFTIRQ,
 BLOCK_IOPOLL_SOFTIRQ,
 TASKLET_SOFTIRQ,
 SCHED_SOFTIRQ,
 HRTIMER_SOFTIRQ,
 RCU_SOFTIRQ,
 NR_SOFTIRQS
};
/* Human-readable names, indexed by softirq vector. */
extern char *softirq_to_name[NR_SOFTIRQS];
/* One softirq handler slot: action() receives its own descriptor. */
struct softirq_action
{
 void (*action)(struct softirq_action *);
};
 /* Softirq dispatch entry points (regparm(0) = asmlinkage calling
  * convention) and registration/init API; defined elsewhere. */
 __attribute__((regparm(0))) void do_softirq(void);
 __attribute__((regparm(0))) void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
/* Mark softirq vector @nr pending on this CPU (caller must have local
 * interrupts off, per the _irqoff suffix — confirm). Fires the
 * softirq_raise tracepoint, then ORs (1UL << nr) into the per-cpu
 * irq_stat.__softirq_pending via a size-dispatched %fs-segment-relative
 * or instruction (the expansion of this_cpu_or()/or_softirq_pending()). */
static inline __attribute__((always_inline)) void __raise_softirq_irqoff(unsigned int nr)
{
 trace_softirq_raise(nr);
 do { typedef typeof(irq_stat.__softirq_pending) pto_T__; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 443, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pto_T__ pto_tmp__; pto_tmp__ = ((1UL << nr)); (void)pto_tmp__; } switch (sizeof(irq_stat.__softirq_pending)) { case 1: asm("or" "b %1,""%%""fs"":" "%P" "0" : "+m" (irq_stat.__softirq_pending) : "qi" ((pto_T__)((1UL << nr)))); break; case 2: asm("or" "w %1,""%%""fs"":" "%P" "0" : "+m" (irq_stat.__softirq_pending) : "ri" ((pto_T__)((1UL << nr)))); break; case 4: asm("or" "l %1,""%%""fs"":" "%P" "0" : "+m" (irq_stat.__softirq_pending) : "ri" ((pto_T__)((1UL << nr)))); break; case 8: asm("or" "q %1,""%%""fs"":" "%P" "0" : "+m" (irq_stat.__softirq_pending) : "re" ((pto_T__)((1UL << nr)))); break; default: __bad_percpu_size(); } } while (0);
}
/* Out-of-line raise helpers and the per-cpu (.data..percpu section)
 * softirq work lists / ksoftirqd task pointer; defined elsewhere. */
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct list_head [NR_SOFTIRQS]) softirq_work_list;
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct task_struct *) ksoftirqd;
16438static inline __attribute__((always_inline)) struct task_struct *this_cpu_ksoftirqd(void)
16439{
16440 return ({ typeof((ksoftirqd)) pscr_ret__; do { const void *__vpp_verify = (typeof(&((ksoftirqd))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((ksoftirqd))) { case 1: pscr_ret__ = ({ typeof(((ksoftirqd))) pfo_ret__; switch (sizeof(((ksoftirqd)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((ksoftirqd))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 2: pscr_ret__ = ({ typeof(((ksoftirqd))) pfo_ret__; switch (sizeof(((ksoftirqd)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((ksoftirqd))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 4: pscr_ret__ = ({ typeof(((ksoftirqd))) pfo_ret__; switch (sizeof(((ksoftirqd)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((ksoftirqd))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((ksoftirqd))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 8: pscr_ret__ = ({ typeof((ksoftirqd)) ret__; do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); ret__ = *({ do { const void *__vpp_verify = (typeof((&((ksoftirqd)))))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((ksoftirqd)))) *)(&((ksoftirqd))))); (typeof((typeof(*(&((ksoftirqd)))) *)(&((ksoftirqd))))) (__ptr + (((__per_cpu_offset[debug_smp_processor_id()])))); }); }); do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 462, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 462, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 462, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 462, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); ret__; });break; default: __bad_size_call_parameter();break; } pscr_ret__; });
16441}
16442extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
16443extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
16444 int this_cpu, int softirq);
/*
 * Deferred-work unit run from softirq context (preprocessed from
 * include/linux/interrupt.h).
 */
struct tasklet_struct
{
 struct tasklet_struct *next; /* next tasklet in the per-CPU scheduling list (presumably; managed by __tasklet_schedule) */
 unsigned long state; /* bitmask of TASKLET_STATE_* bits (see enum below) */
 atomic_t count; /* disable nesting count; bumped by tasklet_disable*(), dropped by tasklet_enable*() */
 void (*func)(unsigned long); /* callback invoked when the tasklet runs */
 unsigned long data; /* opaque argument handed to func */
};
/* Bit numbers used within tasklet_struct.state. */
enum
{
 TASKLET_STATE_SCHED, /* set while the tasklet is scheduled (see tasklet_schedule) */
 TASKLET_STATE_RUN  /* set while the tasklet runs; used as a lock bit by tasklet_trylock/unlock */
};
16458static inline __attribute__((always_inline)) int tasklet_trylock(struct tasklet_struct *t)
16459{
16460 return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
16461}
/*
 * Release the RUN "lock" taken by tasklet_trylock().  The compiler
 * barrier orders the tasklet's work before the bit becomes visible
 * as cleared.
 */
static inline __attribute__((always_inline)) void tasklet_unlock(struct tasklet_struct *t)
{
 __asm__ __volatile__("": : :"memory"); /* compiler barrier */
 clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
/*
 * Busy-wait (with a compiler barrier each iteration) until the
 * TASKLET_STATE_RUN bit clears, i.e. until a concurrently running
 * instance of the tasklet finishes.  The condition is the expanded
 * test_bit() macro (constant vs. variable bit dispatch).
 */
static inline __attribute__((always_inline)) void tasklet_unlock_wait(struct tasklet_struct *t)
{
 while ((__builtin_constant_p((TASKLET_STATE_RUN)) ? constant_test_bit((TASKLET_STATE_RUN), (&(t)->state)) : variable_test_bit((TASKLET_STATE_RUN), (&(t)->state)))) { __asm__ __volatile__("": : :"memory"); }
}
16471extern void __tasklet_schedule(struct tasklet_struct *t);
/*
 * Schedule the tasklet for execution unless it is already scheduled.
 * This is the preprocessed form of
 *   if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
 *       __tasklet_schedule(t);
 * wrapped in the ftrace branch-profiling instrumentation.
 */
static inline __attribute__((always_inline)) void tasklet_schedule(struct tasklet_struct *t)
{
 if (__builtin_constant_p(((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)))) ? !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 544, }; ______r = !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))); ______f.miss_hit[______r]++; ______r; }))
 __tasklet_schedule(t);
}
16477extern void __tasklet_hi_schedule(struct tasklet_struct *t);
/*
 * High-priority variant of tasklet_schedule(): sets TASKLET_STATE_SCHED
 * and, if it was not already set, hands the tasklet to
 * __tasklet_hi_schedule().  Conditional is the expanded ftrace
 * branch-profiling form.
 */
static inline __attribute__((always_inline)) void tasklet_hi_schedule(struct tasklet_struct *t)
{
 if (__builtin_constant_p(((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)))) ? !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 552, }; ______r = !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))); ______f.miss_hit[______r]++; ______r; }))
 __tasklet_hi_schedule(t);
}
16483extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
/*
 * Like tasklet_hi_schedule() but queues via
 * __tasklet_hi_schedule_first() (presumably at the head of the list —
 * confirm against interrupt.h).  Conditional is the expanded ftrace
 * branch-profiling form of the test_and_set_bit() check.
 */
static inline __attribute__((always_inline)) void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
 if (__builtin_constant_p(((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)))) ? !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/interrupt.h", .line = 566, }; ______r = !!((!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))); ______f.miss_hit[______r]++; ______r; }))
 __tasklet_hi_schedule_first(t);
}
/*
 * Raise the tasklet's disable count without waiting for a running
 * instance to finish (hence "nosync").  The compiler barrier keeps
 * subsequent accesses from being hoisted above the increment.
 */
static inline __attribute__((always_inline)) void tasklet_disable_nosync(struct tasklet_struct *t)
{
 atomic_inc(&t->count);
 __asm__ __volatile__("": : :"memory"); /* compiler barrier */
}
/*
 * Disable the tasklet and wait for any running instance to finish
 * (spins via tasklet_unlock_wait).  The trailing asm is the expanded
 * alternatives-based memory fence: "lock; addl $0,0(%esp)" patched to
 * "mfence" on CPUs advertising the corresponding feature bit.
 */
static inline __attribute__((always_inline)) void tasklet_disable(struct tasklet_struct *t)
{
 tasklet_disable_nosync(t);
 tasklet_unlock_wait(t);
 asm volatile ("661:\n\t" "lock; addl $0,0(%%esp)" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(0*32+26)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "mfence" "\n664:\n" ".previous" : : : "memory");
}
/*
 * Drop the disable count raised by tasklet_disable*().  The compiler
 * barrier orders prior accesses before the decrement becomes visible.
 */
static inline __attribute__((always_inline)) void tasklet_enable(struct tasklet_struct *t)
{
 __asm__ __volatile__("": : :"memory"); /* compiler barrier */
 atomic_dec(&t->count);
}
/*
 * High-priority counterpart of tasklet_enable(); body is identical:
 * compiler barrier followed by a decrement of the disable count.
 */
static inline __attribute__((always_inline)) void tasklet_hi_enable(struct tasklet_struct *t)
{
 __asm__ __volatile__("": : :"memory"); /* compiler barrier */
 atomic_dec(&t->count);
}
16510extern void tasklet_kill(struct tasklet_struct *t);
16511extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
16512extern void tasklet_init(struct tasklet_struct *t,
16513 void (*func)(unsigned long), unsigned long data);
/*
 * An hrtimer whose expiry work is deferred to a tasklet
 * (see tasklet_hrtimer_init / _start / _cancel below).
 */
struct tasklet_hrtimer {
 struct hrtimer timer; /* underlying high-resolution timer */
 struct tasklet_struct tasklet; /* tasklet that runs the deferred work */
 enum hrtimer_restart (*function)(struct hrtimer *); /* user callback, presumably invoked from the tasklet — confirm against interrupt.h */
};
16519extern void
16520tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
16521 enum hrtimer_restart (*function)(struct hrtimer *),
16522 clockid_t which_clock, enum hrtimer_mode mode);
16523static inline __attribute__((always_inline))
16524int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
16525 const enum hrtimer_mode mode)
16526{
16527 return hrtimer_start(&ttimer->timer, time, mode);
16528}
/*
 * Tear down a tasklet_hrtimer: first cancel the timer so no new
 * expiries fire, then kill the tasklet (order matters — killing the
 * tasklet first could let a pending expiry reschedule it).
 */
static inline __attribute__((always_inline))
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
 hrtimer_cancel(&ttimer->timer);
 tasklet_kill(&ttimer->tasklet);
}
16535extern unsigned long probe_irq_on(void);
16536extern int probe_irq_off(unsigned long);
16537extern unsigned int probe_irq_mask(unsigned long);
16538extern void init_irq_proc(void);
16539struct seq_file;
16540int show_interrupts(struct seq_file *p, void *v);
16541int arch_show_interrupts(struct seq_file *p, int prec);
16542extern int early_irq_init(void);
16543extern int arch_probe_nr_irqs(void);
16544extern int arch_early_irq_init(void);
/*
 * Per-CPU cputime accounting buckets; the field names mirror the
 * columns reported in /proc/stat (user, nice, system, ...).
 */
struct cpu_usage_stat {
 cputime64_t user;
 cputime64_t nice;
 cputime64_t system;
 cputime64_t softirq;
 cputime64_t irq;
 cputime64_t idle;
 cputime64_t iowait;
 cputime64_t steal;
 cputime64_t guest;
 cputime64_t guest_nice;
};
/*
 * Per-CPU kernel statistics; instantiated as the per-cpu variable
 * `kstat` (declared below with the .data..percpu section attribute).
 */
struct kernel_stat {
 struct cpu_usage_stat cpustat; /* cputime buckets, see above */
 unsigned long irqs_sum; /* total IRQ count, read by kstat_cpu_irqs_sum() */
 unsigned int softirqs[NR_SOFTIRQS]; /* per-softirq counts, bumped by kstat_incr_softirqs_this_cpu() */
};
16562extern __attribute__((section(".data..percpu" ""))) __typeof__(struct kernel_stat) kstat;
16563extern unsigned long long nr_context_switches(void);
16564extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
/*
 * Increment this CPU's kstat.softirqs[irq] counter.
 *
 * This is the fully preprocessed form of a per-cpu add of 1: an outer
 * switch dispatches on sizeof the counter, and each arm emits an
 * inc/dec/add instruction with the "%fs" per-cpu segment override
 * (percpu_add_op style), all interleaved with the expanded ftrace
 * branch-profiling conditionals.  The size-8 arm instead computes the
 * per-cpu address via this_cpu_off and does a plain += through the
 * resulting pointer.  Do not reformat: the exact expansion is what
 * this ICE reproducer exercises.
 */
static inline __attribute__((always_inline)) void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
 do { do { const void *__vpp_verify = (typeof(&(((kstat.softirqs[irq])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((kstat.softirqs[irq])))) { case 1: do { typedef typeof((((kstat.softirqs[irq])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((kstat.softirqs[irq]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((kstat.softirqs[irq])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((kstat.softirqs[irq]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((kstat.softirqs[irq])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((kstat.softirqs[irq]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/kernel_stat.h", .line = 77, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((kstat.softirqs[irq])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((kstat.softirqs[irq]))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((kstat.softirqs[irq])))))); (typeof(*(&((((kstat.softirqs[irq])))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}
/*
 * Read the softirq counter `irq` of an arbitrary CPU.  This is the
 * expanded per_cpu() access: the address of the per-cpu `kstat` is
 * offset by __per_cpu_offset[cpu] before the softirqs[] element is
 * loaded.
 */
static inline __attribute__((always_inline)) unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
 return (*({ do { const void *__vpp_verify = (typeof((&(kstat))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(kstat))) *)(&(kstat)))); (typeof((typeof(*(&(kstat))) *)(&(kstat)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).softirqs[irq];
}
16573extern unsigned int kstat_irqs(unsigned int irq);
/*
 * Read the accumulated IRQ count (kstat.irqs_sum) of an arbitrary CPU
 * via the same expanded per_cpu() addressing as kstat_softirqs_cpu().
 */
static inline __attribute__((always_inline)) unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
 return (*({ do { const void *__vpp_verify = (typeof((&(kstat))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(kstat))) *)(&(kstat)))); (typeof((typeof(*(&(kstat))) *)(&(kstat)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).irqs_sum;
}
16578extern unsigned long long task_delta_exec(struct task_struct *);
16579extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
16580extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
16581extern void account_steal_time(cputime_t);
16582extern void account_idle_time(cputime_t);
16583extern void account_process_tick(struct task_struct *, int user);
16584extern void account_steal_ticks(unsigned long ticks);
16585extern void account_idle_ticks(unsigned long ticks);
/* Forward declarations for the user_regset callback signatures below. */
struct task_struct;
struct user_regset;
/* Query whether the regset holds meaningful data for `target`. */
typedef int user_regset_active_fn(struct task_struct *target,
      const struct user_regset *regset);
/* Read regset contents into exactly one of kbuf (kernel) or ubuf (user);
 * pos/count select a byte window. */
typedef int user_regset_get_fn(struct task_struct *target,
          const struct user_regset *regset,
          unsigned int pos, unsigned int count,
          void *kbuf, void *ubuf);
/* Write regset contents from exactly one of kbuf or ubuf. */
typedef int user_regset_set_fn(struct task_struct *target,
          const struct user_regset *regset,
          unsigned int pos, unsigned int count,
          const void *kbuf, const void *ubuf);
/* Flush cached register state back for `target`. */
typedef int user_regset_writeback_fn(struct task_struct *target,
         const struct user_regset *regset,
         int immediate);
/*
 * Descriptor for one register set exposed via ptrace/core dumps
 * (preprocessed from include/linux/regset.h).
 */
struct user_regset {
 user_regset_get_fn *get; /* read accessor, see typedef above */
 user_regset_set_fn *set; /* write accessor */
 user_regset_active_fn *active; /* optional "is populated" query */
 user_regset_writeback_fn *writeback; /* optional flush hook */
 unsigned int n; /* presumably number of entries of `size` bytes — confirm against regset.h docs */
 unsigned int size; /* size in bytes of one entry */
 unsigned int align; /* required alignment of entries */
 unsigned int bias; /* bias value; semantics defined in regset.h */
 unsigned int core_note_type; /* ELF note type used in core dumps */
};
/*
 * A named collection of user_regsets together with the ELF identity
 * (machine/flags/OSABI) they describe; obtained via
 * task_user_regset_view().
 */
struct user_regset_view {
 const char *name; /* human-readable view name */
 const struct user_regset *regsets; /* array of `n` regset descriptors */
 unsigned int n; /* number of entries in regsets[] */
 u32 e_flags; /* ELF e_flags for this view */
 u16 e_machine; /* ELF e_machine for this view */
 u8 ei_osabi; /* ELF EI_OSABI for this view */
};
16620const struct user_regset_view *task_user_regset_view(struct task_struct *tsk);
/*
 * Copy the [start_pos, end_pos) slice of `data` out to exactly one of
 * *kbuf (kernel destination, memcpy) or *ubuf (user destination,
 * __copy_to_user), advancing the chosen destination pointer as well as
 * *pos and *count.  end_pos < 0 means "no upper bound".  Returns 0 on
 * success or -14 (-EFAULT) when the user-space copy fails.  The
 * oversized conditionals are the preprocessed BUG_ON()/unlikely()
 * branch-profiling forms from include/linux/regset.h; do not reformat.
 */
static inline __attribute__((always_inline)) int user_regset_copyout(unsigned int *pos, unsigned int *count,
        void **kbuf,
        void **ubuf, const void *data,
        const int start_pos, const int end_pos)
{
 if (__builtin_constant_p(((*count == 0))) ? !!((*count == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 224, }; ______r = !!((*count == 0)); ______f.miss_hit[______r]++; ______r; }))
  return 0;
 do { if (__builtin_constant_p((((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 226, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 226, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 226, }; ______r = !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 226, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/regset.h"), "i" (226), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 if (__builtin_constant_p(((end_pos < 0 || *pos < end_pos))) ? !!((end_pos < 0 || *pos < end_pos)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 227, }; ______r = !!((end_pos < 0 || *pos < end_pos)); ______f.miss_hit[______r]++; ______r; })) {
  unsigned int copy = (end_pos < 0 ? *count
         : ({ typeof(*count) _min1 = (*count); typeof(end_pos - *pos) _min2 = (end_pos - *pos); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; })); /* expanded min() */
  data += *pos - start_pos;
  if (__builtin_constant_p(((*kbuf))) ? !!((*kbuf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 231, }; ______r = !!((*kbuf)); ______f.miss_hit[______r]++; ______r; })) {
   __builtin_memcpy(*kbuf, data, copy);
   *kbuf += copy;
  } else if (__builtin_constant_p(((__copy_to_user(*ubuf, data, copy)))) ? !!((__copy_to_user(*ubuf, data, copy))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 234, }; ______r = !!((__copy_to_user(*ubuf, data, copy))); ______f.miss_hit[______r]++; ______r; }))
   return -14; /* -EFAULT */
  else
   *ubuf += copy;
  *pos += copy;
  *count -= copy;
 }
 return 0;
}
/*
 * Mirror image of user_regset_copyout(): copy the [start_pos, end_pos)
 * window INTO `data` from exactly one of *kbuf (memcpy) or *ubuf
 * (__copy_from_user), advancing the chosen source pointer, *pos and
 * *count.  end_pos < 0 means "no upper bound".  Returns 0 on success
 * or -14 (-EFAULT) when the user-space copy fails.  Conditionals are
 * the preprocessed BUG_ON()/unlikely() branch-profiling forms.
 */
static inline __attribute__((always_inline)) int user_regset_copyin(unsigned int *pos, unsigned int *count,
       const void **kbuf,
       const void **ubuf, void *data,
       const int start_pos, const int end_pos)
{
 if (__builtin_constant_p(((*count == 0))) ? !!((*count == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 249, }; ______r = !!((*count == 0)); ______f.miss_hit[______r]++; ______r; }))
  return 0;
 do { if (__builtin_constant_p((((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 251, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 251, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 251, }; ______r = !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 251, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/regset.h"), "i" (251), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 if (__builtin_constant_p(((end_pos < 0 || *pos < end_pos))) ? !!((end_pos < 0 || *pos < end_pos)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 252, }; ______r = !!((end_pos < 0 || *pos < end_pos)); ______f.miss_hit[______r]++; ______r; })) {
  unsigned int copy = (end_pos < 0 ? *count
         : ({ typeof(*count) _min1 = (*count); typeof(end_pos - *pos) _min2 = (end_pos - *pos); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; })); /* expanded min() */
  data += *pos - start_pos;
  if (__builtin_constant_p(((*kbuf))) ? !!((*kbuf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 256, }; ______r = !!((*kbuf)); ______f.miss_hit[______r]++; ______r; })) {
   __builtin_memcpy(data, *kbuf, copy);
   *kbuf += copy;
  } else if (__builtin_constant_p(((__copy_from_user(data, *ubuf, copy)))) ? !!((__copy_from_user(data, *ubuf, copy))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 259, }; ______r = !!((__copy_from_user(data, *ubuf, copy))); ______f.miss_hit[______r]++; ______r; }))
   return -14; /* -EFAULT */
  else
   *ubuf += copy;
  *pos += copy;
  *count -= copy;
 }
 return 0;
}
16669static inline __attribute__((always_inline)) int user_regset_copyout_zero(unsigned int *pos,
16670 unsigned int *count,
16671 void **kbuf, void **ubuf,
16672 const int start_pos,
16673 const int end_pos)
16674{
16675 if (__builtin_constant_p(((*count == 0))) ? !!((*count == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 279, }; ______r = !!((*count == 0)); ______f.miss_hit[______r]++; ______r; }))
16676 return 0;
16677 do { if (__builtin_constant_p((((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 281, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 281, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 281, }; ______r = !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 281, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/regset.h"), "i" (281), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
16678 if (__builtin_constant_p(((end_pos < 0 || *pos < end_pos))) ? !!((end_pos < 0 || *pos < end_pos)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 282, }; ______r = !!((end_pos < 0 || *pos < end_pos)); ______f.miss_hit[______r]++; ______r; })) {
16679 unsigned int copy = (end_pos < 0 ? *count
16680 : ({ typeof(*count) _min1 = (*count); typeof(end_pos - *pos) _min2 = (end_pos - *pos); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; }));
16681 if (__builtin_constant_p(((*kbuf))) ? !!((*kbuf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 285, }; ______r = !!((*kbuf)); ______f.miss_hit[______r]++; ______r; })) {
16682 __builtin_memset(*kbuf, 0, copy);
16683 *kbuf += copy;
16684 } else if (__builtin_constant_p(((__clear_user(*ubuf, copy)))) ? !!((__clear_user(*ubuf, copy))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 288, }; ______r = !!((__clear_user(*ubuf, copy))); ______f.miss_hit[______r]++; ______r; }))
16685 return -14;
16686 else
16687 *ubuf += copy;
16688 *pos += copy;
16689 *count -= copy;
16690 }
16691 return 0;
16692}
16693static inline __attribute__((always_inline)) int user_regset_copyin_ignore(unsigned int *pos,
16694 unsigned int *count,
16695 const void **kbuf,
16696 const void **ubuf,
16697 const int start_pos,
16698 const int end_pos)
16699{
16700 if (__builtin_constant_p(((*count == 0))) ? !!((*count == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 305, }; ______r = !!((*count == 0)); ______f.miss_hit[______r]++; ______r; }))
16701 return 0;
16702 do { if (__builtin_constant_p((((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 307, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 307, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 307, }; ______r = !!(((__builtin_constant_p(*pos < start_pos) ? !!(*pos < start_pos) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 307, }; ______r = __builtin_expect(!!(*pos < start_pos), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/regset.h"), "i" (307), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
16703 if (__builtin_constant_p(((end_pos < 0 || *pos < end_pos))) ? !!((end_pos < 0 || *pos < end_pos)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 308, }; ______r = !!((end_pos < 0 || *pos < end_pos)); ______f.miss_hit[______r]++; ______r; })) {
16704 unsigned int copy = (end_pos < 0 ? *count
16705 : ({ typeof(*count) _min1 = (*count); typeof(end_pos - *pos) _min2 = (end_pos - *pos); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; }));
16706 if (__builtin_constant_p(((*kbuf))) ? !!((*kbuf)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 311, }; ______r = !!((*kbuf)); ______f.miss_hit[______r]++; ______r; }))
16707 *kbuf += copy;
16708 else
16709 *ubuf += copy;
16710 *pos += copy;
16711 *count -= copy;
16712 }
16713 return 0;
16714}
16715static inline __attribute__((always_inline)) int copy_regset_to_user(struct task_struct *target,
16716 const struct user_regset_view *view,
16717 unsigned int setno,
16718 unsigned int offset, unsigned int size,
16719 void *data)
16720{
16721 const struct user_regset *regset = &view->regsets[setno];
16722 if (__builtin_constant_p(((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? !!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 338, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))))) ? !!((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? 
!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 338, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 338, }; ______r = !!((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? 
!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 338, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))); ______f.miss_hit[______r]++; ______r; }))
16723 return -5;
16724 return regset->get(target, regset, offset, size, ((void *)0), data);
16725}
16726static inline __attribute__((always_inline)) int copy_regset_from_user(struct task_struct *target,
16727 const struct user_regset_view *view,
16728 unsigned int setno,
16729 unsigned int offset, unsigned int size,
16730 const void *data)
16731{
16732 const struct user_regset *regset = &view->regsets[setno];
16733 if (__builtin_constant_p(((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? !!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 361, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))))) ? !!((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? 
!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 361, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 361, }; ______r = !!((!((__builtin_constant_p(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) ? 
!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/regset.h", .line = 361, }; ______r = __builtin_expect(!!(({ unsigned long flag, roksum; (void)0; asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" : "=&r" (flag), "=r" (roksum) : "1" (data), "g" ((long)(size)), "rm" (current_thread_info()->addr_limit.seg)); flag; }) == 0), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))); ______f.miss_hit[______r]++; ______r; }))
16734 return -5;
16735 return regset->set(target, regset, offset, size, ((void *)0), data);
16736}
16737extern unsigned int xstate_size;
16738extern u64 pcntxt_mask;
16739extern u64 xstate_fx_sw_bytes[6];
16740extern void xsave_init(void);
16741extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
16742extern int init_fpu(struct task_struct *child);
16743extern int check_for_xstate(struct i387_fxsave_struct *buf,
16744 void *fpstate,
16745 struct _fpx_sw_bytes *sw);
16746static inline __attribute__((always_inline)) int fpu_xrstor_checking(struct fpu *fpu)
16747{
16748 struct xsave_struct *fx = &fpu->state->xsave;
16749 int err;
16750 asm volatile("1: .byte " "0x0f,0xae,0x2f\n\t"
16751 "2:\n"
16752 ".section .fixup,\"ax\"\n"
16753 "3: movl $-1,%[err]\n"
16754 " jmp 2b\n"
16755 ".previous\n"
16756 " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "3b" "\n" " .previous\n"
16757 : [err] "=r" (err)
16758 : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
16759 : "memory");
16760 return err;
16761}
16762static inline __attribute__((always_inline)) int xsave_user(struct xsave_struct *buf)
16763{
16764 int err;
16765 err = __clear_user(&buf->xsave_hdr,
16766 sizeof(struct xsave_hdr_struct));
16767 if (__builtin_constant_p((((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = !!(((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 74, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
16768 return -14;
16769 __asm__ __volatile__("1: .byte " "0x0f,0xae,0x27\n"
16770 "2:\n"
16771 ".section .fixup,\"ax\"\n"
16772 "3: movl $-1,%[err]\n"
16773 " jmp 2b\n"
16774 ".previous\n"
16775 ".section __ex_table,\"a\"\n"
16776 " " ".balign 4" " " "\n"
16777 " " ".long" " " "1b,3b\n"
16778 ".previous"
16779 : [err] "=r" (err)
16780 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
16781 : "memory");
16782 if (__builtin_constant_p((((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 90, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __clear_user(buf, xstate_size)))) ? !!(((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 90, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __clear_user(buf, xstate_size))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 90, }; ______r = !!(((__builtin_constant_p(err) ? !!(err) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/xsave.h", .line = 90, }; ______r = __builtin_expect(!!(err), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __clear_user(buf, xstate_size))); ______f.miss_hit[______r]++; ______r; }))
16783 err = -14;
16784 return err;
16785}
16786static inline __attribute__((always_inline)) int xrestore_user(struct xsave_struct *buf, u64 mask)
16787{
16788 int err;
16789 struct xsave_struct *xstate = (( struct xsave_struct *)buf);
16790 u32 lmask = mask;
16791 u32 hmask = mask >> 32;
16792 __asm__ __volatile__("1: .byte " "0x0f,0xae,0x2f\n"
16793 "2:\n"
16794 ".section .fixup,\"ax\"\n"
16795 "3: movl $-1,%[err]\n"
16796 " jmp 2b\n"
16797 ".previous\n"
16798 ".section __ex_table,\"a\"\n"
16799 " " ".balign 4" " " "\n"
16800 " " ".long" " " "1b,3b\n"
16801 ".previous"
16802 : [err] "=r" (err)
16803 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
16804 : "memory");
16805 return err;
16806}
16807static inline __attribute__((always_inline)) void xrstor_state(struct xsave_struct *fx, u64 mask)
16808{
16809 u32 lmask = mask;
16810 u32 hmask = mask >> 32;
16811 asm volatile(".byte " "0x0f,0xae,0x2f\n\t"
16812 : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
16813 : "memory");
16814}
16815static inline __attribute__((always_inline)) void xsave_state(struct xsave_struct *fx, u64 mask)
16816{
16817 u32 lmask = mask;
16818 u32 hmask = mask >> 32;
16819 asm volatile(".byte " "0x0f,0xae,0x27\n\t"
16820 : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
16821 : "memory");
16822}
16823static inline __attribute__((always_inline)) void fpu_xsave(struct fpu *fpu)
16824{
16825 asm volatile ("661:\n\t" ".byte " "0x0f,0xae,0x27" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(7*32+ 4)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" ".byte " "0x0f,0xae,0x37" "\n664:\n" ".previous" : : "i" (0), [fx] "D" (&fpu->state->xsave), "a" (-1), "d" (-1) : "memory")
16826 ;
16827}
16828extern unsigned int sig_xstate_size;
16829extern void fpu_init(void);
16830extern void mxcsr_feature_mask_init(void);
16831extern int init_fpu(struct task_struct *child);
16832extern __attribute__((regparm(0))) void math_state_restore(void);
16833extern void __math_state_restore(void);
16834extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
16835extern user_regset_active_fn fpregs_active, xfpregs_active;
16836extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
16837 xstateregs_get;
16838extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
16839 xstateregs_set;
16840extern struct _fpx_sw_bytes fx_sw_reserved;
16841static inline __attribute__((always_inline)) void finit_soft_fpu(struct i387_soft_struct *soft) {}
16842static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) bool use_xsaveopt(void)
16843{
16844 return ( __builtin_constant_p((__builtin_constant_p((7*32+ 4)) && ( ((((7*32+ 4))>>5)==0 && (1UL<<(((7*32+ 4))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((7*32+ 4))>>5)==1 && (1UL<<(((7*32+ 4))&31) & (0|0))) || ((((7*32+ 4))>>5)==2 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==3 && (1UL<<(((7*32+ 4))&31) & (0))) || ((((7*32+ 4))>>5)==4 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==5 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==6 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==7 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==8 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==9 && (1UL<<(((7*32+ 4))&31) & 0)) ) ? 1 : (__builtin_constant_p(((7*32+ 4))) ? constant_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))) ? (__builtin_constant_p((7*32+ 4)) && ( ((((7*32+ 4))>>5)==0 && (1UL<<(((7*32+ 4))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((7*32+ 4))>>5)==1 && (1UL<<(((7*32+ 4))&31) & (0|0))) || ((((7*32+ 4))>>5)==2 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==3 && (1UL<<(((7*32+ 4))&31) & (0))) || ((((7*32+ 4))>>5)==4 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==5 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==6 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==7 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==8 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==9 && (1UL<<(((7*32+ 4))&31) & 0)) ) ? 1 : (__builtin_constant_p(((7*32+ 4))) ? constant_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) : __builtin_constant_p((7*32+ 4)) ? 
__static_cpu_has((7*32+ 4)) : (__builtin_constant_p((7*32+ 4)) && ( ((((7*32+ 4))>>5)==0 && (1UL<<(((7*32+ 4))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((7*32+ 4))>>5)==1 && (1UL<<(((7*32+ 4))&31) & (0|0))) || ((((7*32+ 4))>>5)==2 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==3 && (1UL<<(((7*32+ 4))&31) & (0))) || ((((7*32+ 4))>>5)==4 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==5 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==6 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==7 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==8 && (1UL<<(((7*32+ 4))&31) & 0)) || ((((7*32+ 4))>>5)==9 && (1UL<<(((7*32+ 4))&31) & 0)) ) ? 1 : (__builtin_constant_p(((7*32+ 4))) ? constant_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((7*32+ 4)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) );
16845}
16846static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) bool use_xsave(void)
16847{
16848 return ( __builtin_constant_p((__builtin_constant_p((4*32+26)) && ( ((((4*32+26))>>5)==0 && (1UL<<(((4*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+26))>>5)==1 && (1UL<<(((4*32+26))&31) & (0|0))) || ((((4*32+26))>>5)==2 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==3 && (1UL<<(((4*32+26))&31) & (0))) || ((((4*32+26))>>5)==4 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==5 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==6 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==7 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==8 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==9 && (1UL<<(((4*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+26))) ? constant_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))) ? (__builtin_constant_p((4*32+26)) && ( ((((4*32+26))>>5)==0 && (1UL<<(((4*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+26))>>5)==1 && (1UL<<(((4*32+26))&31) & (0|0))) || ((((4*32+26))>>5)==2 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==3 && (1UL<<(((4*32+26))&31) & (0))) || ((((4*32+26))>>5)==4 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==5 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==6 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==7 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==8 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==9 && (1UL<<(((4*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+26))) ? constant_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) : __builtin_constant_p((4*32+26)) ? 
__static_cpu_has((4*32+26)) : (__builtin_constant_p((4*32+26)) && ( ((((4*32+26))>>5)==0 && (1UL<<(((4*32+26))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+26))>>5)==1 && (1UL<<(((4*32+26))&31) & (0|0))) || ((((4*32+26))>>5)==2 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==3 && (1UL<<(((4*32+26))&31) & (0))) || ((((4*32+26))>>5)==4 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==5 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==6 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==7 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==8 && (1UL<<(((4*32+26))&31) & 0)) || ((((4*32+26))>>5)==9 && (1UL<<(((4*32+26))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+26))) ? constant_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+26)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) );
16849}
16850static inline __attribute__((always_inline)) __attribute__((always_inline)) __attribute__((pure)) bool use_fxsr(void)
16851{
16852 return ( __builtin_constant_p((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))) ? (__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) : __builtin_constant_p((0*32+24)) ? 
__static_cpu_has((0*32+24)) : (__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))) );
16853}
16854extern void __sanitize_i387_state(struct task_struct *);
/*
 * sanitize_i387_state() — preprocessed expansion (linux-3.0.4,
 * arch/x86/include/asm/i387.h:85).  Pre-expansion form:
 *     if (!use_xsaveopt())
 *         return;
 *     __sanitize_i387_state(tsk);
 * The ?:-statement-expression wrapper is the CONFIG_PROFILE_ALL_BRANCHES
 * "if" macro: it records branch hit/miss counts in a static
 * ftrace_branch_data placed in the "_ftrace_branch" section.
 * NOTE(review): stray leading digits on some lines (e.g. "16855") look like
 * capture artifacts of the tool that produced this dump, not original tokens.
 */
16855static inline __attribute__((always_inline)) void sanitize_i387_state(struct task_struct *tsk)
16856{
 16857 if (__builtin_constant_p(((!use_xsaveopt()))) ? !!((!use_xsaveopt())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 85, }; ______r = !!((!use_xsaveopt())); ______f.miss_hit[______r]++; ______r; }))
 16858 return;
 16859 __sanitize_i387_state(tsk);
16860}
/*
 * fxrstor_checking() — restore FPU context from *fx.
 * The asm is an expanded "alternative": the default instruction is
 * "nop ; frstor %1", patched at boot to "fxrstor %1" when CPU feature bit
 * (0*32+24) is present (presumably X86_FEATURE_FXSR — word 0, bit 24;
 * confirm against cpufeature.h).  The .altinstructions section records the
 * patch site; .discard checks the replacement is not longer than the
 * original.  Always returns 0 (the "checking" fault handling is elsewhere).
 */
16861static inline __attribute__((always_inline)) int fxrstor_checking(struct i387_fxsave_struct *fx)
16862{
 16863 asm volatile ("661:\n\t" "nop ; frstor %1" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(0*32+24)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "fxrstor %1" "\n664:\n" ".previous" : : "i" (0), "m" (*fx))
 16864 ;
 16865 return 0;
16866}
/*
 * fpu_fxsave() — dump the FPU/SSE register state into fpu->state->fxsave
 * with a single FXSAVE instruction (32-bit form; memory operand only).
 */
16867static inline __attribute__((always_inline)) void fpu_fxsave(struct fpu *fpu)
16868{
 16869 asm volatile("fxsave %[fx]"
 16870 : [fx] "=m" (fpu->state->fxsave));
16871}
/*
 * fpu_save_init() — save the current FPU state into *fpu, choosing the best
 * available mechanism.  Pre-expansion logic (i387.h:229-246):
 *     if (use_xsave()) {
 *         fpu_xsave(fpu);
 *         if (!(fpu->state->xsave.xsave_hdr.xstate_bv & 0x1))  // no FP state
 *             return;
 *     } else if (use_fxsr()) {
 *         fpu_fxsave(fpu);
 *     } else {
 *         asm volatile("fnsave %[fx]; fwait" ...);  // fnsave reinitializes FPU
 *         return;
 *     }
 *     if (unlikely(fpu->state->fxsave.swd & (1 << 7)))  // ES: exception summary
 *         asm volatile("fnclex");
 *     // alternative(): NOPs by default, patched to "emms; fildl <percpu 0>"
 *     // on CPUs with feature (3*32+10) — the FXSAVE leak workaround, which
 *     // loads a safe value so stale FOP/FIP/FDP aren't leaked between tasks.
 * All the __builtin_constant_p()/statement-expression wrappers are the
 * CONFIG_PROFILE_ALL_BRANCHES / ftrace_likely_update() branch profilers.
 */
16872static inline __attribute__((always_inline)) void fpu_save_init(struct fpu *fpu)
16873{
 16874 if (__builtin_constant_p(((use_xsave()))) ? !!((use_xsave())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 229, }; ______r = !!((use_xsave())); ______f.miss_hit[______r]++; ______r; })) {
 16875 fpu_xsave(fpu);
 16876 if (__builtin_constant_p(((!(fpu->state->xsave.xsave_hdr.xstate_bv & 0x1)))) ? !!((!(fpu->state->xsave.xsave_hdr.xstate_bv & 0x1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 235, }; ______r = !!((!(fpu->state->xsave.xsave_hdr.xstate_bv & 0x1))); ______f.miss_hit[______r]++; ______r; }))
 16877 return;
 16878 } else if (__builtin_constant_p(((use_fxsr()))) ? !!((use_fxsr())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 237, }; ______r = !!((use_fxsr())); ______f.miss_hit[______r]++; ______r; })) {
 16879 fpu_fxsave(fpu);
 16880 } else {
 16881 asm volatile("fnsave %[fx]; fwait"
 16882 : [fx] "=m" (fpu->state->fsave));
 16883 return;
 16884 }
 16885 if (__builtin_constant_p((((__builtin_constant_p(fpu->state->fxsave.swd & (1 << 7)) ? !!(fpu->state->fxsave.swd & (1 << 7)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 245, }; ______r = __builtin_expect(!!(fpu->state->fxsave.swd & (1 << 7)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(fpu->state->fxsave.swd & (1 << 7)) ? !!(fpu->state->fxsave.swd & (1 << 7)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 245, }; ______r = __builtin_expect(!!(fpu->state->fxsave.swd & (1 << 7)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 245, }; ______r = !!(((__builtin_constant_p(fpu->state->fxsave.swd & (1 << 7)) ? !!(fpu->state->fxsave.swd & (1 << 7)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 245, }; ______r = __builtin_expect(!!(fpu->state->fxsave.swd & (1 << 7)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
 16886 asm volatile("fnclex");
 16887 asm volatile ("661:\n\t" ".byte " "0x90,0x8d,0xb4,0x26,0x00,0x00,0x00,0x00" "\n" ".byte " "0x89,0xf6" "\n" "\n662:\n" ".section .altinstructions,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "661b\n" " " ".long" " " "663f\n" " .word " "(3*32+10)" "\n" " .byte 662b-661b\n" " .byte 664f-663f\n" ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (664f-663f) - (662b-661b)\n" ".previous\n" ".section .altinstr_replacement, \"ax\"\n" "663:\n\t" "emms\n\t" "fildl %P[addr]" "\n664:\n" ".previous" : : "i" (0), [addr] "m" ((__per_cpu_offset[0])))
 16888 ;
16889}
16890static inline __attribute__((always_inline)) void __save_init_fpu(struct task_struct *tsk)
16891{
16892 fpu_save_init(&tsk->thread.fpu);
16893 ((struct thread_info *)(tsk)->stack)->status &= ~0x0001;
16894}
16895static inline __attribute__((always_inline)) int fpu_fxrstor_checking(struct fpu *fpu)
16896{
16897 return fxrstor_checking(&fpu->state->fxsave);
16898}
/*
 * fpu_restore_checking() — pre-expansion form (i387.h:272):
 *     if (use_xsave())
 *         return fpu_xrstor_checking(fpu);
 *     else
 *         return fpu_fxrstor_checking(fpu);
 * The ?: wrapper is the branch-profiling "if" macro (ftrace_branch_data in
 * section "_ftrace_branch").
 */
16899static inline __attribute__((always_inline)) int fpu_restore_checking(struct fpu *fpu)
16900{
 16901 if (__builtin_constant_p(((use_xsave()))) ? !!((use_xsave())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 272, }; ______r = !!((use_xsave())); ______f.miss_hit[______r]++; ______r; }))
 16902 return fpu_xrstor_checking(fpu);
 16903 else
 16904 return fpu_fxrstor_checking(fpu);
16905}
16906static inline __attribute__((always_inline)) int restore_fpu_checking(struct task_struct *tsk)
16907{
16908 return fpu_restore_checking(&tsk->thread.fpu);
16909}
16910extern int save_i387_xstate(void *buf);
16911extern int restore_i387_xstate(void *buf);
/*
 * __unlazy_fpu() — pre-expansion form (i387.h:291):
 *     if (task_thread_info(tsk)->status & TS_USEDFPU) {   // 0x0001
 *         __save_init_fpu(tsk);
 *         stts();            // write_cr0(read_cr0() | X86_CR0_TS /+0x8+/)
 *     } else
 *         tsk->fpu_counter = 0;
 * Setting CR0.TS makes the next FPU use fault, so the state can be lazily
 * reloaded.  The ?: wrapper is the branch profiler.
 */
16912static inline __attribute__((always_inline)) void __unlazy_fpu(struct task_struct *tsk)
16913{
 16914 if (__builtin_constant_p(((((struct thread_info *)(tsk)->stack)->status & 0x0001))) ? !!((((struct thread_info *)(tsk)->stack)->status & 0x0001)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 291, }; ______r = !!((((struct thread_info *)(tsk)->stack)->status & 0x0001)); ______f.miss_hit[______r]++; ______r; })) {
 16915 __save_init_fpu(tsk);
 16916 write_cr0(read_cr0() | 0x00000008);
 16917 } else
 16918 tsk->fpu_counter = 0;
16919}
/*
 * __clear_fpu() — discard @tsk's live FPU state without saving it.
 * If the task was using the FPU (TS_USEDFPU == 0x0001 in thread_info):
 *   - "fwait" drains pending exceptions; the hand-built __ex_table entry
 *     (1b -> 2b) makes a fault at the fwait simply skip forward,
 *   - the in-use flag is cleared,
 *   - CR0.TS (0x8) is set so the next FPU use traps for reload.
 */
16920static inline __attribute__((always_inline)) void __clear_fpu(struct task_struct *tsk)
16921{
 16922 if (__builtin_constant_p(((((struct thread_info *)(tsk)->stack)->status & 0x0001))) ? !!((((struct thread_info *)(tsk)->stack)->status & 0x0001)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 300, }; ______r = !!((((struct thread_info *)(tsk)->stack)->status & 0x0001)); ______f.miss_hit[______r]++; ______r; })) {
 16923 asm volatile("1: fwait\n"
 16924 "2:\n"
 16925 " .section __ex_table,\"a\"\n" " " ".balign 4" " " "\n" " " ".long" " " "1b" "," "2b" "\n" " .previous\n");
 16926 ((struct thread_info *)(tsk)->stack)->status &= ~0x0001;
 16927 write_cr0(read_cr0() | 0x00000008);
 16928 }
16929}
/*
 * kernel_fpu_begin() — allow FPU/SIMD use inside the kernel.
 * Pre-expansion (i387.h:~314):
 *     struct thread_info *me = current_thread_info();
 *     preempt_disable();                  // add_preempt_count(1) + barrier
 *     if (me->status & TS_USEDFPU)        // 0x0001
 *         __save_init_fpu(me->task);      // stash the user's FPU state
 *     else
 *         clts();                         // clear CR0.TS so FPU insns don't trap
 * Must be paired with kernel_fpu_end().
 */
16930static inline __attribute__((always_inline)) void kernel_fpu_begin(void)
16931{
 16932 struct thread_info *me = current_thread_info();
 16933 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 16934 if (__builtin_constant_p(((me->status & 0x0001))) ? !!((me->status & 0x0001)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 314, }; ______r = !!((me->status & 0x0001)); ______f.miss_hit[______r]++; ______r; }))
 16935 __save_init_fpu(me->task);
 16936 else
 16937 clts();
16938}
/*
 * kernel_fpu_end() — counterpart to kernel_fpu_begin().
 * Pre-expansion: stts(); preempt_enable();
 *   - write_cr0(read_cr0() | 0x8) re-arms the CR0.TS lazy-FPU trap;
 *   - the do/while soup is preempt_enable(): sub_preempt_count(1), barrier,
 *     then preempt_schedule() if TIF_NEED_RESCHED (thread flag 3) is set —
 *     wrapped in the ftrace branch-profiling machinery.
 */
16939static inline __attribute__((always_inline)) void kernel_fpu_end(void)
16940{
 16941 write_cr0(read_cr0() | 0x00000008);
 16942 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 323, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 323, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 323, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 323, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
16943}
/*
 * irq_fpu_usable() — may the kernel touch the FPU right now?
 * True when any of:
 *   - preempt_count has no hardirq/softirq/NMI bits set (the big mask
 *     selects those bit fields), i.e. not in interrupt context;
 *   - there are no saved interrupt registers (regs == NULL);
 *   - the interrupt came from user mode; or
 *   - CR0.TS (0x8) is set, meaning no live kernel FPU state to clobber.
 */
16944static inline __attribute__((always_inline)) bool irq_fpu_usable(void)
16945{
 16946 struct pt_regs *regs;
 16947 return !(((current_thread_info()->preempt_count) & ((((1UL << (10))-1) << ((0 + 8) + 8)) | (((1UL << (8))-1) << (0 + 8)) | (((1UL << (1))-1) << (((0 + 8) + 8) + 10))))) || !(regs = get_irq_regs()) ||
 16948 user_mode(regs) || (read_cr0() & 0x00000008);
16949}
/*
 * irq_ts_save() — save and clear CR0.TS around FPU use in interrupt context.
 * Pre-expansion (i387.h:348-355):
 *     if (!in_interrupt())      // preempt_count with PREEMPT_ACTIVE masked off
 *         return 0;             // process context: nothing to do
 *     if (read_cr0() & X86_CR0_TS) { clts(); return 1; }
 *     return 0;
 * Returns the previous TS state; pass it to irq_ts_restore().
 */
16950static inline __attribute__((always_inline)) int irq_ts_save(void)
16951{
 16952 if (__builtin_constant_p(((!(((current_thread_info()->preempt_count) & ~0x10000000) != 0)))) ? !!((!(((current_thread_info()->preempt_count) & ~0x10000000) != 0))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 348, }; ______r = !!((!(((current_thread_info()->preempt_count) & ~0x10000000) != 0))); ______f.miss_hit[______r]++; ______r; }))
 16953 return 0;
 16954 if (__builtin_constant_p(((read_cr0() & 0x00000008))) ? !!((read_cr0() & 0x00000008)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 351, }; ______r = !!((read_cr0() & 0x00000008)); ______f.miss_hit[______r]++; ______r; })) {
 16955 clts();
 16956 return 1;
 16957 }
 16958 return 0;
16959}
/*
 * irq_ts_restore() — undo irq_ts_save(): if TS was set before (TS_state
 * non-zero), set CR0.TS (0x8) again.  The ?: wrapper is the branch profiler.
 */
16960static inline __attribute__((always_inline)) void irq_ts_restore(int TS_state)
16961{
 16962 if (__builtin_constant_p(((TS_state))) ? !!((TS_state)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 361, }; ______r = !!((TS_state)); ______f.miss_hit[______r]++; ______r; }))
 16963 write_cr0(read_cr0() | 0x00000008);
16964}
/*
 * save_init_fpu() — preemption-safe wrapper:
 *     preempt_disable();
 *     __save_init_fpu(tsk);
 *     stts();                 // set CR0.TS (0x8)
 *     preempt_enable();       // expanded below, incl. preempt_schedule()
 */
16965static inline __attribute__((always_inline)) void save_init_fpu(struct task_struct *tsk)
16966{
 16967 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 16968 __save_init_fpu(tsk);
 16969 write_cr0(read_cr0() | 0x00000008);
 16970 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 373, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 373, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 373, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 373, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
16971}
/*
 * unlazy_fpu() — preemption-safe wrapper:
 *     preempt_disable(); __unlazy_fpu(tsk); preempt_enable();
 * (the trailing do/while is the expanded preempt_enable(), including the
 * conditional preempt_schedule() on TIF_NEED_RESCHED, flag 3).
 */
16972static inline __attribute__((always_inline)) void unlazy_fpu(struct task_struct *tsk)
16973{
 16974 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 16975 __unlazy_fpu(tsk);
 16976 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 380, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 380, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 380, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 380, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
16977}
/*
 * clear_fpu() — preemption-safe wrapper:
 *     preempt_disable(); __clear_fpu(tsk); preempt_enable();
 * Drops the task's live FPU state (no save) under preemption protection.
 */
16978static inline __attribute__((always_inline)) void clear_fpu(struct task_struct *tsk)
16979{
 16980 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 16981 __clear_fpu(tsk);
 16982 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 387, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 387, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 387, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 387, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
16983}
/*
 * get_fpu_cwd() — return the FPU control word from @tsk's saved state.
 * Pre-expansion: cpu_has_fxsr ? fxsave.cwd : (unsigned short)fsave.cwd.
 * The enormous condition is the expanded cpu_has()/boot_cpu_has() test for
 * feature bit (0*32+24) (presumably X86_FEATURE_FXSR), wrapped in the
 * branch profiler.
 */
16984static inline __attribute__((always_inline)) unsigned short get_fpu_cwd(struct task_struct *tsk)
16985{
 16986 if (__builtin_constant_p((((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? 
constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 395, }; ______r = !!(((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) {
 16987 return tsk->thread.fpu.state->fxsave.cwd;
 16988 } else {
 16989 return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
 16990 }
16991}
/*
 * get_fpu_swd() — return the FPU status word from @tsk's saved state.
 * Pre-expansion: cpu_has_fxsr ? fxsave.swd : (unsigned short)fsave.swd.
 * Same expanded cpu-feature test for bit (0*32+24) as get_fpu_cwd() above.
 */
16992static inline __attribute__((always_inline)) unsigned short get_fpu_swd(struct task_struct *tsk)
16993{
 16994 if (__builtin_constant_p((((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? 
constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 404, }; ______r = !!(((__builtin_constant_p((0*32+24)) && ( ((((0*32+24))>>5)==0 && (1UL<<(((0*32+24))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+24))>>5)==1 && (1UL<<(((0*32+24))&31) & (0|0))) || ((((0*32+24))>>5)==2 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==3 && (1UL<<(((0*32+24))&31) & (0))) || ((((0*32+24))>>5)==4 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==5 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==6 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==7 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==8 && (1UL<<(((0*32+24))&31) & 0)) || ((((0*32+24))>>5)==9 && (1UL<<(((0*32+24))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+24))) ? constant_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+24)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) {
 16995 return tsk->thread.fpu.state->fxsave.swd;
 16996 } else {
 16997 return (unsigned short)tsk->thread.fpu.state->fsave.swd;
 16998 }
16999}
/*
 * get_fpu_mxcsr() — return the saved MXCSR, or the architectural default
 * 0x1f80 when the CPU lacks feature bit (0*32+25) (presumably
 * X86_FEATURE_XMM, i.e. SSE — no fxsave image with an mxcsr field).
 * Same expanded cpu-feature test shape as get_fpu_cwd()/get_fpu_swd().
 */
17000static inline __attribute__((always_inline)) unsigned short get_fpu_mxcsr(struct task_struct *tsk)
17001{
 17002 if (__builtin_constant_p((((__builtin_constant_p((0*32+25)) && ( ((((0*32+25))>>5)==0 && (1UL<<(((0*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+25))>>5)==1 && (1UL<<(((0*32+25))&31) & (0|0))) || ((((0*32+25))>>5)==2 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==3 && (1UL<<(((0*32+25))&31) & (0))) || ((((0*32+25))>>5)==4 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==5 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==6 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==7 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==8 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==9 && (1UL<<(((0*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+25))) ? constant_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+25)) && ( ((((0*32+25))>>5)==0 && (1UL<<(((0*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+25))>>5)==1 && (1UL<<(((0*32+25))&31) & (0|0))) || ((((0*32+25))>>5)==2 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==3 && (1UL<<(((0*32+25))&31) & (0))) || ((((0*32+25))>>5)==4 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==5 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==6 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==7 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==8 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==9 && (1UL<<(((0*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+25))) ? 
constant_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 413, }; ______r = !!(((__builtin_constant_p((0*32+25)) && ( ((((0*32+25))>>5)==0 && (1UL<<(((0*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+25))>>5)==1 && (1UL<<(((0*32+25))&31) & (0|0))) || ((((0*32+25))>>5)==2 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==3 && (1UL<<(((0*32+25))&31) & (0))) || ((((0*32+25))>>5)==4 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==5 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==6 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==7 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==8 && (1UL<<(((0*32+25))&31) & 0)) || ((((0*32+25))>>5)==9 && (1UL<<(((0*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+25))) ? constant_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) {
 17003 return tsk->thread.fpu.state->fxsave.mxcsr;
 17004 } else {
 17005 return 0x1f80;
 17006 }
17007}
17008static bool fpu_allocated(struct fpu *fpu)
17009{
17010 return fpu->state != ((void *)0);
17011}
/*
 * fpu_alloc() — ensure fpu->state points at an xstate buffer.
 * Pre-expansion (i387.h:427-433):
 *     if (fpu_allocated(fpu))
 *         return 0;                        // already allocated: idempotent
 *     fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
 *     if (!fpu->state)
 *         return -12;                      // -ENOMEM
 *     WARN_ON((unsigned long)fpu->state & 15);  // fxsave needs 16-byte align
 *     return 0;
 * The expanded machinery is the branch profiler plus WARN_ON's
 * warn_slowpath_null() call.
 */
17012static inline __attribute__((always_inline)) int fpu_alloc(struct fpu *fpu)
17013{
 17014 if (__builtin_constant_p(((fpu_allocated(fpu)))) ? !!((fpu_allocated(fpu))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 427, }; ______r = !!((fpu_allocated(fpu))); ______f.miss_hit[______r]++; ______r; }))
 17015 return 0;
 17016 fpu->state = kmem_cache_alloc(task_xstate_cachep, ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
 17017 if (__builtin_constant_p(((!fpu->state))) ? !!((!fpu->state)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 430, }; ______r = !!((!fpu->state)); ______f.miss_hit[______r]++; ______r; }))
 17018 return -12;
 17019 ({ int __ret_warn_on = !!((unsigned long)fpu->state & 15); if (__builtin_constant_p((((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = !!(((__builtin_constant_p(__ret_warn_on) ? !!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) warn_slowpath_null("/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", 432); (__builtin_constant_p(__ret_warn_on) ? 
!!(__ret_warn_on) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 432, }; ______r = __builtin_expect(!!(__ret_warn_on), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })); });
 17020 return 0;
17021}
/*
 * fpu_free - return @fpu's extended-state buffer to the slab cache.
 *
 * The if() condition is simply "if (fpu->state)"; the surrounding
 * statement expression is the preprocessed form of the kernel's
 * branch-profiling instrumentation, which bumps a per-call-site
 * miss/hit counter stored in the _ftrace_branch section.  Clearing the
 * pointer afterwards makes a second fpu_free() call a harmless no-op.
 */
17022static inline __attribute__((always_inline)) void fpu_free(struct fpu *fpu)
17023{
17024 if (__builtin_constant_p(((fpu->state))) ? !!((fpu->state)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/i387.h", .line = 438, }; ______r = !!((fpu->state)); ______f.miss_hit[______r]++; ______r; })) {
17025 kmem_cache_free(task_xstate_cachep, fpu->state);
17026 fpu->state = ((void *)0);
17027 }
17028}
17029static inline __attribute__((always_inline)) void fpu_copy(struct fpu *dst, struct fpu *src)
17030{
17031 __builtin_memcpy(dst->state, src->state, xstate_size);
17032}
17033extern void fpu_finit(struct fpu *fpu);
17034void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
17035 const u8 *src);
17036void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
17037 const u8 *src);
17038struct bio_set;
17039struct bio;
17040struct bio_integrity_payload;
17041struct page;
17042struct block_device;
17043typedef void (bio_end_io_t) (struct bio *, int);
17044typedef void (bio_destructor_t) (struct bio *);
/*
 * struct bio_vec - one contiguous segment of block I/O: @bv_len bytes
 * starting at byte offset @bv_offset within page @bv_page.
 */
17045struct bio_vec {
17046 struct page *bv_page;
17047 unsigned int bv_len;
17048 unsigned int bv_offset;
17049};
/*
 * struct bio - main unit of block-layer I/O.
 *
 * Describes an operation aimed at sector bi_sector of device bi_bdev,
 * carried in the bi_io_vec array of bi_vcnt segments (bi_idx marks the
 * current one).  bi_end_io() is invoked on completion with bi_private
 * as caller context; bi_cnt is a reference count and bi_destructor()
 * releases the bio.  bi_inline_vecs is a zero-length trailing array
 * used when the vector is allocated together with the bio itself.
 * NOTE(review): field roles are inferred from types/names — no code
 * manipulating them is visible in this chunk.
 */
17050struct bio {
17051 sector_t bi_sector;
17052 struct bio *bi_next;
17053 struct block_device *bi_bdev;
17054 unsigned long bi_flags;
17055 unsigned long bi_rw;
17056 unsigned short bi_vcnt;
17057 unsigned short bi_idx;
17058 unsigned int bi_phys_segments;
17059 unsigned int bi_size;
17060 unsigned int bi_seg_front_size;
17061 unsigned int bi_seg_back_size;
17062 unsigned int bi_max_vecs;
17063 unsigned int bi_comp_cpu;
17064 atomic_t bi_cnt;
17065 struct bio_vec *bi_io_vec;
17066 bio_end_io_t *bi_end_io;
17067 void *bi_private;
17068 bio_destructor_t *bi_destructor;
17069 struct bio_vec bi_inline_vecs[0];
17070};
/*
 * enum rq_flag_bits - bit positions for block-layer request/bio flags.
 * Each __REQ_* constant names one flag bit; __REQ_NR_BITS is the total
 * number of defined bits.
 */
17071enum rq_flag_bits {
17072 __REQ_WRITE,
17073 __REQ_FAILFAST_DEV,
17074 __REQ_FAILFAST_TRANSPORT,
17075 __REQ_FAILFAST_DRIVER,
17076 __REQ_SYNC,
17077 __REQ_META,
17078 __REQ_DISCARD,
17079 __REQ_NOIDLE,
17080 __REQ_RAHEAD,
17081 __REQ_THROTTLED,
17082 __REQ_SORTED,
17083 __REQ_SOFTBARRIER,
17084 __REQ_FUA,
17085 __REQ_NOMERGE,
17086 __REQ_STARTED,
17087 __REQ_DONTPREP,
17088 __REQ_QUEUED,
17089 __REQ_ELVPRIV,
17090 __REQ_FAILED,
17091 __REQ_QUIET,
17092 __REQ_PREEMPT,
17093 __REQ_ALLOCED,
17094 __REQ_COPY_USER,
17095 __REQ_FLUSH,
17096 __REQ_FLUSH_SEQ,
17097 __REQ_IO_STAT,
17098 __REQ_MIXED_MERGE,
17099 __REQ_SECURE,
17100 __REQ_NR_BITS,
17101};
/*
 * struct fstrim_range - filesystem TRIM request parameters: trim free
 * space within [start, start+len), skipping extents shorter than
 * minlen.  NOTE(review): semantics inferred from field names — confirm
 * against the FITRIM ioctl definition.
 */
17102struct fstrim_range {
17103 __u64 start;
17104 __u64 len;
17105 __u64 minlen;
17106};
/*
 * struct files_stat_struct - VFS file-handle accounting: currently
 * allocated, currently free, and the configured maximum.
 * NOTE(review): meanings inferred from the field names only.
 */
17107struct files_stat_struct {
17108 unsigned long nr_files;
17109 unsigned long nr_free_files;
17110 unsigned long max_files;
17111};
/*
 * struct inodes_stat_t - inode cache counters (total and unused).
 * dummy[] preserves the size/layout of the structure as exported
 * elsewhere — NOTE(review): padding purpose inferred.
 */
17112struct inodes_stat_t {
17113 int nr_inodes;
17114 int nr_unused;
17115 int dummy[5];
17116};
/*
 * old_valid_dev - true when @dev is representable in the legacy 16-bit
 * encoding (8-bit major, 8-bit minor).
 */
static inline __attribute__((always_inline)) int old_valid_dev(dev_t dev)
{
	unsigned int major = (unsigned int)(dev >> 20);
	unsigned int minor = (unsigned int)(dev & ((1U << 20) - 1));

	return major < 256 && minor < 256;
}
17121static inline __attribute__((always_inline)) u16 old_encode_dev(dev_t dev)
17122{
17123 return (((unsigned int) ((dev) >> 20)) << 8) | ((unsigned int) ((dev) & ((1U << 20) - 1)));
17124}
17125static inline __attribute__((always_inline)) dev_t old_decode_dev(u16 val)
17126{
17127 return ((((val >> 8) & 255) << 20) | (val & 255));
17128}
/* new_valid_dev - every dev_t fits the new 32-bit encoding: always 1. */
static inline __attribute__((always_inline)) int new_valid_dev(dev_t dev)
{
	(void)dev;

	return 1;
}
17133static inline __attribute__((always_inline)) u32 new_encode_dev(dev_t dev)
17134{
17135 unsigned major = ((unsigned int) ((dev) >> 20));
17136 unsigned minor = ((unsigned int) ((dev) & ((1U << 20) - 1)));
17137 return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
17138}
17139static inline __attribute__((always_inline)) dev_t new_decode_dev(u32 dev)
17140{
17141 unsigned major = (dev & 0xfff00) >> 8;
17142 unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
17143 return (((major) << 20) | (minor));
17144}
/* huge_valid_dev - every dev_t fits the 64-bit encoding: always 1. */
static inline __attribute__((always_inline)) int huge_valid_dev(dev_t dev)
{
	(void)dev;

	return 1;
}
17149static inline __attribute__((always_inline)) u64 huge_encode_dev(dev_t dev)
17150{
17151 return new_encode_dev(dev);
17152}
17153static inline __attribute__((always_inline)) dev_t huge_decode_dev(u64 dev)
17154{
17155 return new_decode_dev(dev);
17156}
/*
 * sysv_valid_dev - true when @dev fits the SysV encoding:
 * 14-bit major, 18-bit minor.
 */
static inline __attribute__((always_inline)) int sysv_valid_dev(dev_t dev)
{
	unsigned int major = (unsigned int)(dev >> 20);
	unsigned int minor = (unsigned int)(dev & ((1U << 20) - 1));

	return major < (1 << 14) && minor < (1 << 18);
}
17161static inline __attribute__((always_inline)) u32 sysv_encode_dev(dev_t dev)
17162{
17163 return ((unsigned int) ((dev) & ((1U << 20) - 1))) | (((unsigned int) ((dev) >> 20)) << 18);
17164}
17165static inline __attribute__((always_inline)) unsigned sysv_major(u32 dev)
17166{
17167 return (dev >> 18) & 0x3fff;
17168}
17169static inline __attribute__((always_inline)) unsigned sysv_minor(u32 dev)
17170{
17171 return dev & 0x3ffff;
17172}
/*
 * bit_spin_lock - take the spinlock stored in bit @bitnum of *@addr.
 *
 * Preprocessed form of the generic bit spinlock: disable preemption,
 * then loop on test_and_set_bit_lock(); on contention, re-enable
 * preemption and cpu_relax() until the bit reads clear, then disable
 * preemption and retry.  The statement expressions wrapping the
 * conditions are expanded branch-profiling instrumentation.
 */
17173static inline __attribute__((always_inline)) void bit_spin_lock(int bitnum, unsigned long *addr)
17174{
 /* preempt_disable() */
17175 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
 /* while (test_and_set_bit_lock(bitnum, addr)) { ... } */
17176 while ((__builtin_constant_p(test_and_set_bit_lock(bitnum, addr)) ? !!(test_and_set_bit_lock(bitnum, addr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 25, }; ______r = __builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))) {
 /* preempt_enable() — incl. reschedule check — while we spin */
17177 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 26, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 26, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 26, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 26, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
 /* busy-wait until the lock bit reads clear */
17178 do {
17179 cpu_relax();
17180 } while ((__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr))));
 /* preempt_disable() again before retrying the atomic acquire */
17181 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
17182 }
 /* annotation macro expanded to nothing */
17183 (void)0;
17184}
/*
 * bit_spin_trylock - try once to take bit @bitnum of *@addr as a lock.
 *
 * Disables preemption, attempts test_and_set_bit_lock(); on failure
 * re-enables preemption and returns 0, otherwise returns 1 with
 * preemption still disabled.  The giant if() is the acquire attempt
 * wrapped in expanded branch-profiling instrumentation.
 */
17185static inline __attribute__((always_inline)) int bit_spin_trylock(int bitnum, unsigned long *addr)
17186{
 /* preempt_disable() */
17187 do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0);
17188 if (__builtin_constant_p((((__builtin_constant_p(test_and_set_bit_lock(bitnum, addr)) ? !!(test_and_set_bit_lock(bitnum, addr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 43, }; ______r = __builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_and_set_bit_lock(bitnum, addr)) ? !!(test_and_set_bit_lock(bitnum, addr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 43, }; ______r = __builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 43, }; ______r = !!(((__builtin_constant_p(test_and_set_bit_lock(bitnum, addr)) ? !!(test_and_set_bit_lock(bitnum, addr)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 43, }; ______r = __builtin_expect(!!(test_and_set_bit_lock(bitnum, addr)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
 /* lost the race: preempt_enable() and report failure */
17189 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 44, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 44, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 44, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 44, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
17190 return 0;
17191 }
 /* annotation macro expanded to nothing */
17192 (void)0;
17193 return 1;
17194}
/*
 * bit_spin_unlock - release bit @bitnum of *@addr with release
 * semantics and re-enable preemption.
 *
 * The first statement is the expanded BUG_ON(!test_bit(bitnum, addr)):
 * if the lock bit is not set, an inline ud2 trap fires (with a
 * __bug_table entry) — instrumented by branch profiling.  Then the bit
 * is cleared atomically and preemption re-enabled.
 */
17195static inline __attribute__((always_inline)) void bit_spin_unlock(int bitnum, unsigned long *addr)
17196{
17197 do { if (__builtin_constant_p((((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 58, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 58, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 58, }; ______r = !!(((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? 
constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 58, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/bit_spinlock.h"), "i" (58), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 /* atomic release of the lock bit */
17198 clear_bit_unlock(bitnum, addr);
 /* preempt_enable() (with reschedule check) */
17199 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 63, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 63, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 63, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 63, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
 /* annotation macro expanded to nothing */
17200 (void)0;
17201}
/*
 * __bit_spin_unlock - like bit_spin_unlock() but uses the non-atomic
 * __clear_bit_unlock(), valid only when no other writer can touch the
 * word concurrently.  First statement is the expanded
 * BUG_ON(!test_bit(...)) trap; last giant statement is
 * preempt_enable() with its reschedule check.
 */
17202static inline __attribute__((always_inline)) void __bit_spin_unlock(int bitnum, unsigned long *addr)
17203{
17204 do { if (__builtin_constant_p((((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 75, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 75, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 75, }; ______r = !!(((__builtin_constant_p(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) ? !!(!(__builtin_constant_p((bitnum)) ? 
constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 75, }; ______r = __builtin_expect(!!(!(__builtin_constant_p((bitnum)) ? constant_test_bit((bitnum), (addr)) : variable_test_bit((bitnum), (addr)))), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/bit_spinlock.h"), "i" (75), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 /* non-atomic release of the lock bit */
17205 __clear_bit_unlock(bitnum, addr);
17206 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 80, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 80, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 80, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/bit_spinlock.h", .line = 80, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
 /* annotation macro expanded to nothing */
17207 (void)0;
17208}
/*
 * bit_spin_is_locked - non-atomically test whether bit @bitnum of
 * *@addr is set; constant bit numbers use the constant-folded test.
 */
static inline __attribute__((always_inline)) int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
	int locked;

	if (__builtin_constant_p((bitnum)))
		locked = constant_test_bit((bitnum), (addr));
	else
		locked = variable_test_bit((bitnum), (addr));
	return locked;
}
/*
 * struct hlist_bl_head - head of a bit-locked hlist.  Bit 0 of @first
 * doubles as a spinlock: hlist_bl_first() masks it off when reading
 * and hlist_bl_lock() takes it via bit_spin_lock(0, ...).
 */
17213struct hlist_bl_head {
17214 struct hlist_bl_node *first;
17215};
/*
 * struct hlist_bl_node - entry in a bit-locked hlist.  @pprev points
 * at the previous node's next pointer (or the head's first pointer),
 * so unlinking needs no back-pointer to the head.
 */
17216struct hlist_bl_node {
17217 struct hlist_bl_node *next, **pprev;
17218};
17219static inline __attribute__((always_inline)) void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
17220{
17221 h->next = ((void *)0);
17222 h->pprev = ((void *)0);
17223}
17224static inline __attribute__((always_inline)) int hlist_bl_unhashed(const struct hlist_bl_node *h)
17225{
17226 return !h->pprev;
17227}
17228static inline __attribute__((always_inline)) struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
17229{
17230 return (struct hlist_bl_node *)
17231 ((unsigned long)h->first & ~1UL);
17232}
17233static inline __attribute__((always_inline)) void hlist_bl_set_first(struct hlist_bl_head *h,
17234 struct hlist_bl_node *n)
17235{
17236 ;
17237 ;
17238 h->first = (struct hlist_bl_node *)((unsigned long)n | 1UL);
17239}
17240static inline __attribute__((always_inline)) int hlist_bl_empty(const struct hlist_bl_head *h)
17241{
17242 return !((unsigned long)h->first & ~1UL);
17243}
/*
 * hlist_bl_add_head - insert @n at the front of @h.  The final store
 * goes through hlist_bl_set_first() so the lock bit in the head
 * pointer is preserved.  The if() is branch-profiling instrumentation
 * around a plain "if (first)".
 */
17244static inline __attribute__((always_inline)) void hlist_bl_add_head(struct hlist_bl_node *n,
17245 struct hlist_bl_head *h)
17246{
17247 struct hlist_bl_node *first = hlist_bl_first(h);
17248 n->next = first;
17249 if (__builtin_constant_p(((first))) ? !!((first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list_bl.h", .line = 82, }; ______r = !!((first)); ______f.miss_hit[______r]++; ______r; }))
17250 first->pprev = &n->next;
17251 n->pprev = &h->first;
17252 hlist_bl_set_first(h, n);
17253}
/*
 * __hlist_bl_del - unlink @n from its list without touching @n's own
 * links.  The store through *pprev ORs back bit 0 of the old value so
 * that a lock bit held in the head pointer survives the update.  The
 * if() is branch-profiling instrumentation around "if (next)".  The
 * bare ';' was a debug macro that expanded to nothing.
 */
17254static inline __attribute__((always_inline)) void __hlist_bl_del(struct hlist_bl_node *n)
17255{
17256 struct hlist_bl_node *next = n->next;
17257 struct hlist_bl_node **pprev = n->pprev;
17258 ;
17259 *pprev = (struct hlist_bl_node *)
17260 ((unsigned long)next |
17261 ((unsigned long)*pprev & 1UL));
17262 if (__builtin_constant_p(((next))) ? !!((next)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list_bl.h", .line = 99, }; ______r = !!((next)); ______f.miss_hit[______r]++; ______r; }))
17263 next->pprev = pprev;
17264}
17265static inline __attribute__((always_inline)) void hlist_bl_del(struct hlist_bl_node *n)
17266{
17267 __hlist_bl_del(n);
17268 n->next = ((void *) 0x00100100 + (0x0UL));
17269 n->pprev = ((void *) 0x00200200 + (0x0UL));
17270}
/*
 * hlist_bl_del_init - unlink @n if it is currently hashed and reset it
 * to the pristine unhashed state so it can be reinserted.  The if() is
 * branch-profiling instrumentation around "if (!hlist_bl_unhashed(n))".
 */
17271static inline __attribute__((always_inline)) void hlist_bl_del_init(struct hlist_bl_node *n)
17272{
17273 if (__builtin_constant_p(((!hlist_bl_unhashed(n)))) ? !!((!hlist_bl_unhashed(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/list_bl.h", .line = 112, }; ______r = !!((!hlist_bl_unhashed(n))); ______f.miss_hit[______r]++; ______r; })) {
17274 __hlist_bl_del(n);
17275 INIT_HLIST_BL_NODE(n);
17276 }
17277}
/* hlist_bl_lock - take the bit-spinlock in bit 0 of @b's head pointer. */
static inline __attribute__((always_inline)) void hlist_bl_lock(struct hlist_bl_head *b)
{
	unsigned long *lock_word = (unsigned long *)b;

	bit_spin_lock(0, lock_word);
}
/*
 * hlist_bl_unlock - drop the bit-spinlock in bit 0 of @b's head
 * pointer (non-atomic variant: the holder is the only writer).
 */
static inline __attribute__((always_inline)) void hlist_bl_unlock(struct hlist_bl_head *b)
{
	unsigned long *lock_word = (unsigned long *)b;

	__bit_spin_unlock(0, lock_word);
}
/*
 * hlist_bl_set_first_rcu - RCU-publish @n (with lock bit 0 set) as the
 * first node of @h.  The statement expression is the expanded
 * rcu_assign_pointer(): a compiler barrier before the store unless the
 * assigned value is a compile-time NULL, with branch-profiling
 * instrumentation wrapped around the constant check.  The bare ';'
 * lines were debug macros that expanded to nothing.
 */
17286static inline __attribute__((always_inline)) void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
17287 struct hlist_bl_node *n)
17288{
17289 ;
17290 ;
17291 ({ if (__builtin_constant_p(((!__builtin_constant_p(((struct hlist_bl_node *)((unsigned long)n | 1UL))) || ((((struct hlist_bl_node *)((unsigned long)n | 1UL))) != ((void *)0))))) ? !!((!__builtin_constant_p(((struct hlist_bl_node *)((unsigned long)n | 1UL))) || ((((struct hlist_bl_node *)((unsigned long)n | 1UL))) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
17292 "include/linux/rculist_bl.h"
17293 , .line =
17294 17
17295 , }; ______r = !!((!__builtin_constant_p(((struct hlist_bl_node *)((unsigned long)n | 1UL))) || ((((struct hlist_bl_node *)((unsigned long)n | 1UL))) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); ((h->first)) = (typeof(*((struct hlist_bl_node *)((unsigned long)n | 1UL))) *)(((struct hlist_bl_node *)((unsigned long)n | 1UL))); })
17296 ;
17297}
/*
 * hlist_bl_first_rcu - RCU-safe read of @h's first node.  The
 * statement expression is the expanded rcu_dereference(): a forced
 * volatile load of h->first (the empty do/while loops are debug hooks
 * compiled out); lock bit 0 is then masked off the result.
 */
17298static inline __attribute__((always_inline)) struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
17299{
17300 return (struct hlist_bl_node *)
17301 ((unsigned long)({ typeof(*(h->first)) *_________p1 = (typeof(*(h->first))* )(*(volatile typeof((h->first)) *)&((h->first))); do { } while (0); ; do { } while (0); ((typeof(*(h->first)) *)(_________p1)); }) & ~1UL);
17302}
/*
 * hlist_bl_del_init_rcu - unlink @n if hashed, clearing only pprev so
 * the node reads as unhashed while n->next stays valid for concurrent
 * RCU readers still traversing past it.  The if() is branch-profiling
 * instrumentation around "if (!hlist_bl_unhashed(n))".
 */
17303static inline __attribute__((always_inline)) void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
17304{
17305 if (__builtin_constant_p(((!hlist_bl_unhashed(n)))) ? !!((!hlist_bl_unhashed(n))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist_bl.h", .line = 48, }; ______r = !!((!hlist_bl_unhashed(n))); ______f.miss_hit[______r]++; ______r; })) {
17306 __hlist_bl_del(n);
17307 n->pprev = ((void *)0);
17308 }
17309}
/*
 * hlist_bl_del_rcu - unlink @n, poisoning pprev but deliberately
 * leaving n->next intact so concurrent RCU readers can still step
 * over the removed node.
 */
17310static inline __attribute__((always_inline)) void hlist_bl_del_rcu(struct hlist_bl_node *n)
17311{
17312 __hlist_bl_del(n);
17313 n->pprev = ((void *) 0x00200200 + (0x0UL));
17314}
/*
 * hlist_bl_add_head_rcu - insert @n at the front of @h for RCU
 * readers: the node is fully linked before the publishing store in
 * hlist_bl_set_first_rcu().  The if() is branch-profiling
 * instrumentation around "if (first)".
 */
17315static inline __attribute__((always_inline)) void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
17316 struct hlist_bl_head *h)
17317{
17318 struct hlist_bl_node *first;
17319 first = hlist_bl_first(h);
17320 n->next = first;
17321 if (__builtin_constant_p(((first))) ? !!((first)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/rculist_bl.h", .line = 107, }; ______r = !!((first)); ______f.miss_hit[______r]++; ______r; }))
17322 first->pprev = &n->next;
17323 n->pprev = &h->first;
17324 hlist_bl_set_first_rcu(h, n);
17325}
17326struct nameidata;
17327struct path;
17328struct vfsmount;
/*
 * struct qstr - a name plus its precomputed hash and length; @name
 * points at @len bytes (see dentry_cmp(), which compares lengths first
 * and then bytes).
 */
17329struct qstr {
17330 unsigned int hash;
17331 unsigned int len;
17332 const unsigned char *name;
17333};
/*
 * struct dentry_stat_t - dentry cache counters (total and unused);
 * dummy[] preserves the externally visible layout.
 * NOTE(review): age_limit/want_pages semantics are not visible here.
 */
17334struct dentry_stat_t {
17335 int nr_dentry;
17336 int nr_unused;
17337 int age_limit;
17338 int want_pages;
17339 int dummy[2];
17340};
17341extern struct dentry_stat_t dentry_stat;
/*
 * dentry_cmp - compare name (cs, scount) against (ct, tcount);
 * returns 0 when equal, non-zero otherwise.  Lengths are compared
 * first, then bytes until the first mismatch.  The if() conditions are
 * branch-profiling instrumentation around "scount != tcount" and
 * "ret".  NOTE(review): the do/while dereferences cs/ct at least once,
 * so callers must not pass tcount == 0.
 */
17342static inline __attribute__((always_inline)) int dentry_cmp(const unsigned char *cs, size_t scount,
17343 const unsigned char *ct, size_t tcount)
17344{
17345 int ret;
17346 if (__builtin_constant_p(((scount != tcount))) ? !!((scount != tcount)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 58, }; ______r = !!((scount != tcount)); ______f.miss_hit[______r]++; ______r; }))
17347 return 1;
17348 do {
17349 ret = (*cs != *ct);
17350 if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 62, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; }))
17351 break;
17352 cs++;
17353 ct++;
17354 tcount--;
17355 } while (tcount);
17356 return ret;
17357}
/*
 * partial_name_hash - fold one character @c into the running name hash
 * @prevhash: mix the character's nibbles in, then multiply by 11.
 */
static inline __attribute__((always_inline)) unsigned long
partial_name_hash(unsigned long c, unsigned long prevhash)
{
	unsigned long mixed = prevhash + (c << 4) + (c >> 4);

	return mixed * 11;
}
/*
 * end_name_hash - finalize a running name hash by truncating it to the
 * width of unsigned int.
 */
static inline __attribute__((always_inline)) unsigned long end_name_hash(unsigned long hash)
{
	unsigned int truncated = hash;

	return truncated;
}
/*
 * full_name_hash - hash @len bytes of @name by folding each byte in
 * with partial_name_hash() and finalizing via end_name_hash().
 */
static inline __attribute__((always_inline)) unsigned int
full_name_hash(const unsigned char *name, unsigned int len)
{
	unsigned long acc = 0;
	unsigned int i;

	for (i = 0; i < len; i++)
		acc = partial_name_hash(name[i], acc);
	return end_name_hash(acc);
}
/*
 * struct dentry - directory cache entry binding the name @d_name under
 * @d_parent to @d_inode.
 *
 * Short names are stored inline in @d_iname; dname_external() checks
 * whether d_name.name points elsewhere.  @d_hash chains the entry in a
 * bit-locked hash list (hlist_bl); @d_lock and @d_seq guard per-entry
 * state; @d_u is either the sibling link (d_child) or the RCU head
 * used after unlinking.  NOTE(review): d_inode == NULL presumably
 * marks a negative entry — not visible from this chunk.
 */
17375struct dentry {
17376 unsigned int d_flags;
17377 seqcount_t d_seq;
17378 struct hlist_bl_node d_hash;
17379 struct dentry *d_parent;
17380 struct qstr d_name;
17381 struct inode *d_inode;
17382 unsigned char d_iname[36];
17383 unsigned int d_count;
17384 spinlock_t d_lock;
17385 const struct dentry_operations *d_op;
17386 struct super_block *d_sb;
17387 unsigned long d_time;
17388 void *d_fsdata;
17389 struct list_head d_lru;
17390 union {
17391 struct list_head d_child;
17392 struct rcu_head d_rcu;
17393 } d_u;
17394 struct list_head d_subdirs;
17395 struct list_head d_alias;
17396};
/*
 * enum dentry_d_lock_class - lock-nesting classes for a dentry's
 * d_lock: NORMAL for a single acquisition, NESTED when a second
 * d_lock is taken while one is already held.
 */
17397enum dentry_d_lock_class
17398{
17399 DENTRY_D_LOCK_NORMAL,
17400 DENTRY_D_LOCK_NESTED
17401};
/*
 * struct dentry_operations - per-dentry method table: revalidation,
 * name hashing/comparison, deletion policy, release, inode drop,
 * dynamic name generation, automount and transit management hooks.
 * The whole table is aligned to 64 bytes (1 << 6).
 */
17402struct dentry_operations {
17403 int (*d_revalidate)(struct dentry *, struct nameidata *);
17404 int (*d_hash)(const struct dentry *, const struct inode *,
17405 struct qstr *);
17406 int (*d_compare)(const struct dentry *, const struct inode *,
17407 const struct dentry *, const struct inode *,
17408 unsigned int, const char *, const struct qstr *);
17409 int (*d_delete)(const struct dentry *);
17410 void (*d_release)(struct dentry *);
17411 void (*d_iput)(struct dentry *, struct inode *);
17412 char *(*d_dname)(struct dentry *, char *, int);
17413 struct vfsmount *(*d_automount)(struct path *);
17414 int (*d_manage)(struct dentry *, bool);
17415} __attribute__((__aligned__((1 << (6)))));
17416extern seqlock_t rename_lock;
17417static inline __attribute__((always_inline)) int dname_external(struct dentry *dentry)
17418{
17419 return dentry->d_name.name != dentry->d_iname;
17420}
17421extern void d_instantiate(struct dentry *, struct inode *);
17422extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
17423extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
17424extern void __d_drop(struct dentry *dentry);
17425extern void d_drop(struct dentry *dentry);
17426extern void d_delete(struct dentry *);
17427extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op);
17428extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
17429extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
17430extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
17431extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
17432extern struct dentry * d_obtain_alias(struct inode *);
17433extern void shrink_dcache_sb(struct super_block *);
17434extern void shrink_dcache_parent(struct dentry *);
17435extern void shrink_dcache_for_umount(struct super_block *);
17436extern int d_invalidate(struct dentry *);
17437extern struct dentry * d_alloc_root(struct inode *);
17438extern void d_genocide(struct dentry *);
17439extern struct dentry *d_find_alias(struct inode *);
17440extern void d_prune_aliases(struct inode *);
17441extern int have_submounts(struct dentry *);
17442extern void d_rehash(struct dentry *);
/* d_add - bind @inode to @entry and then make the dentry reachable
 * through the dcache hash.  The instantiate must happen first so a
 * concurrent lookup never sees a hashed-but-unbound dentry. */
static inline __attribute__((always_inline)) void d_add(struct dentry *entry, struct inode *inode)
{
 d_instantiate(entry, inode);
 d_rehash(entry);
}
/* Like d_add(), but via d_instantiate_unique(): if an equivalent alias
 * already existed, rehash and return it; otherwise rehash @entry and
 * return NULL. */
static inline __attribute__((always_inline)) struct dentry *d_add_unique(struct dentry *entry, struct inode *inode)
{
 struct dentry *alias = d_instantiate_unique(entry, inode);
 if (alias != ((void *)0)) {
  d_rehash(alias);
  return alias;
 }
 d_rehash(entry);
 return alias;
}
17455extern void dentry_update_name_case(struct dentry *, struct qstr *);
17456extern void d_move(struct dentry *, struct dentry *);
17457extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
17458extern struct dentry *d_lookup(struct dentry *, struct qstr *);
17459extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
17460extern struct dentry *__d_lookup(struct dentry *, struct qstr *);
17461extern struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
17462 unsigned *seq, struct inode **inode);
/* Promote an RCU-walk reference on @dentry to a real refcount.
 * Caller holds dentry->d_lock.  Returns 1 and bumps d_count if the
 * dentry's d_seq still matches @seq (i.e. it did not change under us);
 * returns 0 otherwise.  The large conditional below is the expanded
 * ftrace branch-profiling instrumentation around that seqcount check. */
static inline __attribute__((always_inline)) int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
{
 int ret = 0;
 assert_spin_locked(&dentry->d_lock);
 if (__builtin_constant_p(((!read_seqcount_retry(&dentry->d_seq, seq)))) ? !!((!read_seqcount_retry(&dentry->d_seq, seq))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 327, }; ______r = !!((!read_seqcount_retry(&dentry->d_seq, seq))); ______f.miss_hit[______r]++; ______r; })) {
  ret = 1;
  dentry->d_count++;
 }
 return ret;
}
17473extern int d_validate(struct dentry *, struct dentry *);
17474extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
17475extern char *__d_path(const struct path *path, struct path *root, char *, int);
17476extern char *d_path(const struct path *, char *, int);
17477extern char *d_path_with_unreachable(const struct path *, char *, int);
17478extern char *dentry_path_raw(struct dentry *, char *, int);
17479extern char *dentry_path(struct dentry *, char *, int);
/* Take an extra reference on @dentry; the caller already holds d_lock.
 * NULL-tolerant: a NULL dentry is returned unchanged.  The conditional
 * is the expanded ftrace branch-profiling wrapper around `if (dentry)`. */
static inline __attribute__((always_inline)) struct dentry *dget_dlock(struct dentry *dentry)
{
 if (__builtin_constant_p(((dentry))) ? !!((dentry)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 361, }; ______r = !!((dentry)); ______f.miss_hit[______r]++; ______r; }))
  dentry->d_count++;
 return dentry;
}
/* Take an extra reference on @dentry, acquiring d_lock itself.
 * NULL-tolerant.  The conditional is the expanded ftrace
 * branch-profiling wrapper around `if (dentry)`. */
static inline __attribute__((always_inline)) struct dentry *dget(struct dentry *dentry)
{
 if (__builtin_constant_p(((dentry))) ? !!((dentry)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/dcache.h", .line = 368, }; ______r = !!((dentry)); ______f.miss_hit[______r]++; ______r; })) {
  spin_lock(&dentry->d_lock);
  dget_dlock(dentry);
  spin_unlock(&dentry->d_lock);
 }
 return dentry;
}
17495extern struct dentry *dget_parent(struct dentry *dentry);
17496static inline __attribute__((always_inline)) int d_unhashed(struct dentry *dentry)
17497{
17498 return hlist_bl_unhashed(&dentry->d_hash);
17499}
17500static inline __attribute__((always_inline)) int d_unlinked(struct dentry *dentry)
17501{
17502 return d_unhashed(dentry) && !((dentry) == (dentry)->d_parent);
17503}
17504static inline __attribute__((always_inline)) int cant_mount(struct dentry *dentry)
17505{
17506 return (dentry->d_flags & 0x0100);
17507}
/* Forbid future mounts on @dentry: set flag 0x0100 (DCACHE_CANT_MOUNT)
 * under d_lock. */
static inline __attribute__((always_inline)) void dont_mount(struct dentry *dentry)
{
 spin_lock(&dentry->d_lock);
 dentry->d_flags |= 0x0100;
 spin_unlock(&dentry->d_lock);
}
17514extern void dput(struct dentry *);
17515static inline __attribute__((always_inline)) bool d_managed(struct dentry *dentry)
17516{
17517 return dentry->d_flags & (0x10000|0x20000|0x40000);
17518}
17519static inline __attribute__((always_inline)) bool d_mountpoint(struct dentry *dentry)
17520{
17521 return dentry->d_flags & 0x10000;
17522}
17523extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
17524extern int sysctl_vfs_cache_pressure;
17525struct dentry;
17526struct vfsmount;
/* A position in the mount tree: which mount, and which dentry within it. */
struct path {
 struct vfsmount *mnt;
 struct dentry *dentry;
};
17531extern void path_get(struct path *);
17532extern void path_put(struct path *);
17533static inline __attribute__((always_inline)) int path_equal(const struct path *path1, const struct path *path2)
17534{
17535 return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
17536}
/* A radix-tree slot tags indirect (internal node) pointers by setting
 * the low address bit; report that bit. */
static inline __attribute__((always_inline)) int radix_tree_is_indirect_ptr(void *ptr)
{
 unsigned long addr = (unsigned long)ptr;

 return (int)(addr & 1UL);
}
/* Root of a radix tree (include/linux/radix-tree.h). */
struct radix_tree_root {
 unsigned int height; /* current tree height in levels */
 gfp_t gfp_mask; /* allocation flags for new nodes */
 struct radix_tree_node *rnode; /* root node, or NULL when empty */
};
/* Dereference a radix-tree slot under rcu_read_lock(); the statement
 * expression is the expanded rcu_dereference() (a volatile load with
 * the debug checks compiled out). */
static inline __attribute__((always_inline)) void *radix_tree_deref_slot(void **pslot)
{
 return ({ typeof(*(*pslot)) *_________p1 = (typeof(*(*pslot))* )(*(volatile typeof((*pslot)) *)&((*pslot))); do { } while (0); ; do { } while (0); ((typeof(*(*pslot)) *)(_________p1)); });
}
/* Dereference a radix-tree slot while holding @treelock; the body is
 * the expanded rcu_dereference_protected() with lockdep checks
 * compiled out (a plain load). */
static inline __attribute__((always_inline)) void *radix_tree_deref_slot_protected(void **pslot,
       spinlock_t *treelock)
{
 return ({ do { } while (0); ; ((typeof(*(*pslot)) *)((*pslot))); });
}
/* Nonzero when a value read from a slot is an indirect pointer (low
 * bit set), meaning the lockless lookup raced and must be retried.
 * The expression is the expanded unlikely()/branch-profiling wrapper
 * around `(unsigned long)arg & 1`. */
static inline __attribute__((always_inline)) int radix_tree_deref_retry(void *arg)
{
 return (__builtin_constant_p((unsigned long)arg & 1) ? !!((unsigned long)arg & 1) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 173, }; ______r = __builtin_expect(!!((unsigned long)arg & 1), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }));
}
/* Store @item into slot @pslot.  Line 1 is the expanded
 * BUG_ON(radix_tree_is_indirect_ptr(item)) — indirect pointers must
 * never be stored by callers; line 2 is the expanded
 * rcu_assign_pointer() (a write barrier, then the store). */
static inline __attribute__((always_inline)) void radix_tree_replace_slot(void **pslot, void *item)
{
 do { if (__builtin_constant_p((((__builtin_constant_p(radix_tree_is_indirect_ptr(item)) ? !!(radix_tree_is_indirect_ptr(item)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 186, }; ______r = __builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(radix_tree_is_indirect_ptr(item)) ? !!(radix_tree_is_indirect_ptr(item)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 186, }; ______r = __builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 186, }; ______r = !!(((__builtin_constant_p(radix_tree_is_indirect_ptr(item)) ? !!(radix_tree_is_indirect_ptr(item)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 186, }; ______r = __builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/radix-tree.h"), "i" (186), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
 ({ if (__builtin_constant_p(((!__builtin_constant_p((item)) || (((item)) != ((void *)0))))) ? !!((!__builtin_constant_p((item)) || (((item)) != ((void *)0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 187, }; ______r = !!((!__builtin_constant_p((item)) || (((item)) != ((void *)0)))); ______f.miss_hit[______r]++; ______r; })) __asm__ __volatile__("": : :"memory"); ((*pslot)) = (typeof(*(item)) *)((item)); });
}
17564int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
17565void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
17566void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
17567void *radix_tree_delete(struct radix_tree_root *, unsigned long);
17568unsigned int
17569radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
17570 unsigned long first_index, unsigned int max_items);
17571unsigned int
17572radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
17573 unsigned long first_index, unsigned int max_items);
17574unsigned long radix_tree_next_hole(struct radix_tree_root *root,
17575 unsigned long index, unsigned long max_scan);
17576unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
17577 unsigned long index, unsigned long max_scan);
17578int radix_tree_preload(gfp_t gfp_mask);
17579void radix_tree_init(void);
17580void *radix_tree_tag_set(struct radix_tree_root *root,
17581 unsigned long index, unsigned int tag);
17582void *radix_tree_tag_clear(struct radix_tree_root *root,
17583 unsigned long index, unsigned int tag);
17584int radix_tree_tag_get(struct radix_tree_root *root,
17585 unsigned long index, unsigned int tag);
17586unsigned int
17587radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
17588 unsigned long first_index, unsigned int max_items,
17589 unsigned int tag);
17590unsigned int
17591radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
17592 unsigned long first_index, unsigned int max_items,
17593 unsigned int tag);
17594unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
17595 unsigned long *first_indexp, unsigned long last_index,
17596 unsigned long nr_to_tag,
17597 unsigned int fromtag, unsigned int totag);
17598int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
/* End a radix_tree_preload() section.  The body is the expanded
 * preempt_enable(): drop the preempt count, then reschedule if
 * TIF_NEED_RESCHED (flag bit 3) became set while preemption was off. */
static inline __attribute__((always_inline)) void radix_tree_preload_end(void)
{
 do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 228, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 228, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 228, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/radix-tree.h", .line = 228, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0);
}
/* Counting semaphore (include/linux/semaphore.h). */
struct semaphore {
 spinlock_t lock; /* protects count and wait_list */
 unsigned int count; /* number of available "down" slots */
 struct list_head wait_list; /* tasks blocked in down() */
};
/* Initialize @sem with @val available slots.  The compound literal is
 * the expanded __SEMAPHORE_INITIALIZER() with spinlock debugging
 * fields; the lockdep map is then re-keyed per call site. */
static inline __attribute__((always_inline)) void sema_init(struct semaphore *sem, int val)
{
 static struct lock_class_key __key;
 *sem = (struct semaphore) { .lock = (spinlock_t ) { { .rlock = { .raw_lock = { 0 }, .magic = 0xdead4ead, .owner_cpu = -1, .owner = ((void *)-1L), .dep_map = { .name = "(*sem).lock" } } } }, .count = val, .wait_list = { &((*sem).wait_list), &((*sem).wait_list) }, };
 lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
}
17614extern void down(struct semaphore *sem);
17615extern int __attribute__((warn_unused_result)) down_interruptible(struct semaphore *sem);
17616extern int __attribute__((warn_unused_result)) down_killable(struct semaphore *sem);
17617extern int __attribute__((warn_unused_result)) down_trylock(struct semaphore *sem);
17618extern int __attribute__((warn_unused_result)) down_timeout(struct semaphore *sem, long jiffies);
17619extern void up(struct semaphore *sem);
/* One extent returned by the FIEMAP ioctl (all offsets/lengths in bytes). */
struct fiemap_extent {
 __u64 fe_logical; /* logical offset within the file */
 __u64 fe_physical; /* physical offset on the device */
 __u64 fe_length;
 __u64 fe_reserved64[2];
 __u32 fe_flags; /* FIEMAP_EXTENT_* flags */
 __u32 fe_reserved[3];
};
/* FIEMAP ioctl request/reply header, followed by fm_extent_count
 * extents in the trailing flexible array. */
struct fiemap {
 __u64 fm_start; /* byte range to map: start */
 __u64 fm_length; /* byte range to map: length */
 __u32 fm_flags; /* FIEMAP_FLAG_* in/out flags */
 __u32 fm_mapped_extents; /* out: extents actually filled in */
 __u32 fm_extent_count; /* in: capacity of fm_extents[] */
 __u32 fm_reserved;
 struct fiemap_extent fm_extents[0]; /* pre-C99 flexible array idiom */
};
17637struct export_operations;
17638struct hd_geometry;
17639struct iovec;
17640struct nameidata;
17641struct kiocb;
17642struct kobject;
17643struct pipe_inode_info;
17644struct poll_table_struct;
17645struct kstatfs;
17646struct vm_area_struct;
17647struct vfsmount;
17648struct cred;
17649extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init(void);
17650extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init_early(void);
17651extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) files_init(unsigned long);
17652extern struct files_stat_struct files_stat;
17653extern unsigned long get_max_files(void);
17654extern int sysctl_nr_open;
17655extern struct inodes_stat_t inodes_stat;
17656extern int leases_enable, lease_break_time;
17657struct buffer_head;
17658typedef int (get_block_t)(struct inode *inode, sector_t iblock,
17659 struct buffer_head *bh_result, int create);
17660typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
17661 ssize_t bytes, void *private, int ret,
17662 bool is_async);
/* Attribute-change request passed to notify_change()/->setattr();
 * ia_valid's ATTR_* bits say which fields below are meaningful. */
struct iattr {
 unsigned int ia_valid;
 umode_t ia_mode;
 uid_t ia_uid;
 gid_t ia_gid;
 loff_t ia_size;
 struct timespec ia_atime;
 struct timespec ia_mtime;
 struct timespec ia_ctime;
 struct file *ia_file; /* set for ftruncate-style size changes */
};
/* Bit numbers for if_dqblk.dqb_valid (which quota fields are set). */
enum {
 QIF_BLIMITS_B = 0,
 QIF_SPACE_B,
 QIF_ILIMITS_B,
 QIF_INODES_B,
 QIF_BTIME_B,
 QIF_ITIME_B,
};
/* Per-user/group quota limits and usage as exchanged with userspace
 * via quotactl(Q_GETQUOTA/Q_SETQUOTA). */
struct if_dqblk {
 __u64 dqb_bhardlimit; /* absolute block (space) limit */
 __u64 dqb_bsoftlimit; /* preferred block limit */
 __u64 dqb_curspace; /* current space usage, bytes */
 __u64 dqb_ihardlimit; /* absolute inode limit */
 __u64 dqb_isoftlimit; /* preferred inode limit */
 __u64 dqb_curinodes; /* current inode usage */
 __u64 dqb_btime; /* time of block soft-limit excess */
 __u64 dqb_itime; /* time of inode soft-limit excess */
 __u32 dqb_valid; /* QIF_* bits: which fields are valid */
};
/* Per-filesystem quota info exchanged via quotactl(Q_GETINFO/Q_SETINFO). */
struct if_dqinfo {
 __u64 dqi_bgrace; /* block grace period, seconds */
 __u64 dqi_igrace; /* inode grace period, seconds */
 __u32 dqi_flags;
 __u32 dqi_valid; /* which of the above are valid */
};
/* Generic-netlink command numbers for quota warnings. */
enum {
 QUOTA_NL_C_UNSPEC,
 QUOTA_NL_C_WARNING,
 __QUOTA_NL_C_MAX,
};
/* Attributes carried by a QUOTA_NL_C_WARNING message. */
enum {
 QUOTA_NL_A_UNSPEC,
 QUOTA_NL_A_QTYPE,
 QUOTA_NL_A_EXCESS_ID,
 QUOTA_NL_A_WARNING,
 QUOTA_NL_A_DEV_MAJOR,
 QUOTA_NL_A_DEV_MINOR,
 QUOTA_NL_A_CAUSED_ID,
 __QUOTA_NL_A_MAX,
};
/* XFS-style on-disk quota record, used by the Q_XGETQUOTA/Q_XSETQLIM
 * quotactl interface.  Block counts are in 512-byte basic blocks. */
typedef struct fs_disk_quota {
 __s8 d_version; /* structure version */
 __s8 d_flags; /* user/project/group quota type */
 __u16 d_fieldmask; /* which fields to apply on set */
 __u32 d_id; /* user/project/group id */
 __u64 d_blk_hardlimit;
 __u64 d_blk_softlimit;
 __u64 d_ino_hardlimit;
 __u64 d_ino_softlimit;
 __u64 d_bcount; /* blocks owned */
 __u64 d_icount; /* inodes owned */
 __s32 d_itimer; /* inode grace-period expiry */
 __s32 d_btimer; /* block grace-period expiry */
 __u16 d_iwarns; /* inode warnings issued */
 __u16 d_bwarns; /* block warnings issued */
 __s32 d_padding2;
 __u64 d_rtb_hardlimit; /* realtime-block limits/usage below */
 __u64 d_rtb_softlimit;
 __u64 d_rtbcount;
 __s32 d_rtbtimer;
 __u16 d_rtbwarns;
 __s16 d_padding3;
 char d_padding4[8];
} fs_disk_quota_t;
/* Statistics for one quota file (XFS-style interface). */
typedef struct fs_qfilestat {
 __u64 qfs_ino; /* inode number of the quota file */
 __u64 qfs_nblks; /* blocks used by the quota file */
 __u32 qfs_nextents; /* extents in the quota file */
} fs_qfilestat_t;
/* Overall quota-system state returned by quotactl(Q_XGETQSTAT). */
typedef struct fs_quota_stat {
 __s8 qs_version;
 __u16 qs_flags; /* XFS_QUOTA_*_ENFD etc. */
 __s8 qs_pad;
 fs_qfilestat_t qs_uquota; /* user quota file stats */
 fs_qfilestat_t qs_gquota; /* group quota file stats */
 __u32 qs_incoredqs; /* dquots in core */
 __s32 qs_btimelimit; /* default block grace period */
 __s32 qs_itimelimit; /* default inode grace period */
 __s32 qs_rtbtimelimit; /* default realtime-block grace period */
 __u16 qs_bwarnlimit;
 __u16 qs_iwarnlimit;
} fs_quota_stat_t;
17756struct dquot;
/* Format callbacks for the generic quota-tree (qtree) code: convert a
 * dquot between its in-memory and on-disk forms, and match records. */
struct qtree_fmt_operations {
 void (*mem2disk_dqblk)(void *disk, struct dquot *dquot); /* serialize */
 void (*disk2mem_dqblk)(struct dquot *dquot, void *disk); /* deserialize */
 int (*is_id)(void *disk, struct dquot *dquot); /* does record match dquot's id? */
};
/* In-memory description of one quota-tree file. */
struct qtree_mem_dqinfo {
 struct super_block *dqi_sb; /* filesystem the quota file lives on */
 int dqi_type; /* user or group quota */
 unsigned int dqi_blocks; /* blocks in the file */
 unsigned int dqi_free_blk; /* first block in the free-block list */
 unsigned int dqi_free_entry; /* first block with a free entry */
 unsigned int dqi_blocksize_bits; /* log2 of tree block size */
 unsigned int dqi_entry_size; /* size of one on-disk dquot entry */
 unsigned int dqi_usable_bs; /* usable bytes per tree block */
 unsigned int dqi_qtree_depth; /* depth of the tree */
 struct qtree_fmt_operations *dqi_ops; /* format conversion callbacks */
};
17774int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
17775int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
17776int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
17777int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
17778int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk);
17779static inline __attribute__((always_inline)) int qtree_depth(struct qtree_mem_dqinfo *info)
17780{
17781 unsigned int epb = info->dqi_usable_bs >> 2;
17782 unsigned long long entries = epb;
17783 int i;
17784 for (i = 1; entries < (1ULL << 32); i++)
17785 entries *= epb;
17786 return i;
17787}
17788typedef __kernel_uid32_t qid_t;
17789typedef long long qsize_t;
17790extern spinlock_t dq_data_lock;
/* In-memory copy of one dquot's limits and usage. */
struct mem_dqblk {
 qsize_t dqb_bhardlimit; /* absolute limit on disk blocks */
 qsize_t dqb_bsoftlimit; /* preferred limit on disk blocks */
 qsize_t dqb_curspace; /* current space used, bytes */
 qsize_t dqb_rsvspace; /* space reserved but not yet allocated */
 qsize_t dqb_ihardlimit; /* absolute limit on inodes */
 qsize_t dqb_isoftlimit; /* preferred limit on inodes */
 qsize_t dqb_curinodes; /* current inodes used */
 time_t dqb_btime; /* when block soft limit was exceeded */
 time_t dqb_itime; /* when inode soft limit was exceeded */
};
17802struct quota_format_type;
/* Per-superblock, per-quota-type bookkeeping. */
struct mem_dqinfo {
 struct quota_format_type *dqi_format;
 int dqi_fmt_id; /* format id, kept for remount */
 struct list_head dqi_dirty_list; /* dirty dquots of this type */
 unsigned long dqi_flags;
 unsigned int dqi_bgrace; /* block grace period, seconds */
 unsigned int dqi_igrace; /* inode grace period, seconds */
 qsize_t dqi_maxblimit; /* format's maximum block limit */
 qsize_t dqi_maxilimit; /* format's maximum inode limit */
 void *dqi_priv; /* format-private data */
};
17814struct super_block;
17815extern void mark_info_dirty(struct super_block *sb, int type);
/* Nonzero when the quota info needs writing: the expression is the
 * expanded test_bit(16, &info->dqi_flags) (bit 16 = DQF_INFO_DIRTY). */
static inline __attribute__((always_inline)) int info_dirty(struct mem_dqinfo *info)
{
 return (__builtin_constant_p((16)) ? constant_test_bit((16), (&info->dqi_flags)) : variable_test_bit((16), (&info->dqi_flags)));
}
/* Indices into the quota statistics counters (struct dqstats). */
enum {
 DQST_LOOKUPS,
 DQST_DROPS,
 DQST_READS,
 DQST_WRITES,
 DQST_CACHE_HITS,
 DQST_ALLOC_DQUOTS,
 DQST_FREE_DQUOTS,
 DQST_SYNCS,
 _DQST_DQSTAT_LAST /* number of counters, keep last */
};
/* Global quota statistics: plain snapshot values plus the per-CPU
 * counters they are aggregated from. */
struct dqstats {
 int stat[_DQST_DQSTAT_LAST];
 struct percpu_counter counter[_DQST_DQSTAT_LAST];
};
17835extern struct dqstats *dqstats_pcpu;
17836extern struct dqstats dqstats;
/* Bump the global quota statistic @type (a DQST_* index). */
static inline __attribute__((always_inline)) void dqstats_inc(unsigned int type)
{
 percpu_counter_inc(&dqstats.counter[type]);
}
/* Decrement the global quota statistic @type (a DQST_* index). */
static inline __attribute__((always_inline)) void dqstats_dec(unsigned int type)
{
 percpu_counter_dec(&dqstats.counter[type]);
}
/* In-core quota record for one (filesystem, type, id) triple. */
struct dquot {
 struct hlist_node dq_hash; /* hash-table linkage */
 struct list_head dq_inuse; /* all-dquots list */
 struct list_head dq_free; /* free-list linkage */
 struct list_head dq_dirty; /* dirty-list linkage */
 struct mutex dq_lock; /* serializes dquot I/O */
 atomic_t dq_count; /* reference count */
 wait_queue_head_t dq_wait_unused; /* waiters for dq_count to drop */
 struct super_block *dq_sb;
 unsigned int dq_id; /* user/group id this dquot applies to */
 loff_t dq_off; /* offset in the quota file, 0 if not yet stored */
 unsigned long dq_flags; /* DQ_* status bits */
 short dq_type; /* USRQUOTA or GRPQUOTA */
 struct mem_dqblk dq_dqb; /* limits and usage */
};
/* Operations a quota file format implements (vfsold, vfsv0, ...). */
struct quota_format_ops {
 int (*check_quota_file)(struct super_block *sb, int type); /* is the file in this format? */
 int (*read_file_info)(struct super_block *sb, int type);
 int (*write_file_info)(struct super_block *sb, int type);
 int (*free_file_info)(struct super_block *sb, int type);
 int (*read_dqblk)(struct dquot *dquot); /* load one dquot from disk */
 int (*commit_dqblk)(struct dquot *dquot); /* write one dquot to disk */
 int (*release_dqblk)(struct dquot *dquot); /* free a dquot's on-disk slot */
};
/* Hooks a filesystem provides for dquot lifecycle management. */
struct dquot_operations {
 int (*write_dquot) (struct dquot *); /* regular dquot write-back */
 struct dquot *(*alloc_dquot)(struct super_block *, int); /* allocate a dquot object */
 void (*destroy_dquot)(struct dquot *);
 int (*acquire_dquot) (struct dquot *); /* first reference: read it in */
 int (*release_dquot) (struct dquot *); /* last reference: write it out */
 int (*mark_dirty) (struct dquot *);
 int (*write_info) (struct super_block *, int); /* write quota "superblock" info */
 qsize_t *(*get_reserved_space) (struct inode *); /* per-inode reserved space, if tracked */
};
17879struct path;
/* Operations backing the quotactl(2) syscall for one superblock. */
struct quotactl_ops {
 int (*quota_on)(struct super_block *, int, int, struct path *);
 int (*quota_on_meta)(struct super_block *, int, int); /* quota stored in fs metadata */
 int (*quota_off)(struct super_block *, int);
 int (*quota_sync)(struct super_block *, int, int);
 int (*get_info)(struct super_block *, int, struct if_dqinfo *);
 int (*set_info)(struct super_block *, int, struct if_dqinfo *);
 int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
 int (*set_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
 int (*get_xstate)(struct super_block *, struct fs_quota_stat *); /* XFS-style state */
 int (*set_xstate)(struct super_block *, unsigned int, int);
};
/* A registered quota file format (linked list of formats). */
struct quota_format_type {
 int qf_fmt_id; /* numeric format id from userspace */
 const struct quota_format_ops *qf_ops;
 struct module *qf_owner; /* module refcounting */
 struct quota_format_type *qf_next;
};
/* Per-type quota state bits; a type's bits are shifted by
 * _DQUOT_STATE_FLAGS * type (see dquot_state_flag() below). */
enum {
 _DQUOT_USAGE_ENABLED = 0, /* usage tracking active */
 _DQUOT_LIMITS_ENABLED, /* limits enforced */
 _DQUOT_SUSPENDED, /* quota temporarily suspended */
 _DQUOT_STATE_FLAGS /* number of state bits per type */
};
17904static inline __attribute__((always_inline)) unsigned int dquot_state_flag(unsigned int flags, int type)
17905{
17906 return flags << _DQUOT_STATE_FLAGS * type;
17907}
17908static inline __attribute__((always_inline)) unsigned int dquot_generic_flag(unsigned int flags, int type)
17909{
17910 return (flags >> _DQUOT_STATE_FLAGS * type) & ((1 << _DQUOT_USAGE_ENABLED) | (1 << _DQUOT_LIMITS_ENABLED) | (1 << _DQUOT_SUSPENDED));
17911}
/* Stub: quota netlink warning support is compiled out in this
 * configuration, so warnings are silently dropped. */
static inline __attribute__((always_inline)) void quota_send_warning(short type, unsigned int id, dev_t dev,
    const char warntype)
{
 return;
}
/* Per-superblock quota state; arrays are indexed by quota type
 * (0 = user, 1 = group). */
struct quota_info {
 unsigned int flags; /* DQUOT_* enablement flags */
 struct mutex dqio_mutex; /* serializes quota file I/O */
 struct mutex dqonoff_mutex; /* serializes quota on/off */
 struct rw_semaphore dqptr_sem; /* protects inode->i_dquot pointers */
 struct inode *files[2]; /* quota file inodes */
 struct mem_dqinfo info[2]; /* per-type info */
 const struct quota_format_ops *ops[2]; /* per-type format operations */
};
17926int register_quota_format(struct quota_format_type *fmt);
17927void unregister_quota_format(struct quota_format_type *fmt);
/* Maps a quota format id to the module implementing it (for autoload). */
struct quota_module_name {
 int qm_fmt_id;
 char *qm_mod_name;
};
/* Positive, non-error return codes from address_space operations. */
enum positive_aop_returns {
 AOP_WRITEPAGE_ACTIVATE = 0x80000, /* writepage: move page back to active list */
 AOP_TRUNCATED_PAGE = 0x80001, /* page was truncated; caller should retry */
};
17936struct page;
17937struct address_space;
17938struct writeback_control;
/* Iterator over an array of iovecs. */
struct iov_iter {
 const struct iovec *iov; /* current segment */
 unsigned long nr_segs; /* segments remaining, including current */
 size_t iov_offset; /* bytes already consumed of current segment */
 size_t count; /* total bytes remaining */
};
17945size_t iov_iter_copy_from_user_atomic(struct page *page,
17946 struct iov_iter *i, unsigned long offset, size_t bytes);
17947size_t iov_iter_copy_from_user(struct page *page,
17948 struct iov_iter *i, unsigned long offset, size_t bytes);
17949void iov_iter_advance(struct iov_iter *i, size_t bytes);
17950int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
17951size_t iov_iter_single_seg_count(struct iov_iter *i);
17952static inline __attribute__((always_inline)) void iov_iter_init(struct iov_iter *i,
17953 const struct iovec *iov, unsigned long nr_segs,
17954 size_t count, size_t written)
17955{
17956 i->iov = iov;
17957 i->nr_segs = nr_segs;
17958 i->iov_offset = 0;
17959 i->count = count + written;
17960 iov_iter_advance(i, written);
17961}
17962static inline __attribute__((always_inline)) size_t iov_iter_count(struct iov_iter *i)
17963{
17964 return i->count;
17965}
/* State passed to a read_actor_t while splicing data out of the
 * page cache. */
typedef struct {
 size_t written; /* bytes produced so far */
 size_t count; /* bytes still wanted */
 union {
  char *buf; /* userspace destination, or... */
  void *data; /* ...opaque per-caller data */
 } arg;
 int error; /* sticky error code, 0 if none */
} read_descriptor_t;
17975typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
17976 unsigned long, unsigned long);
/* Page-cache operations for one address_space (include/linux/fs.h). */
struct address_space_operations {
 int (*writepage)(struct page *page, struct writeback_control *wbc); /* write one dirty page */
 int (*readpage)(struct file *, struct page *); /* fill one page */
 int (*writepages)(struct address_space *, struct writeback_control *); /* write several dirty pages */
 int (*set_page_dirty)(struct page *page);
 int (*readpages)(struct file *filp, struct address_space *mapping,
   struct list_head *pages, unsigned nr_pages); /* readahead */
 int (*write_begin)(struct file *, struct address_space *mapping,
    loff_t pos, unsigned len, unsigned flags,
    struct page **pagep, void **fsdata); /* prepare a buffered write */
 int (*write_end)(struct file *, struct address_space *mapping,
    loff_t pos, unsigned len, unsigned copied,
    struct page *page, void *fsdata); /* commit a buffered write */
 sector_t (*bmap)(struct address_space *, sector_t); /* logical -> physical block */
 void (*invalidatepage) (struct page *, unsigned long);
 int (*releasepage) (struct page *, gfp_t); /* try to free private page state */
 void (*freepage)(struct page *);
 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
   loff_t offset, unsigned long nr_segs); /* O_DIRECT transfer */
 int (*get_xip_mem)(struct address_space *, unsigned long, int,
    void **, unsigned long *); /* execute-in-place support */
 int (*migratepage) (struct address_space *,
   struct page *, struct page *); /* page migration */
 int (*launder_page) (struct page *); /* write page before releasing it */
 int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
     unsigned long);
 int (*error_remove_page)(struct address_space *, struct page *); /* memory-failure cleanup */
};
18005extern const struct address_space_operations empty_aops;
18006int pagecache_write_begin(struct file *, struct address_space *mapping,
18007 loff_t pos, unsigned len, unsigned flags,
18008 struct page **pagep, void **fsdata);
18009int pagecache_write_end(struct file *, struct address_space *mapping,
18010 loff_t pos, unsigned len, unsigned copied,
18011 struct page *page, void *fsdata);
18012struct backing_dev_info;
/* The page cache and mmap bookkeeping for one inode's data. */
struct address_space {
 struct inode *host; /* owning inode */
 struct radix_tree_root page_tree; /* cached pages, indexed by offset */
 spinlock_t tree_lock; /* protects page_tree */
 unsigned int i_mmap_writable; /* VM_SHARED mapping count */
 struct prio_tree_root i_mmap; /* tree of linear private/shared mappings */
 struct list_head i_mmap_nonlinear; /* VM_NONLINEAR mappings */
 struct mutex i_mmap_mutex; /* protects the mapping trees/lists */
 unsigned long nrpages; /* pages in page_tree */
 unsigned long writeback_index; /* where writeback should resume */
 const struct address_space_operations *a_ops;
 unsigned long flags; /* gfp mask and AS_* error bits */
 struct backing_dev_info *backing_dev_info;
 spinlock_t private_lock; /* protects private_list */
 struct list_head private_list; /* e.g. associated metadata buffers */
 struct address_space *assoc_mapping; /* mapping the buffers belong to */
} __attribute__((aligned(sizeof(long))));
/* Kernel representation of an open block device (whole disk or partition). */
struct block_device {
 dev_t bd_dev; /* device number; also the search key */
 int bd_openers; /* number of opens */
 struct inode * bd_inode; /* backing bdev inode */
 struct super_block * bd_super; /* filesystem mounted on this device */
 struct mutex bd_mutex; /* open/close mutex */
 struct list_head bd_inodes; /* device inodes referencing us */
 void * bd_claiming; /* holder currently claiming the device */
 void * bd_holder; /* current exclusive holder */
 int bd_holders;
 bool bd_write_holder;
 struct list_head bd_holder_disks;
 struct block_device * bd_contains; /* whole-disk device for a partition */
 unsigned bd_block_size;
 struct hd_struct * bd_part; /* partition info */
 unsigned bd_part_count; /* open partitions on this disk */
 int bd_invalidated; /* partition table needs rereading */
 struct gendisk * bd_disk;
 struct list_head bd_list; /* all block devices */
 unsigned long bd_private; /* holder-private data */
 int bd_fsfreeze_count; /* freeze protection counters below */
 struct mutex bd_fsfreeze_mutex;
};
18053int mapping_tagged(struct address_space *mapping, int tag);
18054static inline __attribute__((always_inline)) int mapping_mapped(struct address_space *mapping)
18055{
18056 return !prio_tree_empty(&mapping->i_mmap) ||
18057 !list_empty(&mapping->i_mmap_nonlinear);
18058}
18059static inline __attribute__((always_inline)) int mapping_writably_mapped(struct address_space *mapping)
18060{
18061 return mapping->i_mmap_writable != 0;
18062}
18063struct posix_acl;
/* The in-core inode (include/linux/fs.h). */
struct inode {
 umode_t i_mode; /* file type and permission bits */
 uid_t i_uid;
 gid_t i_gid;
 const struct inode_operations *i_op;
 struct super_block *i_sb;
 spinlock_t i_lock; /* protects i_state, i_count, ... */
 unsigned int i_flags; /* S_* inode flags */
 unsigned long i_state; /* I_DIRTY, I_NEW, I_FREEING, ... */
 void *i_security; /* LSM blob */
 struct mutex i_mutex; /* the main per-inode lock */
 unsigned long dirtied_when; /* jiffies of first dirtying */
 struct hlist_node i_hash; /* inode hash-table linkage */
 struct list_head i_wb_list; /* writeback list linkage */
 struct list_head i_lru; /* unused-inode LRU linkage */
 struct list_head i_sb_list; /* superblock inode list linkage */
 union {
  struct list_head i_dentry; /* dentries aliasing this inode */
  struct rcu_head i_rcu; /* reused for RCU-deferred freeing */
 };
 unsigned long i_ino;
 atomic_t i_count; /* reference count */
 unsigned int i_nlink; /* hard-link count */
 dev_t i_rdev; /* device number for device special files */
 unsigned int i_blkbits; /* log2 of block size */
 u64 i_version;
 loff_t i_size; /* file size in bytes */
 seqcount_t i_size_seqcount; /* makes 64-bit i_size reads atomic on 32-bit SMP */
 struct timespec i_atime;
 struct timespec i_mtime;
 struct timespec i_ctime;
 blkcnt_t i_blocks; /* blocks allocated, 512-byte units */
 unsigned short i_bytes; /* bytes used in the last block */
 struct rw_semaphore i_alloc_sem; /* protects against truncate during I/O */
 const struct file_operations *i_fop; /* former ->default_file_ops */
 struct file_lock *i_flock; /* POSIX/flock lock list */
 struct address_space *i_mapping; /* page cache in use (usually &i_data) */
 struct address_space i_data; /* this inode's own page cache */
 struct dquot *i_dquot[2]; /* user/group quota */
 struct list_head i_devices; /* per-device inode list */
 union {
  struct pipe_inode_info *i_pipe; /* FIFO/pipe state */
  struct block_device *i_bdev; /* block device */
  struct cdev *i_cdev; /* character device */
 };
 __u32 i_generation;
 __u32 i_fsnotify_mask; /* fsnotify events of interest */
 struct hlist_head i_fsnotify_marks;
 atomic_t i_writecount; /* writers (negative: denied, e.g. VM_DENYWRITE) */
 struct posix_acl *i_acl; /* cached access ACL */
 struct posix_acl *i_default_acl; /* cached default ACL */
 void *i_private; /* filesystem/driver private pointer */
};
18117static inline __attribute__((always_inline)) int inode_unhashed(struct inode *inode)
18118{
18119 return hlist_unhashed(&inode->i_hash);
18120}
/*
 * Lockdep subclasses for inode->i_mutex, used when nesting the mutex
 * of related inodes (e.g. parent/child during rename or unlink).
 */
enum inode_i_mutex_lock_class
{
 I_MUTEX_NORMAL,
 I_MUTEX_PARENT,
 I_MUTEX_CHILD,
 I_MUTEX_XATTR,
 I_MUTEX_QUOTA
};
/*
 * Read i_size without tearing: loop while a concurrent i_size_write()
 * is in progress, per the seqcount read protocol. Needed because
 * loff_t is 64-bit and cannot be read atomically on 32-bit CPUs.
 */
static inline __attribute__((always_inline)) loff_t i_size_read(const struct inode *inode)
{
 loff_t i_size;
 unsigned int seq;
 do {
  seq = read_seqcount_begin(&inode->i_size_seqcount);
  i_size = inode->i_size;
 } while (read_seqcount_retry(&inode->i_size_seqcount, seq));
 return i_size;
}
/*
 * Update i_size under the seqcount so that i_size_read() retries
 * instead of observing a torn 64-bit value. Caller must serialize
 * writers; the seqcount only protects readers.
 */
static inline __attribute__((always_inline)) void i_size_write(struct inode *inode, loff_t i_size)
{
 write_seqcount_begin(&inode->i_size_seqcount);
 inode->i_size = i_size;
 write_seqcount_end(&inode->i_size_seqcount);
}
18145static inline __attribute__((always_inline)) unsigned iminor(const struct inode *inode)
18146{
18147 return ((unsigned int) ((inode->i_rdev) & ((1U << 20) - 1)));
18148}
18149static inline __attribute__((always_inline)) unsigned imajor(const struct inode *inode)
18150{
18151 return ((unsigned int) ((inode->i_rdev) >> 20));
18152}
extern struct block_device *I_BDEV(struct inode *inode);
/* Ownership info for SIGIO/SIGURG delivery (F_SETOWN and friends). */
struct fown_struct {
 rwlock_t lock;
 struct pid *pid;
 enum pid_type pid_type;
 uid_t uid, euid;
 int signum; /* signal to deliver; 0 means default SIGIO */
};
/* Per-open-file readahead state (units are pages). */
struct file_ra_state {
 unsigned long start; /* first page of the current window */
 unsigned int size; /* pages in the current window (see ra_has_index) */
 unsigned int async_size;
 unsigned int ra_pages;
 unsigned int mmap_miss;
 loff_t prev_pos;
};
18169static inline __attribute__((always_inline)) int ra_has_index(struct file_ra_state *ra, unsigned long index)
18170{
18171 return (index >= ra->start &&
18172 index < ra->start + ra->size);
18173}
/* One open-file instance; several can share the same inode. */
struct file {
 union {
  struct list_head fu_list;
  struct rcu_head fu_rcuhead;
 } f_u;
 struct path f_path; /* dentry + vfsmount (see file_accessed) */
 const struct file_operations *f_op;
 spinlock_t f_lock;
 int f_sb_list_cpu;
 atomic_long_t f_count; /* reference count */
 unsigned int f_flags; /* O_* open flags */
 fmode_t f_mode;
 loff_t f_pos; /* current file position */
 struct fown_struct f_owner;
 const struct cred *f_cred;
 struct file_ra_state f_ra;
 u64 f_version;
 void *f_security;
 void *private_data; /* driver/fs private pointer */
 struct list_head f_ep_links;
 struct address_space *f_mapping;
};
/* Userspace-visible handle for open_by_handle-style APIs. */
struct file_handle {
 __u32 handle_bytes; /* length of f_handle[] */
 int handle_type;
 unsigned char f_handle[0]; /* variable-length opaque handle */
};
/*
 * Write-access tracking stubs: in this configuration they compile to
 * nothing (presumably the checking variants live behind a debug
 * config option in the unexpanded header — confirm there).
 */
static inline __attribute__((always_inline)) void file_take_write(struct file *filp) {}
static inline __attribute__((always_inline)) void file_release_write(struct file *filp) {}
static inline __attribute__((always_inline)) void file_reset_write(struct file *filp) {}
static inline __attribute__((always_inline)) void file_check_state(struct file *filp) {}
static inline __attribute__((always_inline)) int file_check_writeable(struct file *filp)
{
 return 0;
}
/* Lock owner is identified by the opener's files_struct. */
typedef struct files_struct *fl_owner_t;
/* Per-lock copy/teardown hooks supplied by the lock's creator. */
struct file_lock_operations {
 void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
 void (*fl_release_private)(struct file_lock *);
};
/* Callbacks for external lock managers (lockd/NFS lease handling). */
struct lock_manager_operations {
 int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
 void (*fl_notify)(struct file_lock *);
 int (*fl_grant)(struct file_lock *, struct file_lock *, int);
 void (*fl_release_private)(struct file_lock *);
 void (*fl_break)(struct file_lock *);
 int (*fl_change)(struct file_lock **, int);
};
struct lock_manager {
 struct list_head list;
};
/* Grace-period bookkeeping for reclaiming locks after server restart. */
void locks_start_grace(struct lock_manager *);
void locks_end_grace(struct lock_manager *);
int locks_in_grace(void);
 /*
  * NFS wire status codes. Values below 10000 mirror errno numbers;
  * the 10000-range codes are NFSv3/v4 protocol-specific statuses.
  */
 enum nfs_stat {
 NFS_OK = 0,
 NFSERR_PERM = 1,
 NFSERR_NOENT = 2,
 NFSERR_IO = 5,
 NFSERR_NXIO = 6,
 NFSERR_EAGAIN = 11,
 NFSERR_ACCES = 13,
 NFSERR_EXIST = 17,
 NFSERR_XDEV = 18,
 NFSERR_NODEV = 19,
 NFSERR_NOTDIR = 20,
 NFSERR_ISDIR = 21,
 NFSERR_INVAL = 22,
 NFSERR_FBIG = 27,
 NFSERR_NOSPC = 28,
 NFSERR_ROFS = 30,
 NFSERR_MLINK = 31,
 NFSERR_OPNOTSUPP = 45,
 NFSERR_NAMETOOLONG = 63,
 NFSERR_NOTEMPTY = 66,
 NFSERR_DQUOT = 69,
 NFSERR_STALE = 70,
 NFSERR_REMOTE = 71,
 NFSERR_WFLUSH = 99,
 NFSERR_BADHANDLE = 10001,
 NFSERR_NOT_SYNC = 10002,
 NFSERR_BAD_COOKIE = 10003,
 NFSERR_NOTSUPP = 10004,
 NFSERR_TOOSMALL = 10005,
 NFSERR_SERVERFAULT = 10006,
 NFSERR_BADTYPE = 10007,
 NFSERR_JUKEBOX = 10008,
 NFSERR_SAME = 10009,
 NFSERR_DENIED = 10010,
 NFSERR_EXPIRED = 10011,
 NFSERR_LOCKED = 10012,
 NFSERR_GRACE = 10013,
 NFSERR_FHEXPIRED = 10014,
 NFSERR_SHARE_DENIED = 10015,
 NFSERR_WRONGSEC = 10016,
 NFSERR_CLID_INUSE = 10017,
 NFSERR_RESOURCE = 10018,
 NFSERR_MOVED = 10019,
 NFSERR_NOFILEHANDLE = 10020,
 NFSERR_MINOR_VERS_MISMATCH = 10021,
 NFSERR_STALE_CLIENTID = 10022,
 NFSERR_STALE_STATEID = 10023,
 NFSERR_OLD_STATEID = 10024,
 NFSERR_BAD_STATEID = 10025,
 NFSERR_BAD_SEQID = 10026,
 NFSERR_NOT_SAME = 10027,
 NFSERR_LOCK_RANGE = 10028,
 NFSERR_SYMLINK = 10029,
 NFSERR_RESTOREFH = 10030,
 NFSERR_LEASE_MOVED = 10031,
 NFSERR_ATTRNOTSUPP = 10032,
 NFSERR_NO_GRACE = 10033,
 NFSERR_RECLAIM_BAD = 10034,
 NFSERR_RECLAIM_CONFLICT = 10035,
 NFSERR_BAD_XDR = 10036,
 NFSERR_LOCKS_HELD = 10037,
 NFSERR_OPENMODE = 10038,
 NFSERR_BADOWNER = 10039,
 NFSERR_BADCHAR = 10040,
 NFSERR_BADNAME = 10041,
 NFSERR_BAD_RANGE = 10042,
 NFSERR_LOCK_NOTSUPP = 10043,
 NFSERR_OP_ILLEGAL = 10044,
 NFSERR_DEADLOCK = 10045,
 NFSERR_FILE_OPEN = 10046,
 NFSERR_ADMIN_REVOKED = 10047,
 NFSERR_CB_PATH_DOWN = 10048,
};
/* NFS wire file types (ftype in fattr). */
enum nfs_ftype {
 NFNON = 0,
 NFREG = 1, /* regular file */
 NFDIR = 2, /* directory */
 NFBLK = 3, /* block device */
 NFCHR = 4, /* character device */
 NFLNK = 5, /* symlink */
 NFSOCK = 6, /* socket */
 NFBAD = 7,
 NFFIFO = 8 /* named pipe */
};
typedef u32 rpc_authflavor_t;
/*
 * RPC authentication flavors. Values >= 390003 are RPCSEC_GSS
 * pseudo-flavors (mechanism + service level combinations).
 */
enum rpc_auth_flavors {
 RPC_AUTH_NULL = 0,
 RPC_AUTH_UNIX = 1,
 RPC_AUTH_SHORT = 2,
 RPC_AUTH_DES = 3,
 RPC_AUTH_KRB = 4,
 RPC_AUTH_GSS = 6,
 RPC_AUTH_MAXFLAVOR = 8,
 RPC_AUTH_GSS_KRB5 = 390003,
 RPC_AUTH_GSS_KRB5I = 390004,
 RPC_AUTH_GSS_KRB5P = 390005,
 RPC_AUTH_GSS_LKEY = 390006,
 RPC_AUTH_GSS_LKEYI = 390007,
 RPC_AUTH_GSS_LKEYP = 390008,
 RPC_AUTH_GSS_SPKM = 390009,
 RPC_AUTH_GSS_SPKMI = 390010,
 RPC_AUTH_GSS_SPKMP = 390011,
};
/* ONC RPC message framing enums (RFC 5531 wire values). */
enum rpc_msg_type {
 RPC_CALL = 0,
 RPC_REPLY = 1
};
enum rpc_reply_stat {
 RPC_MSG_ACCEPTED = 0,
 RPC_MSG_DENIED = 1
};
enum rpc_accept_stat {
 RPC_SUCCESS = 0,
 RPC_PROG_UNAVAIL = 1,
 RPC_PROG_MISMATCH = 2,
 RPC_PROC_UNAVAIL = 3,
 RPC_GARBAGE_ARGS = 4,
 RPC_SYSTEM_ERR = 5,
 RPC_DROP_REPLY = 60000, /* internal: drop the reply, not a wire value */
};
enum rpc_reject_stat {
 RPC_MISMATCH = 0,
 RPC_AUTH_ERROR = 1
};
enum rpc_auth_stat {
 RPC_AUTH_OK = 0,
 RPC_AUTH_BADCRED = 1,
 RPC_AUTH_REJECTEDCRED = 2,
 RPC_AUTH_BADVERF = 3,
 RPC_AUTH_REJECTEDVERF = 4,
 RPC_AUTH_TOOWEAK = 5,
 RPCSEC_GSS_CREDPROBLEM = 13,
 RPCSEC_GSS_CTXPROBLEM = 14
};
/* TCP record-marking fragment header (big-endian on the wire). */
typedef __be32 rpc_fraghdr;
/* Textual IPv4/IPv6 address parsers. */
extern __be32 in_aton(const char *str);
extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
/* NFS file handle: only the first @size bytes of data[] are valid. */
struct nfs_fh {
 unsigned short size;
 unsigned char data[128];
};
18371static inline __attribute__((always_inline)) int nfs_compare_fh(const struct nfs_fh *a, const struct nfs_fh *b)
18372{
18373 return a->size != b->size || __builtin_memcmp(a->data, b->data, a->size) != 0;
18374}
18375static inline __attribute__((always_inline)) void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source)
18376{
18377 target->size = source->size;
18378 __builtin_memcpy(target->data, source->data, source->size);
18379}
/* NFSv3 WRITE stability levels (how durable the server must make data). */
enum nfs3_stable_how {
 NFS_UNSTABLE = 0,
 NFS_DATA_SYNC = 1,
 NFS_FILE_SYNC = 2
};
struct nlm_lockowner;
/* NLM (NFSv2/v3) per-lock state embedded in struct file_lock. */
struct nfs_lock_info {
 u32 state;
 struct nlm_lockowner *owner;
 struct list_head list;
};
struct nfs4_lock_state;
/* NFSv4 per-lock state embedded in struct file_lock. */
struct nfs4_lock_info {
 struct nfs4_lock_state *owner;
};
/*
 * Kernel-internal representation of a POSIX lock, flock lock or lease
 * on [fl_start, fl_end] of fl_file.
 */
struct file_lock {
 struct file_lock *fl_next; /* next lock on the same inode */
 struct list_head fl_link;
 struct list_head fl_block; /* waiters blocked on this lock */
 fl_owner_t fl_owner;
 unsigned char fl_flags;
 unsigned char fl_type; /* F_RDLCK / F_WRLCK / F_UNLCK */
 unsigned int fl_pid;
 struct pid *fl_nspid;
 wait_queue_head_t fl_wait;
 struct file *fl_file;
 loff_t fl_start;
 loff_t fl_end;
 struct fasync_struct * fl_fasync;
 unsigned long fl_break_time; /* lease break deadline (jiffies) */
 const struct file_lock_operations *fl_ops;
 const struct lock_manager_operations *fl_lmops;
 union { /* lock-manager private data */
  struct nfs_lock_info nfs_fl;
  struct nfs4_lock_info nfs4_fl;
  struct {
   struct list_head link;
   int state;
  } afs;
 } fl_u;
};
/* F_GETOWN_EX / F_SETOWN_EX argument. */
struct f_owner_ex {
 int type;
 __kernel_pid_t pid;
};
/* Userspace fcntl lock descriptions (32-bit and 64-bit offsets). */
struct flock {
 short l_type;
 short l_whence;
 __kernel_off_t l_start;
 __kernel_off_t l_len;
 __kernel_pid_t l_pid;
};
struct flock64 {
 short l_type;
 short l_whence;
 __kernel_loff_t l_start;
 __kernel_loff_t l_len;
 __kernel_pid_t l_pid;
};
/* fcntl()-level locking entry points. */
extern void send_sigio(struct fown_struct *fown, int fd, int band);
extern int fcntl_getlk(struct file *, struct flock *);
extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
   struct flock *);
extern int fcntl_getlk64(struct file *, struct flock64 *);
extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
   struct flock64 *);
extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
extern int fcntl_getlease(struct file *filp);
/* file_lock lifecycle and the VFS lock/lease machinery. */
void locks_free_lock(struct file_lock *fl);
extern void locks_init_lock(struct file_lock *);
extern struct file_lock * locks_alloc_lock(void);
extern void locks_copy_lock(struct file_lock *, struct file_lock *);
extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_flock(struct file *);
extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
extern int posix_unblock_lock(struct file *, struct file_lock *);
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags);
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **);
extern int vfs_setlease(struct file *, long, struct file_lock **);
extern int lease_modify(struct file_lock **, int);
extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
extern void lock_flocks(void);
extern void unlock_flocks(void);
/* fasync: asynchronous readiness notification (SIGIO fan-out list). */
struct fasync_struct {
 spinlock_t fa_lock;
 int magic;
 int fa_fd;
 struct fasync_struct *fa_next; /* singly linked list */
 struct file *fa_file;
 struct rcu_head fa_rcu;
};
extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
extern int fasync_remove_entry(struct file *, struct fasync_struct **);
extern struct fasync_struct *fasync_alloc(void);
extern void fasync_free(struct fasync_struct *);
extern void kill_fasync(struct fasync_struct **, int, int);
/* F_SETOWN/F_GETOWN ownership management. */
extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
extern int f_setown(struct file *filp, unsigned long arg, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
extern struct list_head super_blocks;
extern spinlock_t sb_lock;
/* One mounted filesystem instance. */
struct super_block {
 struct list_head s_list; /* entry on the global super_blocks list */
 dev_t s_dev;
 unsigned char s_dirt; /* needs write_super (see sb_mark_dirty) */
 unsigned char s_blocksize_bits;
 unsigned long s_blocksize;
 loff_t s_maxbytes; /* largest file size this fs supports */
 struct file_system_type *s_type;
 const struct super_operations *s_op;
 const struct dquot_operations *dq_op;
 const struct quotactl_ops *s_qcop;
 const struct export_operations *s_export_op;
 unsigned long s_flags; /* MS_* mount flags (see mandatory_lock) */
 unsigned long s_magic;
 struct dentry *s_root; /* root dentry of the mount */
 struct rw_semaphore s_umount;
 struct mutex s_lock;
 int s_count;
 atomic_t s_active;
 void *s_security;
 const struct xattr_handler **s_xattr;
 struct list_head s_inodes; /* all inodes belonging to this sb */
 struct hlist_bl_head s_anon;
 struct list_head *s_files;
 struct list_head s_dentry_lru;
 int s_nr_dentry_unused;
 struct block_device *s_bdev;
 struct backing_dev_info *s_bdi;
 struct mtd_info *s_mtd;
 struct list_head s_instances;
 struct quota_info s_dquot;
 int s_frozen; /* freeze state (SB_UNFROZEN/...) */
 wait_queue_head_t s_wait_unfrozen;
 char s_id[32]; /* informational name, e.g. device name */
 u8 s_uuid[16];
 void *s_fs_info; /* filesystem private info */
 fmode_t s_mode;
 u32 s_time_gran; /* timestamp granularity in ns */
 struct mutex s_vfs_rename_mutex;
 char *s_subtype;
 char *s_options; /* saved mount options for /proc/mounts */
 const struct dentry_operations *s_d_op;
 int cleancache_poolid;
};
extern struct timespec current_fs_time(struct super_block *sb);
/* Values for super_block.s_frozen. */
enum {
 SB_UNFROZEN = 0,
 SB_FREEZE_WRITE = 1,
 SB_FREEZE_TRANS = 2,
};
extern struct user_namespace init_user_ns;
extern bool inode_owner_or_capable(const struct inode *inode);
extern void lock_super(struct super_block *);
extern void unlock_super(struct super_block *);
/* Namespace-operation entry points (callers hold the needed locks). */
extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
extern int vfs_mkdir(struct inode *, struct dentry *, int);
extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
extern int vfs_symlink(struct inode *, struct dentry *, const char *);
extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
extern int vfs_rmdir(struct inode *, struct dentry *);
extern int vfs_unlink(struct inode *, struct dentry *);
extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
extern void dentry_unhash(struct dentry *dentry);
extern int file_permission(struct file *, int);
extern void inode_init_owner(struct inode *inode, const struct inode *dir,
   mode_t mode);
/* State threaded through FIEMAP extent-mapping ioctl handlers. */
struct fiemap_extent_info {
 unsigned int fi_flags;
 unsigned int fi_extents_mapped; /* extents emitted so far */
 unsigned int fi_extents_max; /* capacity of fi_extents_start */
 struct fiemap_extent *fi_extents_start; /* userspace destination array */
};
int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
       u64 phys, u64 len, u32 flags);
int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
/* readdir() callback invoked once per directory entry. */
typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
struct block_device_operations;
/* Per-file method table; NULL entries mean "not supported". */
struct file_operations {
 struct module *owner;
 loff_t (*llseek) (struct file *, loff_t, int);
 ssize_t (*read) (struct file *, char *, size_t, loff_t *);
 ssize_t (*write) (struct file *, const char *, size_t, loff_t *);
 ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
 ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
 int (*readdir) (struct file *, void *, filldir_t);
 unsigned int (*poll) (struct file *, struct poll_table_struct *);
 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
 int (*mmap) (struct file *, struct vm_area_struct *);
 int (*open) (struct inode *, struct file *);
 int (*flush) (struct file *, fl_owner_t id);
 int (*release) (struct inode *, struct file *); /* last reference dropped */
 int (*fsync) (struct file *, int datasync);
 int (*aio_fsync) (struct kiocb *, int datasync);
 int (*fasync) (int, struct file *, int);
 int (*lock) (struct file *, int, struct file_lock *);
 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
 unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 int (*check_flags)(int);
 int (*flock) (struct file *, int, struct file_lock *);
 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
 int (*setlease)(struct file *, long, struct file_lock **);
 long (*fallocate)(struct file *file, int mode, loff_t offset,
     loff_t len);
};
/* Per-inode method table; cacheline-aligned (see attribute below). */
struct inode_operations {
 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
 void * (*follow_link) (struct dentry *, struct nameidata *);
 int (*permission) (struct inode *, int, unsigned int);
 int (*check_acl)(struct inode *, int, unsigned int);
 int (*readlink) (struct dentry *, char *,int);
 void (*put_link) (struct dentry *, struct nameidata *, void *);
 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
 int (*link) (struct dentry *,struct inode *,struct dentry *);
 int (*unlink) (struct inode *,struct dentry *);
 int (*symlink) (struct inode *,struct dentry *,const char *);
 int (*mkdir) (struct inode *,struct dentry *,int);
 int (*rmdir) (struct inode *,struct dentry *);
 int (*mknod) (struct inode *,struct dentry *,int,dev_t);
 int (*rename) (struct inode *, struct dentry *,
   struct inode *, struct dentry *);
 void (*truncate) (struct inode *);
 int (*setattr) (struct dentry *, struct iattr *);
 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 ssize_t (*listxattr) (struct dentry *, char *, size_t);
 int (*removexattr) (struct dentry *, const char *);
 void (*truncate_range)(struct inode *, loff_t, loff_t);
 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
        u64 len);
} __attribute__((__aligned__((1 << (6))))); /* 64-byte aligned */
struct seq_file;
/* Validate/copy a userspace iovec array before vectored I/O. */
ssize_t rw_copy_check_uvector(int type, const struct iovec * uvector,
    unsigned long nr_segs, unsigned long fast_segs,
    struct iovec *fast_pointer,
    struct iovec **ret_pointer);
/* In-kernel read/write entry points (the syscall work horses). */
extern ssize_t vfs_read(struct file *, char *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec *,
  unsigned long, loff_t *);
extern ssize_t vfs_writev(struct file *, const struct iovec *,
   unsigned long, loff_t *);
/* Per-superblock method table (inode lifecycle, sync, statfs, ...). */
struct super_operations {
 struct inode *(*alloc_inode)(struct super_block *sb);
 void (*destroy_inode)(struct inode *);
 void (*dirty_inode) (struct inode *, int flags);
 int (*write_inode) (struct inode *, struct writeback_control *wbc);
 int (*drop_inode) (struct inode *);
 void (*evict_inode) (struct inode *);
 void (*put_super) (struct super_block *);
 void (*write_super) (struct super_block *);
 int (*sync_fs)(struct super_block *sb, int wait);
 int (*freeze_fs) (struct super_block *);
 int (*unfreeze_fs) (struct super_block *);
 int (*statfs) (struct dentry *, struct kstatfs *);
 int (*remount_fs) (struct super_block *, int *, char *);
 void (*umount_begin) (struct super_block *);
 int (*show_options)(struct seq_file *, struct vfsmount *);
 int (*show_devname)(struct seq_file *, struct vfsmount *);
 int (*show_path)(struct seq_file *, struct vfsmount *);
 int (*show_stats)(struct seq_file *, struct vfsmount *);
 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
};
extern void __mark_inode_dirty(struct inode *, int);
static inline __attribute__((always_inline)) void mark_inode_dirty(struct inode *inode)
{
 /* Flag inode fully dirty: bits 0..2 — presumably the expanded
  * I_DIRTY_SYNC|I_DIRTY_DATASYNC|I_DIRTY_PAGES; confirm against
  * the unexpanded fs.h. */
 __mark_inode_dirty(inode, ((1 << 0) | (1 << 1) | (1 << 2)));
}
static inline __attribute__((always_inline)) void mark_inode_dirty_sync(struct inode *inode)
{
 /* Flag only bit 0 dirty (metadata-for-sync; presumably I_DIRTY_SYNC). */
 __mark_inode_dirty(inode, (1 << 0));
}
18670static inline __attribute__((always_inline)) void inc_nlink(struct inode *inode)
18671{
18672 inode->i_nlink++;
18673}
static inline __attribute__((always_inline)) void inode_inc_link_count(struct inode *inode)
{
 /* Bump the link count and record the inode for writeback. */
 inc_nlink(inode);
 mark_inode_dirty(inode);
}
18679static inline __attribute__((always_inline)) void drop_nlink(struct inode *inode)
18680{
18681 inode->i_nlink--;
18682}
static inline __attribute__((always_inline)) void clear_nlink(struct inode *inode)
{
 /* Mark the inode as having no links left (pending deletion). */
 inode->i_nlink = 0;
}
static inline __attribute__((always_inline)) void inode_dec_link_count(struct inode *inode)
{
 /* Drop the link count and record the inode for writeback. */
 drop_nlink(inode);
 mark_inode_dirty(inode);
}
static inline __attribute__((always_inline)) void inode_inc_iversion(struct inode *inode)
{
 /* Bump i_version under i_lock so concurrent bumpers don't race. */
 spin_lock(&inode->i_lock);
 inode->i_version++;
 spin_unlock(&inode->i_lock);
}
extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
/*
 * Update the access time unless flag 01000000 is set on the open file
 * (presumably O_NOATIME — confirm against fcntl.h). The
 * __builtin_constant_p expression is the expanded likely() branch
 * profiler from compiler.h; do not simplify it.
 */
static inline __attribute__((always_inline)) void file_accessed(struct file *file)
{
 if (__builtin_constant_p(((!(file->f_flags & 01000000)))) ? !!((!(file->f_flags & 01000000))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 1795, }; ______r = !!((!(file->f_flags & 01000000))); ______f.miss_hit[______r]++; ______r; }))
  touch_atime(file->f_path.mnt, file->f_path.dentry);
}
int sync_inode(struct inode *inode, struct writeback_control *wbc);
int sync_inode_metadata(struct inode *inode, int wait);
/* One registered filesystem driver ("ext4", "tmpfs", ...). */
struct file_system_type {
 const char *name;
 int fs_flags;
 struct dentry *(*mount) (struct file_system_type *, int,
         const char *, void *); /* returns root dentry of new mount */
 void (*kill_sb) (struct super_block *);
 struct module *owner;
 struct file_system_type * next; /* singly linked registration list */
 struct list_head fs_supers; /* all supers of this type */
 /* Lockdep class keys so each fs type gets distinct lock classes. */
 struct lock_class_key s_lock_key;
 struct lock_class_key s_umount_key;
 struct lock_class_key s_vfs_rename_key;
 struct lock_class_key i_lock_key;
 struct lock_class_key i_mutex_key;
 struct lock_class_key i_mutex_dir_key;
 struct lock_class_key i_alloc_sem_key;
};
/* Generic mount helpers filesystems use to implement ->mount(). */
extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
 void *data, int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
 int flags, const char *dev_name, void *data,
 int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_single(struct file_system_type *fs_type,
 int flags, void *data,
 int (*fill_super)(struct super_block *, void *, int));
extern struct dentry *mount_nodev(struct file_system_type *fs_type,
 int flags, void *data,
 int (*fill_super)(struct super_block *, void *, int));
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
/* Find-or-create a superblock matching @test, initialising via @set. */
struct super_block *sget(struct file_system_type *type,
   int (*test)(struct super_block *,void *),
   int (*set)(struct super_block *,void *),
   void *data);
extern struct dentry *mount_pseudo(struct file_system_type *, char *,
 const struct super_operations *ops,
 const struct dentry_operations *dops,
 unsigned long);
/* s_dirt accessors: flag/clear/query "superblock needs write_super". */
static inline __attribute__((always_inline)) void sb_mark_dirty(struct super_block *sb)
{
 sb->s_dirt = 1;
}
static inline __attribute__((always_inline)) void sb_mark_clean(struct super_block *sb)
{
 sb->s_dirt = 0;
}
static inline __attribute__((always_inline)) int sb_is_dirty(struct super_block *sb)
{
 return sb->s_dirt;
}
/* Filesystem driver (de)registration and mount-tree utilities. */
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
extern long do_mount(char *, char *, char *, unsigned long, void *);
extern struct vfsmount *collect_mounts(struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
     struct vfsmount *);
/* statfs variants keyed by path / fd / dentry. */
extern int vfs_statfs(struct path *, struct kstatfs *);
extern int user_statfs(const char *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
extern int current_umask(void);
extern struct kobject *fs_kobj;
extern int rw_verify_area(int, struct file *, loff_t *, size_t);
extern int locks_mandatory_locked(struct inode *);
extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
18782static inline __attribute__((always_inline)) int __mandatory_lock(struct inode *ino)
18783{
18784 return (ino->i_mode & (0002000 | 00010)) == 0002000;
18785}
18786static inline __attribute__((always_inline)) int mandatory_lock(struct inode *ino)
18787{
18788 return ((ino)->i_sb->s_flags & (64)) && __mandatory_lock(ino);
18789}
/*
 * Check for conflicting mandatory locks on @inode. The
 * __builtin_constant_p expression is the expanded likely() branch
 * profiler from compiler.h; do not simplify it.
 */
static inline __attribute__((always_inline)) int locks_verify_locked(struct inode *inode)
{
 if (__builtin_constant_p(((mandatory_lock(inode)))) ? !!((mandatory_lock(inode))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 1923, }; ______r = !!((mandatory_lock(inode))); ______f.miss_hit[______r]++; ______r; }))
  return locks_mandatory_locked(inode);
 return 0;
}
/*
 * Before truncating to @size, check mandatory locks covering the byte
 * range between the old and new size (direction-agnostic: the range
 * arguments below are min(size, i_size) and |i_size - size|).
 */
static inline __attribute__((always_inline)) int locks_verify_truncate(struct inode *inode,
        struct file *filp,
        loff_t size)
{
 if (__builtin_constant_p(((inode->i_flock && mandatory_lock(inode)))) ? !!((inode->i_flock && mandatory_lock(inode))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 1932, }; ______r = !!((inode->i_flock && mandatory_lock(inode))); ______f.miss_hit[______r]++; ______r; }))
  return locks_mandatory_area(
   2, inode, filp,
   size < inode->i_size ? size : inode->i_size,
   (size < inode->i_size ? inode->i_size - size
    : size - inode->i_size)
  );
 return 0;
}
/*
 * Break any lease on @inode before an access in @mode; cheap no-op
 * when no locks/leases exist (i_flock == NULL).
 */
static inline __attribute__((always_inline)) int break_lease(struct inode *inode, unsigned int mode)
{
 if (__builtin_constant_p(((inode->i_flock))) ? !!((inode->i_flock)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 1944, }; ______r = !!((inode->i_flock)); ______f.miss_hit[______r]++; ______r; }))
  return __break_lease(inode, mode);
 return 0;
}
/* open()/truncate()/close() implementation entry points. */
extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
         struct file *filp);
extern int do_fallocate(struct file *file, int mode, loff_t offset,
   loff_t len);
extern long do_sys_open(int dfd, const char *filename, int flags,
   int mode);
extern struct file *filp_open(const char *, int, int);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
       const char *, int);
extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
     const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
extern char * getname(const char *); /* copy a user path; release with putname() */
extern int ioctl_preallocate(struct file *filp, void *argp);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init_early(void);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init(unsigned long);
extern struct kmem_cache *names_cachep;
extern void putname(const char *name);
/* Block device registration and lookup. */
extern int register_blkdev(unsigned int, const char *);
extern void unregister_blkdev(unsigned int, const char *);
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern void invalidate_bdev(struct block_device *);
extern int sync_blockdev(struct block_device *bdev);
extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
extern int sync_filesystem(struct super_block *);
/* Default fops for the special inode types. */
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
extern const struct file_operations bad_sock_fops;
extern const struct file_operations def_fifo_fops;
extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
/* blkdev_get*: open a block device, pairing with blkdev_put(). */
extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
            void *holder);
extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
           void *holder);
extern int blkdev_put(struct block_device *bdev, fmode_t mode);
extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
extern void bd_unlink_disk_holder(struct block_device *bdev,
      struct gendisk *disk);
/* Character device major/minor region management. */
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
extern int __register_chrdev(unsigned int major, unsigned int baseminor,
        unsigned int count, const char *name,
        const struct file_operations *fops);
extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
    unsigned int count, const char *name);
extern void unregister_chrdev_region(dev_t, unsigned);
extern void chrdev_show(struct seq_file *,off_t);
/* Register a character-device driver that claims all 256 minors of @major. */
static inline __attribute__((always_inline)) int register_chrdev(unsigned int major, const char *name,
				const struct file_operations *fops)
{
	const unsigned int baseminor = 0;
	const unsigned int count = 256;

	return __register_chrdev(major, baseminor, count, name, fops);
}
/* Undo register_chrdev(): release minors 0..255 of @major. */
static inline __attribute__((always_inline)) void unregister_chrdev(unsigned int major, const char *name)
{
	const unsigned int baseminor = 0;
	const unsigned int count = 256;

	__unregister_chrdev(major, baseminor, count, name);
}
18881extern const char *__bdevname(dev_t, char *buffer);
18882extern const char *bdevname(struct block_device *bdev, char *buffer);
18883extern struct block_device *lookup_bdev(const char *);
18884extern void blkdev_show(struct seq_file *,off_t);
18885extern void init_special_inode(struct inode *, umode_t, dev_t);
18886extern void make_bad_inode(struct inode *);
18887extern int is_bad_inode(struct inode *);
18888extern const struct file_operations read_pipefifo_fops;
18889extern const struct file_operations write_pipefifo_fops;
18890extern const struct file_operations rdwr_pipefifo_fops;
18891extern int fs_may_remount_ro(struct super_block *);
18892extern void check_disk_size_change(struct gendisk *disk,
18893 struct block_device *bdev);
18894extern int revalidate_disk(struct gendisk *);
18895extern int check_disk_change(struct block_device *);
18896extern int __invalidate_device(struct block_device *, bool);
18897extern int invalidate_partition(struct gendisk *, int);
18898unsigned long invalidate_mapping_pages(struct address_space *mapping,
18899 unsigned long start, unsigned long end);
/*
 * invalidate_remote_inode - drop clean pagecache pages of an inode whose
 * backing data may have changed on a remote host (NFS-style revalidation).
 * Only regular files, directories and symlinks are touched (the i_mode
 * masks below are S_ISREG/S_ISDIR/S_ISLNK spelled out by the preprocessor);
 * the 0..-1 range spans the whole mapping.
 * NOTE(review): preprocessed output — the if() condition is the expansion
 * of the CONFIG_PROFILE_ALL_BRANCHES ftrace branch profiler
 * (include/linux/fs.h:2159); keep it token-identical so the per-branch
 * hit/miss records in section _ftrace_branch are unchanged.
 */
18900static inline __attribute__((always_inline)) void invalidate_remote_inode(struct inode *inode)
18901{
18902 if (__builtin_constant_p((((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) || (((inode->i_mode) & 00170000) == 0120000)))) ? !!(((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) || (((inode->i_mode) & 00170000) == 0120000))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
18903 "include/linux/fs.h"
18904 , .line =
18905 2159
18906 , }; ______r = !!(((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) || (((inode->i_mode) & 00170000) == 0120000))); ______f.miss_hit[______r]++; ______r; }))
18907 invalidate_mapping_pages(inode->i_mapping, 0, -1);
18908}
18909extern int invalidate_inode_pages2(struct address_space *mapping);
18910extern int invalidate_inode_pages2_range(struct address_space *mapping,
18911 unsigned long start, unsigned long end);
18912extern int write_inode_now(struct inode *, int);
18913extern int filemap_fdatawrite(struct address_space *);
18914extern int filemap_flush(struct address_space *);
18915extern int filemap_fdatawait(struct address_space *);
18916extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
18917 loff_t lend);
18918extern int filemap_write_and_wait(struct address_space *mapping);
18919extern int filemap_write_and_wait_range(struct address_space *mapping,
18920 loff_t lstart, loff_t lend);
18921extern int __filemap_fdatawrite_range(struct address_space *mapping,
18922 loff_t start, loff_t end, int sync_mode);
18923extern int filemap_fdatawrite_range(struct address_space *mapping,
18924 loff_t start, loff_t end);
18925extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
18926 int datasync);
18927extern int vfs_fsync(struct file *file, int datasync);
18928extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
18929extern void sync_supers(void);
18930extern void emergency_sync(void);
18931extern void emergency_remount(void);
18932extern sector_t bmap(struct inode *, sector_t);
18933extern int notify_change(struct dentry *, struct iattr *);
18934extern int inode_permission(struct inode *, int);
18935extern int generic_permission(struct inode *, int, unsigned int,
18936 int (*check_acl)(struct inode *, int, unsigned int));
18937static inline __attribute__((always_inline)) bool execute_ok(struct inode *inode)
18938{
18939 return (inode->i_mode & (00100|00010|00001)) || (((inode->i_mode) & 00170000) == 0040000);
18940}
18941extern int get_write_access(struct inode *);
18942extern int deny_write_access(struct file *);
/* Release one writer reference previously taken by get_write_access():
 * a single atomic decrement of i_writecount, no locking required. */
18943static inline __attribute__((always_inline)) void put_write_access(struct inode * inode)
18944{
18945 atomic_dec(&inode->i_writecount);
18946}
/*
 * allow_write_access - undo deny_write_access() on the file's inode by
 * incrementing i_writecount again; a NULL @file is silently ignored.
 * NOTE(review): the if(file) test below is the CONFIG_PROFILE_ALL_BRANCHES
 * ftrace expansion (include/linux/fs.h:2207) — leave it token-identical.
 */
18947static inline __attribute__((always_inline)) void allow_write_access(struct file *file)
18948{
18949 if (__builtin_constant_p(((file))) ? !!((file)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 2207, }; ______r = !!((file)); ______f.miss_hit[______r]++; ______r; }))
18950 atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
18951}
/* i_readcount accounting is compiled out in this configuration (no IMA):
 * intentionally a no-op. */
static inline __attribute__((always_inline)) void i_readcount_dec(struct inode *inode)
{
}
/* Counterpart of i_readcount_dec(); likewise compiled out to a no-op. */
static inline __attribute__((always_inline)) void i_readcount_inc(struct inode *inode)
{
}
18960extern int do_pipe_flags(int *, int);
18961extern struct file *create_read_pipe(struct file *f, int flags);
18962extern struct file *create_write_pipe(int flags);
18963extern void free_write_pipe(struct file *);
18964extern int kernel_read(struct file *, loff_t, char *, unsigned long);
18965extern struct file * open_exec(const char *);
18966extern int is_subdir(struct dentry *, struct dentry *);
18967extern int path_is_under(struct path *, struct path *);
18968extern ino_t find_inode_number(struct dentry *, struct qstr *);
18969extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
18970extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
18971extern int inode_init_always(struct super_block *, struct inode *);
18972extern void inode_init_once(struct inode *);
18973extern void address_space_init_once(struct address_space *mapping);
18974extern void ihold(struct inode * inode);
18975extern void iput(struct inode *);
18976extern struct inode * igrab(struct inode *);
18977extern ino_t iunique(struct super_block *, ino_t);
18978extern int inode_needs_sync(struct inode *inode);
18979extern int generic_delete_inode(struct inode *inode);
18980extern int generic_drop_inode(struct inode *inode);
18981extern struct inode *ilookup5_nowait(struct super_block *sb,
18982 unsigned long hashval, int (*test)(struct inode *, void *),
18983 void *data);
18984extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
18985 int (*test)(struct inode *, void *), void *data);
18986extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
18987extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
18988extern struct inode * iget_locked(struct super_block *, unsigned long);
18989extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
18990extern int insert_inode_locked(struct inode *);
18991extern void unlock_new_inode(struct inode *);
18992extern unsigned int get_next_ino(void);
18993extern void __iget(struct inode * inode);
18994extern void iget_failed(struct inode *);
18995extern void end_writeback(struct inode *);
18996extern void __destroy_inode(struct inode *);
18997extern struct inode *new_inode(struct super_block *);
18998extern void free_inode_nonrcu(struct inode *inode);
18999extern int should_remove_suid(struct dentry *);
19000extern int file_remove_suid(struct file *);
19001extern void __insert_inode_hash(struct inode *, unsigned long hashval);
19002extern void remove_inode_hash(struct inode *);
19003static inline __attribute__((always_inline)) void insert_inode_hash(struct inode *inode)
19004{
19005 __insert_inode_hash(inode, inode->i_ino);
19006}
19007extern void inode_sb_list_add(struct inode *inode);
19008extern void submit_bio(int, struct bio *);
19009extern int bdev_read_only(struct block_device *);
19010extern int set_blocksize(struct block_device *, int);
19011extern int sb_set_blocksize(struct super_block *, int);
19012extern int sb_min_blocksize(struct super_block *, int);
19013extern int generic_file_mmap(struct file *, struct vm_area_struct *);
19014extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
19015extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
19016int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
19017extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
19018extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long,
19019 loff_t *);
19020extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
19021extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
19022 unsigned long *, loff_t, loff_t *, size_t, size_t);
19023extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
19024 unsigned long, loff_t, loff_t *, size_t, ssize_t);
19025extern ssize_t do_sync_read(struct file *filp, char *buf, size_t len, loff_t *ppos);
19026extern ssize_t do_sync_write(struct file *filp, const char *buf, size_t len, loff_t *ppos);
19027extern int generic_segment_checks(const struct iovec *iov,
19028 unsigned long *nr_segs, size_t *count, int access_flags);
19029extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
19030 unsigned long nr_segs, loff_t pos);
19031extern int blkdev_fsync(struct file *filp, int datasync);
19032extern ssize_t generic_file_splice_read(struct file *, loff_t *,
19033 struct pipe_inode_info *, size_t, unsigned int);
19034extern ssize_t default_file_splice_read(struct file *, loff_t *,
19035 struct pipe_inode_info *, size_t, unsigned int);
19036extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
19037 struct file *, loff_t *, size_t, unsigned int);
19038extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
19039 struct file *out, loff_t *, size_t len, unsigned int flags);
19040extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
19041 size_t len, unsigned int flags);
19042extern void
19043file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
19044extern loff_t noop_llseek(struct file *file, loff_t offset, int origin);
19045extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
19046extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
19047extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset,
19048 int origin);
19049extern int generic_file_open(struct inode * inode, struct file * filp);
19050extern int nonseekable_open(struct inode * inode, struct file * filp);
19051static inline __attribute__((always_inline)) int xip_truncate_page(struct address_space *mapping, loff_t from)
19052{
19053 return 0;
19054}
19055typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
19056 loff_t file_offset);
19057enum {
19058 DIO_LOCKING = 0x01,
19059 DIO_SKIP_HOLES = 0x02,
19060};
19061void dio_end_io(struct bio *bio, int error);
19062ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
19063 struct block_device *bdev, const struct iovec *iov, loff_t offset,
19064 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
19065 dio_submit_t submit_io, int flags);
19066static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
19067 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
19068 loff_t offset, unsigned long nr_segs, get_block_t get_block,
19069 dio_iodone_t end_io)
19070{
19071 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
19072 nr_segs, get_block, end_io, ((void *)0),
19073 DIO_LOCKING | DIO_SKIP_HOLES);
19074}
19075extern const struct file_operations generic_ro_fops;
19076extern int vfs_readlink(struct dentry *, char *, int, const char *);
19077extern int vfs_follow_link(struct nameidata *, const char *);
19078extern int page_readlink(struct dentry *, char *, int);
19079extern void *page_follow_link_light(struct dentry *, struct nameidata *);
19080extern void page_put_link(struct dentry *, struct nameidata *, void *);
19081extern int __page_symlink(struct inode *inode, const char *symname, int len,
19082 int nofs);
19083extern int page_symlink(struct inode *inode, const char *symname, int len);
19084extern const struct inode_operations page_symlink_inode_operations;
19085extern int generic_readlink(struct dentry *, char *, int);
19086extern void generic_fillattr(struct inode *, struct kstat *);
19087extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
19088void __inode_add_bytes(struct inode *inode, loff_t bytes);
19089void inode_add_bytes(struct inode *inode, loff_t bytes);
19090void inode_sub_bytes(struct inode *inode, loff_t bytes);
19091loff_t inode_get_bytes(struct inode *inode);
19092void inode_set_bytes(struct inode *inode, loff_t bytes);
19093extern int vfs_readdir(struct file *, filldir_t, void *);
19094extern int vfs_stat(const char *, struct kstat *);
19095extern int vfs_lstat(const char *, struct kstat *);
19096extern int vfs_fstat(unsigned int, struct kstat *);
19097extern int vfs_fstatat(int , const char *, struct kstat *, int);
19098extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
19099 unsigned long arg);
19100extern int __generic_block_fiemap(struct inode *inode,
19101 struct fiemap_extent_info *fieinfo,
19102 loff_t start, loff_t len,
19103 get_block_t *get_block);
19104extern int generic_block_fiemap(struct inode *inode,
19105 struct fiemap_extent_info *fieinfo, u64 start,
19106 u64 len, get_block_t *get_block);
19107extern void get_filesystem(struct file_system_type *fs);
19108extern void put_filesystem(struct file_system_type *fs);
19109extern struct file_system_type *get_fs_type(const char *name);
19110extern struct super_block *get_super(struct block_device *);
19111extern struct super_block *get_active_super(struct block_device *bdev);
19112extern struct super_block *user_get_super(dev_t);
19113extern void drop_super(struct super_block *sb);
19114extern void iterate_supers(void (*)(struct super_block *, void *), void *);
19115extern int dcache_dir_open(struct inode *, struct file *);
19116extern int dcache_dir_close(struct inode *, struct file *);
19117extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
19118extern int dcache_readdir(struct file *, void *, filldir_t);
19119extern int simple_setattr(struct dentry *, struct iattr *);
19120extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
19121extern int simple_statfs(struct dentry *, struct kstatfs *);
19122extern int simple_link(struct dentry *, struct inode *, struct dentry *);
19123extern int simple_unlink(struct inode *, struct dentry *);
19124extern int simple_rmdir(struct inode *, struct dentry *);
19125extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
19126extern int noop_fsync(struct file *, int);
19127extern int simple_empty(struct dentry *);
19128extern int simple_readpage(struct file *file, struct page *page);
19129extern int simple_write_begin(struct file *file, struct address_space *mapping,
19130 loff_t pos, unsigned len, unsigned flags,
19131 struct page **pagep, void **fsdata);
19132extern int simple_write_end(struct file *file, struct address_space *mapping,
19133 loff_t pos, unsigned len, unsigned copied,
19134 struct page *page, void *fsdata);
19135extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
19136extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *);
19137extern const struct file_operations simple_dir_operations;
19138extern const struct inode_operations simple_dir_inode_operations;
19139struct tree_descr { char *name; const struct file_operations *ops; int mode; };
19140struct dentry *d_alloc_name(struct dentry *, const char *);
19141extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
19142extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
19143extern void simple_release_fs(struct vfsmount **mount, int *count);
19144extern ssize_t simple_read_from_buffer(void *to, size_t count,
19145 loff_t *ppos, const void *from, size_t available);
19146extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
19147 const void *from, size_t count);
19148extern int generic_file_fsync(struct file *, int);
19149extern int generic_check_addressable(unsigned, u64);
19150extern int buffer_migrate_page(struct address_space *,
19151 struct page *, struct page *);
19152extern int inode_change_ok(const struct inode *, struct iattr *);
19153extern int inode_newsize_ok(const struct inode *, loff_t offset);
19154extern void setattr_copy(struct inode *inode, const struct iattr *attr);
19155extern void file_update_time(struct file *file);
19156extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt);
19157extern void save_mount_options(struct super_block *sb, char *options);
19158extern void replace_mount_options(struct super_block *sb, char *options);
19159static inline __attribute__((always_inline)) ino_t parent_ino(struct dentry *dentry)
19160{
19161 ino_t res;
19162 spin_lock(&dentry->d_lock);
19163 res = dentry->d_parent->d_inode->i_ino;
19164 spin_unlock(&dentry->d_lock);
19165 return res;
19166}
19167struct simple_transaction_argresp {
19168 ssize_t size;
19169 char data[0];
19170};
19171char *simple_transaction_get(struct file *file, const char *buf,
19172 size_t size);
19173ssize_t simple_transaction_read(struct file *file, char *buf,
19174 size_t size, loff_t *pos);
19175int simple_transaction_release(struct inode *inode, struct file *file);
19176void simple_transaction_set(struct file *file, size_t n);
/* Deliberately empty: exists only so the format(printf,1,2) attribute makes
 * the compiler type-check the format string passed to simple_attr helpers. */
static inline __attribute__((always_inline)) void __attribute__((format(printf, 1, 2)))
__simple_attr_check_format(const char *fmt, ...)
{
	(void)fmt;
}
19181int simple_attr_open(struct inode *inode, struct file *file,
19182 int (*get)(void *, u64 *), int (*set)(void *, u64),
19183 const char *fmt);
19184int simple_attr_release(struct inode *inode, struct file *file);
19185ssize_t simple_attr_read(struct file *file, char *buf,
19186 size_t len, loff_t *ppos);
19187ssize_t simple_attr_write(struct file *file, const char *buf,
19188 size_t len, loff_t *ppos);
19189struct ctl_table;
19190int proc_nr_files(struct ctl_table *table, int write,
19191 void *buffer, size_t *lenp, loff_t *ppos);
19192int proc_nr_dentry(struct ctl_table *table, int write,
19193 void *buffer, size_t *lenp, loff_t *ppos);
19194int proc_nr_inodes(struct ctl_table *table, int write,
19195 void *buffer, size_t *lenp, loff_t *ppos);
19196int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) get_filesystem_list(char *buf);
/* Nonzero iff @mode carries setuid, or setgid together with group-exec
 * (setgid without group-exec means mandatory locking, not sxid). */
static inline __attribute__((always_inline)) int is_sxid(mode_t mode)
{
	if (mode & 0004000)
		return 1;

	return (mode & 0002000) && (mode & 00010);
}
/*
 * inode_has_no_xattr - mark the inode S_NOSEC (flag 4096) so write paths
 * can skip the expensive suid/security-xattr stripping, provided the mode
 * has no sxid bits and the superblock advertises MS_NOSEC (s_flags bit 28).
 * NOTE(review): the condition is the CONFIG_PROFILE_ALL_BRANCHES ftrace
 * expansion (include/linux/fs.h:2597) — keep it token-identical.
 */
19201static inline __attribute__((always_inline)) void inode_has_no_xattr(struct inode *inode)
19202{
19203 if (__builtin_constant_p(((!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & (1<<28))))) ? !!((!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & (1<<28)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/fs.h", .line = 2597, }; ______r = !!((!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & (1<<28)))); ______f.miss_hit[______r]++; ______r; }))
19204 inode->i_flags |= 4096;
19205}
19206struct range {
19207 u64 start;
19208 u64 end;
19209};
19210int add_range(struct range *range, int az, int nr_range,
19211 u64 start, u64 end);
19212int add_range_with_merge(struct range *range, int az, int nr_range,
19213 u64 start, u64 end);
19214void subtract_range(struct range *range, int az, u64 start, u64 end);
19215int clean_sort_range(struct range *range, int az);
19216void sort_range(struct range *range, int nr_range);
/*
 * cap_resource - clamp a 64-bit value to the maximum representable
 * resource_size_t, so a wider physical range cannot overflow the
 * resource tree types.
 * NOTE(review): the comparison is wrapped by the CONFIG_PROFILE_ALL_BRANCHES
 * ftrace expansion (include/linux/range.h:25) — keep it token-identical.
 */
19217static inline __attribute__((always_inline)) resource_size_t cap_resource(u64 val)
19218{
19219 if (__builtin_constant_p(((val > ((resource_size_t)~0)))) ? !!((val > ((resource_size_t)~0))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/range.h", .line = 25, }; ______r = !!((val > ((resource_size_t)~0))); ______f.miss_hit[______r]++; ______r; }))
19220 return ((resource_size_t)~0);
19221 return val;
19222}
19223struct mempolicy;
19224struct anon_vma;
19225struct file_ra_state;
19226struct user_struct;
19227struct writeback_control;
19228extern unsigned long max_mapnr;
19229extern unsigned long num_physpages;
19230extern unsigned long totalram_pages;
19231extern void * high_memory;
19232extern int page_cluster;
19233extern int sysctl_legacy_va_layout;
19234extern unsigned long empty_zero_page[((1UL) << 12) / sizeof(unsigned long)];
19235extern spinlock_t pgd_lock;
19236extern struct list_head pgd_list;
19237extern struct mm_struct *pgd_page_get_mm(struct page *page);
19238static inline __attribute__((always_inline)) int pte_dirty(pte_t pte)
19239{
19240 return pte_flags(pte) & (((pteval_t)(1)) << 6);
19241}
19242static inline __attribute__((always_inline)) int pte_young(pte_t pte)
19243{
19244 return pte_flags(pte) & (((pteval_t)(1)) << 5);
19245}
19246static inline __attribute__((always_inline)) int pmd_young(pmd_t pmd)
19247{
19248 return pmd_flags(pmd) & (((pteval_t)(1)) << 5);
19249}
19250static inline __attribute__((always_inline)) int pte_write(pte_t pte)
19251{
19252 return pte_flags(pte) & (((pteval_t)(1)) << 1);
19253}
19254static inline __attribute__((always_inline)) int pte_file(pte_t pte)
19255{
19256 return pte_flags(pte) & (((pteval_t)(1)) << 6);
19257}
19258static inline __attribute__((always_inline)) int pte_huge(pte_t pte)
19259{
19260 return pte_flags(pte) & (((pteval_t)(1)) << 7);
19261}
19262static inline __attribute__((always_inline)) int pte_global(pte_t pte)
19263{
19264 return pte_flags(pte) & (((pteval_t)(1)) << 8);
19265}
19266static inline __attribute__((always_inline)) int pte_exec(pte_t pte)
19267{
19268 return !(pte_flags(pte) & (((pteval_t)(1)) << 63));
19269}
19270static inline __attribute__((always_inline)) int pte_special(pte_t pte)
19271{
19272 return pte_flags(pte) & (((pteval_t)(1)) << 9);
19273}
19274static inline __attribute__((always_inline)) unsigned long pte_pfn(pte_t pte)
19275{
19276 return (pte_val(pte) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1))))) >> 12;
19277}
19278static inline __attribute__((always_inline)) unsigned long pmd_pfn(pmd_t pmd)
19279{
19280 return (pmd_val(pmd) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1))))) >> 12;
19281}
19282static inline __attribute__((always_inline)) int pmd_large(pmd_t pte)
19283{
19284 return (pmd_flags(pte) & ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0))) ==
19285 ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0));
19286}
19287static inline __attribute__((always_inline)) int pmd_trans_splitting(pmd_t pmd)
19288{
19289 return pmd_val(pmd) & (((pteval_t)(1)) << 9);
19290}
19291static inline __attribute__((always_inline)) int pmd_trans_huge(pmd_t pmd)
19292{
19293 return pmd_val(pmd) & (((pteval_t)(1)) << 7);
19294}
/*
 * has_transparent_hugepage - true when the CPU supports PSE. The body is
 * the preprocessed cpu_has(X86_FEATURE_PSE) idiom: a compile-time check
 * against the REQUIRED_MASK words, falling back to testing bit (0*32+3)
 * of boot_cpu_data.x86_capability at runtime. Keep token-identical —
 * this is generated macro output, not hand-written code.
 */
19295static inline __attribute__((always_inline)) int has_transparent_hugepage(void)
19296{
19297 return (__builtin_constant_p((0*32+ 3)) && ( ((((0*32+ 3))>>5)==0 && (1UL<<(((0*32+ 3))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+ 3))>>5)==1 && (1UL<<(((0*32+ 3))&31) & (0|0))) || ((((0*32+ 3))>>5)==2 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==3 && (1UL<<(((0*32+ 3))&31) & (0))) || ((((0*32+ 3))>>5)==4 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==5 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==6 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==7 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==8 && (1UL<<(((0*32+ 3))&31) & 0)) || ((((0*32+ 3))>>5)==9 && (1UL<<(((0*32+ 3))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+ 3))) ? constant_test_bit(((0*32+ 3)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+ 3)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))));
19298}
19299static inline __attribute__((always_inline)) pte_t pte_set_flags(pte_t pte, pteval_t set)
19300{
19301 pteval_t v = native_pte_val(pte);
19302 return native_make_pte(v | set);
19303}
19304static inline __attribute__((always_inline)) pte_t pte_clear_flags(pte_t pte, pteval_t clear)
19305{
19306 pteval_t v = native_pte_val(pte);
19307 return native_make_pte(v & ~clear);
19308}
19309static inline __attribute__((always_inline)) pte_t pte_mkclean(pte_t pte)
19310{
19311 return pte_clear_flags(pte, (((pteval_t)(1)) << 6));
19312}
19313static inline __attribute__((always_inline)) pte_t pte_mkold(pte_t pte)
19314{
19315 return pte_clear_flags(pte, (((pteval_t)(1)) << 5));
19316}
19317static inline __attribute__((always_inline)) pte_t pte_wrprotect(pte_t pte)
19318{
19319 return pte_clear_flags(pte, (((pteval_t)(1)) << 1));
19320}
19321static inline __attribute__((always_inline)) pte_t pte_mkexec(pte_t pte)
19322{
19323 return pte_clear_flags(pte, (((pteval_t)(1)) << 63));
19324}
19325static inline __attribute__((always_inline)) pte_t pte_mkdirty(pte_t pte)
19326{
19327 return pte_set_flags(pte, (((pteval_t)(1)) << 6));
19328}
19329static inline __attribute__((always_inline)) pte_t pte_mkyoung(pte_t pte)
19330{
19331 return pte_set_flags(pte, (((pteval_t)(1)) << 5));
19332}
19333static inline __attribute__((always_inline)) pte_t pte_mkwrite(pte_t pte)
19334{
19335 return pte_set_flags(pte, (((pteval_t)(1)) << 1));
19336}
19337static inline __attribute__((always_inline)) pte_t pte_mkhuge(pte_t pte)
19338{
19339 return pte_set_flags(pte, (((pteval_t)(1)) << 7));
19340}
19341static inline __attribute__((always_inline)) pte_t pte_clrhuge(pte_t pte)
19342{
19343 return pte_clear_flags(pte, (((pteval_t)(1)) << 7));
19344}
19345static inline __attribute__((always_inline)) pte_t pte_mkglobal(pte_t pte)
19346{
19347 return pte_set_flags(pte, (((pteval_t)(1)) << 8));
19348}
19349static inline __attribute__((always_inline)) pte_t pte_clrglobal(pte_t pte)
19350{
19351 return pte_clear_flags(pte, (((pteval_t)(1)) << 8));
19352}
19353static inline __attribute__((always_inline)) pte_t pte_mkspecial(pte_t pte)
19354{
19355 return pte_set_flags(pte, (((pteval_t)(1)) << 9));
19356}
19357static inline __attribute__((always_inline)) pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
19358{
19359 pmdval_t v = native_pmd_val(pmd);
19360 return __pmd(v | set);
19361}
19362static inline __attribute__((always_inline)) pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
19363{
19364 pmdval_t v = native_pmd_val(pmd);
19365 return __pmd(v & ~clear);
19366}
19367static inline __attribute__((always_inline)) pmd_t pmd_mkold(pmd_t pmd)
19368{
19369 return pmd_clear_flags(pmd, (((pteval_t)(1)) << 5));
19370}
19371static inline __attribute__((always_inline)) pmd_t pmd_wrprotect(pmd_t pmd)
19372{
19373 return pmd_clear_flags(pmd, (((pteval_t)(1)) << 1));
19374}
19375static inline __attribute__((always_inline)) pmd_t pmd_mkdirty(pmd_t pmd)
19376{
19377 return pmd_set_flags(pmd, (((pteval_t)(1)) << 6));
19378}
19379static inline __attribute__((always_inline)) pmd_t pmd_mkhuge(pmd_t pmd)
19380{
19381 return pmd_set_flags(pmd, (((pteval_t)(1)) << 7));
19382}
19383static inline __attribute__((always_inline)) pmd_t pmd_mkyoung(pmd_t pmd)
19384{
19385 return pmd_set_flags(pmd, (((pteval_t)(1)) << 5));
19386}
19387static inline __attribute__((always_inline)) pmd_t pmd_mkwrite(pmd_t pmd)
19388{
19389 return pmd_set_flags(pmd, (((pteval_t)(1)) << 1));
19390}
19391static inline __attribute__((always_inline)) pmd_t pmd_mknotpresent(pmd_t pmd)
19392{
19393 return pmd_clear_flags(pmd, (((pteval_t)(1)) << 0));
19394}
/*
 * massage_pgprot - sanitize protection bits before installing them: if the
 * pgprot marks the entry present (bit 0), drop any bits the CPU does not
 * support (__supported_pte_mask); non-present entries pass through so
 * swap/file encodings survive intact.
 * NOTE(review): the if() is the CONFIG_PROFILE_ALL_BRANCHES ftrace expansion
 * (arch/x86/include/asm/pgtable.h:301) — keep it token-identical.
 */
19395static inline __attribute__((always_inline)) pgprotval_t massage_pgprot(pgprot_t pgprot)
19396{
19397 pgprotval_t protval = ((pgprot).pgprot);
19398 if (__builtin_constant_p(((protval & (((pteval_t)(1)) << 0)))) ? !!((protval & (((pteval_t)(1)) << 0))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/pgtable.h", .line = 301, }; ______r = !!((protval & (((pteval_t)(1)) << 0))); ______f.miss_hit[______r]++; ______r; }))
19399 protval &= __supported_pte_mask;
19400 return protval;
19401}
19402static inline __attribute__((always_inline)) pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
19403{
19404 return __pte(((phys_addr_t)page_nr << 12) |
19405 massage_pgprot(pgprot));
19406}
19407static inline __attribute__((always_inline)) pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
19408{
19409 return __pmd(((phys_addr_t)page_nr << 12) |
19410 massage_pgprot(pgprot));
19411}
/*
 * pte_modify - change the protection of a pte while preserving the
 * _PAGE_CHG_MASK bits: the pfn field plus PCD(4)/PWT(3)/SPECIAL(9)/
 * ACCESSED(5)/DIRTY(6). The mask constant appears twice, fully expanded —
 * it must stay token-identical in both places.
 */
19412static inline __attribute__((always_inline)) pte_t pte_modify(pte_t pte, pgprot_t newprot)
19413{
19414 pteval_t val = pte_val(pte);
19415 val &= (((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6));
19416 val |= massage_pgprot(newprot) & ~(((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6));
19417 return __pte(val);
19418}
/*
 * pmd_modify - pmd counterpart of pte_modify(); the preserved mask
 * additionally keeps _PAGE_PSE (bit 7) so a huge pmd stays huge across a
 * protection change. Keep the duplicated expanded mask token-identical.
 */
19419static inline __attribute__((always_inline)) pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
19420{
19421 pmdval_t val = pmd_val(pmd);
19422 val &= ((((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6)) | (((pteval_t)(1)) << 7));
19423 val |= massage_pgprot(newprot) & ~((((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6)) | (((pteval_t)(1)) << 7));
19424 return __pmd(val);
19425}
/*
 * pgprot_modify - combine two pgprots: keep the _PAGE_CHG_MASK bits of
 * @oldprot (pfn field, PCD, PWT, SPECIAL, ACCESSED, DIRTY) and OR in all
 * bits of @newprot. The expanded mask must stay token-identical to the
 * one used in pte_modify().
 */
19426static inline __attribute__((always_inline)) pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
19427{
19428 pgprotval_t preservebits = ((oldprot).pgprot) & (((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))) | (((pteval_t)(1)) << 4) | (((pteval_t)(1)) << 3) | (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6));
19429 pgprotval_t addbits = ((newprot).pgprot);
19430 return ((pgprot_t) { (preservebits | addbits) } );
19431}
/* PAT policy check: may the memory type of [paddr, paddr+size) change from
 * @flags to @new_flags?  Untracked ranges always allow it.  Downgrading a
 * WC (bit 4 set) or WT (bit 3 set) mapping to WB (0) is refused; everything
 * else is allowed.  The if-conditions are wrapped in the expanded
 * CONFIG_PROFILE_ALL_BRANCHES instrumentation (per-site ftrace_branch_data
 * counters); the ______r temporaries carry the branch outcome. */
static inline __attribute__((always_inline)) int is_new_memtype_allowed(u64 paddr, unsigned long size,
     unsigned long flags,
     unsigned long new_flags)
{
 if (__builtin_constant_p(((x86_platform.is_untracked_pat_range(paddr, paddr + size)))) ? !!((x86_platform.is_untracked_pat_range(paddr, paddr + size))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/pgtable.h", .line = 363, }; ______r = !!((x86_platform.is_untracked_pat_range(paddr, paddr + size))); ______f.miss_hit[______r]++; ______r; }))
  return 1;
 if (__builtin_constant_p((((flags == ((((pteval_t)(1)) << 4)) && new_flags == (0)) || (flags == ((((pteval_t)(1)) << 3)) && new_flags == (0))))) ? !!(((flags == ((((pteval_t)(1)) << 4)) && new_flags == (0)) || (flags == ((((pteval_t)(1)) << 3)) && new_flags == (0)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "/data/exp/linux-3.0.4/arch/x86/include/asm/pgtable.h"
 , .line =
 375
 , }; ______r = !!(((flags == ((((pteval_t)(1)) << 4)) && new_flags == (0)) || (flags == ((((pteval_t)(1)) << 3)) && new_flags == (0)))); ______f.miss_hit[______r]++; ______r; }))
 {
  return 0;
 }
 return 1;
}
/* Prototypes and globals for page-table setup; definitions live elsewhere. */
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
struct mm_struct;
struct vm_area_struct;
extern pgd_t swapper_pg_dir[1024];
extern pgd_t initial_page_table[1024];
/* This configuration keeps no page-table cache: both hooks are no-ops. */
static inline __attribute__((always_inline)) void pgtable_cache_init(void) { }
static inline __attribute__((always_inline)) void check_pgt_cache(void) { }
void paging_init(void);
extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
/* PAE split-word PTE store: write the high half first, force ordering with a
 * compiler barrier, then write the low half.  NOTE(review): this ordering
 * presumably keeps the entry invalid until the low word (which holds the
 * present bit) is written -- confirm against pgtable-3level.h. */
static inline __attribute__((always_inline)) void native_set_pte(pte_t *ptep, pte_t pte)
{
 ptep->pte_high = pte.pte_high;
 __asm__ __volatile__("": : :"memory");
 ptep->pte_low = pte.pte_low;
}
/* Store the whole 64-bit PTE in one atomic operation (set_64bit). */
static inline __attribute__((always_inline)) void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
 set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}
/* Atomic 64-bit store of a PMD entry. */
static inline __attribute__((always_inline)) void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}
/* Atomic 64-bit store of a PUD entry. */
static inline __attribute__((always_inline)) void native_set_pud(pud_t *pudp, pud_t pud)
{
 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}
/* Clear a split PTE: zero the low half first (barrier in between), then the
 * high half -- the mirror ordering of native_set_pte().  @mm/@addr are
 * unused in the native implementation. */
static inline __attribute__((always_inline)) void native_pte_clear(struct mm_struct *mm, unsigned long addr,
        pte_t *ptep)
{
 ptep->pte_low = 0;
 __asm__ __volatile__("": : :"memory");
 ptep->pte_high = 0;
}
/* Clear a PMD as two 32-bit stores, low word first, with a compiler barrier
 * enforcing the order. */
static inline __attribute__((always_inline)) void native_pmd_clear(pmd_t *pmd)
{
 u32 *tmp = (u32 *)pmd;
 *tmp = 0;
 __asm__ __volatile__("": : :"memory");
 *(tmp + 1) = 0;
}
/* Write an all-zero entry into @pudp via set_pud(). */
static inline __attribute__((always_inline)) void pud_clear(pud_t *pudp)
{
 set_pud(pudp, ((pud_t) { __pgd(0) } ));
}
/* Atomically fetch-and-zero a split PTE.  The expanded xchg() swaps 0 into
 * the low word (which carries the present bit), after which the high word
 * can be read and cleared non-atomically. */
static inline __attribute__((always_inline)) pte_t native_ptep_get_and_clear(pte_t *ptep)
{
 pte_t res;
 res.pte_low = ({ __typeof(*((&ptep->pte_low))) __x = ((0)); switch (sizeof(*&ptep->pte_low)) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&ptep->pte_low)); asm volatile("xchgb %0,%1" : "=q" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&ptep->pte_low)); asm volatile("xchgw %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&ptep->pte_low)); asm volatile("xchgl %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } default: __xchg_wrong_size(); } __x; });
 res.pte_high = ptep->pte_high;
 ptep->pte_high = 0;
 return res;
}
/* View of a 64-bit PMD as two 32-bit halves, for word-at-a-time access. */
union split_pmd {
 struct {
  u32 pmd_low;
  u32 pmd_high;
 };
 pmd_t pmd;
};
/* PMD counterpart of native_ptep_get_and_clear(): atomically xchg the low
 * word to 0, then read and clear the high word. */
static inline __attribute__((always_inline)) pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
 union split_pmd res, *orig = (union split_pmd *)pmdp;
 res.pmd_low = ({ __typeof(*((&orig->pmd_low))) __x = ((0)); switch (sizeof(*&orig->pmd_low)) { case 1: { volatile u8 *__ptr = (volatile u8 *)((&orig->pmd_low)); asm volatile("xchgb %0,%1" : "=q" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)((&orig->pmd_low)); asm volatile("xchgw %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)((&orig->pmd_low)); asm volatile("xchgl %0,%1" : "=r" (__x), "+m" (*__ptr) : "0" (__x) : "memory"); break; } default: __xchg_wrong_size(); } __x; });
 res.pmd_high = orig->pmd_high;
 orig->pmd_high = 0;
 return res.pmd;
}
19517static inline __attribute__((always_inline)) int pte_none(pte_t pte)
19518{
19519 return !pte.pte;
19520}
19521static inline __attribute__((always_inline)) int pte_same(pte_t a, pte_t b)
19522{
19523 return a.pte == b.pte;
19524}
/* A PTE counts as present when bit 0 (PRESENT) or bit 8 (presumably
 * _PAGE_PROTNONE -- TODO confirm) is set. */
static inline __attribute__((always_inline)) int pte_present(pte_t a)
{
 return pte_flags(a) & ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 8));
}
/* The mask expanded to literal 0 in this configuration (kmemcheck off), so
 * this always returns 0. */
static inline __attribute__((always_inline)) int pte_hidden(pte_t pte)
{
 return pte_flags(pte) & (((pteval_t)(0)));
}
/* A PMD is present when its PRESENT bit (bit 0) is set. */
static inline __attribute__((always_inline)) int pmd_present(pmd_t pmd)
{
 return pmd_flags(pmd) & (((pteval_t)(1)) << 0);
}
/* Empty-slot test.  The (unsigned long) cast deliberately truncates the
 * 64-bit PAE value to its low 32 bits, so only the low word is examined. */
static inline __attribute__((always_inline)) int pmd_none(pmd_t pmd)
{
 return (unsigned long)native_pmd_val(pmd) == 0;
}
/* Kernel virtual address of the page table this PMD points to: mask the PFN
 * field out of the entry, then add the direct-map offset (0xC0000000). */
static inline __attribute__((always_inline)) unsigned long pmd_page_vaddr(pmd_t pmd)
{
 return (unsigned long)((void *)((unsigned long)(pmd_val(pmd) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))))+((unsigned long)(0xC0000000UL))));
}
/* Index of @address within its page-middle-directory: PAE has 512 entries
 * per PMD page, each covering 2 MiB, so the index is address bits 29..21. */
static inline __attribute__((always_inline)) unsigned long pmd_index(unsigned long address)
{
 unsigned long pmd_slot = address >> 21;
 return pmd_slot % 512;
}
/* Index of @address within its page table: 512 PTEs per table, each mapping
 * a 4 KiB page, so the index is address bits 20..12. */
static inline __attribute__((always_inline)) unsigned long pte_index(unsigned long address)
{
 unsigned long page_number = address / 4096UL;
 return page_number % 512UL;
}
/* Pointer to the kernel PTE mapping @address: base of the page table
 * referenced by *@pmd plus the PTE index of @address. */
static inline __attribute__((always_inline)) pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
 return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
/* Sanity check: ignoring bit 2 (user), a good PMD's flags must be exactly
 * bits 0|1|5|6 (presumably PRESENT|RW|ACCESSED|DIRTY -- the _KERNPG_TABLE
 * expansion).  Anything else is "bad". */
static inline __attribute__((always_inline)) int pmd_bad(pmd_t pmd)
{
 return (pmd_flags(pmd) & ~(((pteval_t)(1)) << 2)) != ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 1) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6));
}
/* Convert a 4 KiB page count to whole megabytes (2^20 / 2^12 = 256 pages
 * per MiB); remainder pages are discarded. */
static inline __attribute__((always_inline)) unsigned long pages_to_mb(unsigned long npg)
{
 return npg / 256UL;
}
/* True when the raw PUD value is zero (unlike pmd_none(), the full 64-bit
 * value is compared). */
static inline __attribute__((always_inline)) int pud_none(pud_t pud)
{
 return native_pud_val(pud) == 0;
}
/* A PUD is present when its PRESENT bit (bit 0) is set. */
static inline __attribute__((always_inline)) int pud_present(pud_t pud)
{
 return pud_flags(pud) & (((pteval_t)(1)) << 0);
}
/* Kernel virtual address of the PMD page referenced by @pud: PFN-mask the
 * entry (via its embedded pgd) and add the direct-map offset 0xC0000000. */
static inline __attribute__((always_inline)) unsigned long pud_page_vaddr(pud_t pud)
{
 return (unsigned long)((void *)((unsigned long)((unsigned long)(pgd_val((pud).pgd)) & ((pteval_t)(((signed long)(~(((1UL) << 12)-1))) & ((phys_addr_t)((1ULL << 44) - 1)))))+((unsigned long)(0xC0000000UL))));
}
/* Pointer to the PMD entry covering @address within the table *@pud. */
static inline __attribute__((always_inline)) pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
/* True when the PUD maps a large page: both bit 7 (PSE) and bit 0 (PRESENT)
 * must be set. */
static inline __attribute__((always_inline)) int pud_large(pud_t pud)
{
 return ((pgd_val((pud).pgd)) & ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0))) ==
  ((((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 0));
}
/* A PUD is "bad" if any flag outside bits 0|1|5|6 (kern-table flags) and
 * bit 2 (user) is set. */
static inline __attribute__((always_inline)) int pud_bad(pud_t pud)
{
 return (pud_flags(pud) & ~(((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 1) | (((pteval_t)(1)) << 5) | (((pteval_t)(1)) << 6)) | (((pteval_t)(1)) << 2))) != 0;
}
/* Runtime knob for 1 GiB direct mappings; defined elsewhere. */
extern int direct_gbpages;
/* Non-atomic fetch-and-clear of a PTE, for use when no other CPU can touch
 * the entry (plain read, then native_pte_clear()). */
static inline __attribute__((always_inline)) pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
 pte_t res = *ptep;
 native_pte_clear(((void *)0), 0, ptep);
 return res;
}
/* Non-atomic fetch-and-clear of a PMD (local-CPU-only variant). */
static inline __attribute__((always_inline)) pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
 pmd_t res = *pmdp;
 native_pmd_clear(pmdp);
 return res;
}
/* The native backend ignores @mm/@addr and just stores the PTE. */
static inline __attribute__((always_inline)) void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
         pte_t *ptep , pte_t pte)
{
 native_set_pte(ptep, pte);
}
/* The native backend ignores @mm/@addr and just stores the PMD. */
static inline __attribute__((always_inline)) void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
         pmd_t *pmdp , pmd_t pmd)
{
 native_set_pmd(pmdp, pmd);
}
/* Out-of-line PTE accessors that need TLB-flush machinery; defined in mm. */
struct vm_area_struct;
extern int ptep_set_access_flags(struct vm_area_struct *vma,
     unsigned long address, pte_t *ptep,
     pte_t entry, int dirty);
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
         unsigned long addr, pte_t *ptep);
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
      unsigned long address, pte_t *ptep);
/* Atomically fetch-and-clear a PTE, then notify pte_update() (paravirt/MMU
 * bookkeeping hook). */
static inline __attribute__((always_inline)) pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
           pte_t *ptep)
{
 pte_t pte = native_ptep_get_and_clear(ptep);
 pte_update(mm, addr, ptep);
 return pte;
}
/* Like ptep_get_and_clear(), but when @full is set (whole-mm teardown) the
 * cheaper non-atomic local variant may be used.  The if-condition carries
 * the expanded branch-profiling instrumentation. */
static inline __attribute__((always_inline)) pte_t ptep_get_and_clear_full(struct mm_struct *mm,
         unsigned long addr, pte_t *ptep,
         int full)
{
 pte_t pte;
 if (__builtin_constant_p(((full))) ? !!((full)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/pgtable.h", .line = 686, }; ______r = !!((full)); ______f.miss_hit[______r]++; ______r; })) {
  pte = native_local_ptep_get_and_clear(ptep);
 } else {
  pte = ptep_get_and_clear(mm, addr, ptep);
 }
 return pte;
}
/* Write-protect a PTE by atomically clearing bit 1 (presumably _PAGE_BIT_RW
 * -- TODO confirm), then notify pte_update(). */
static inline __attribute__((always_inline)) void ptep_set_wrprotect(struct mm_struct *mm,
          unsigned long addr, pte_t *ptep)
{
 clear_bit(1, (unsigned long *)&ptep->pte);
 pte_update(mm, addr, ptep);
}
/* Out-of-line PMD (huge-page) accessors; defined in mm. */
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
     unsigned long address, pmd_t *pmdp,
     pmd_t entry, int dirty);
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
         unsigned long addr, pmd_t *pmdp);
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
      unsigned long address, pmd_t *pmdp);
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
     unsigned long addr, pmd_t *pmdp);
/* True when the PMD's RW bit (bit 1) is set. */
static inline __attribute__((always_inline)) int pmd_write(pmd_t pmd)
{
 return pmd_flags(pmd) & (((pteval_t)(1)) << 1);
}
/* Atomically fetch-and-clear a PMD, then notify pmd_update(). */
static inline __attribute__((always_inline)) pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
           pmd_t *pmdp)
{
 pmd_t pmd = native_pmdp_get_and_clear(pmdp);
 pmd_update(mm, addr, pmdp);
 return pmd;
}
/* Write-protect a PMD by atomically clearing bit 1 (RW), then notify
 * pmd_update(). */
static inline __attribute__((always_inline)) void pmdp_set_wrprotect(struct mm_struct *mm,
          unsigned long addr, pmd_t *pmdp)
{
 clear_bit(1, (unsigned long *)pmdp);
 pmd_update(mm, addr, pmdp);
}
/* Copy @count PGD entries from @src to @dst (plain memcpy; caller handles
 * any flushing/serialization). */
static inline __attribute__((always_inline)) void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
 __builtin_memcpy(dst, src, count * sizeof(pgd_t));
}
/* Generic fallback: clearing a not-present PTE takes no shortcut here;
 * @full is ignored and pte_clear() is used unconditionally. */
static inline __attribute__((always_inline)) void pte_clear_not_present_full(struct mm_struct *mm,
           unsigned long address,
           pte_t *ptep,
           int full)
{
 pte_clear(mm, address, ptep);
}
/* Clear-and-TLB-flush variants; defined out of line in mm. */
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
         unsigned long address,
         pte_t *ptep);
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
         unsigned long address,
         pmd_t *pmdp);
19689static inline __attribute__((always_inline)) int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
19690{
19691 return pmd_val(pmd_a) == pmd_val(pmd_b);
19692}
/* Report-and-zero helpers for corrupt table entries; defined in mm. */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);
/* Returns 1 when *@pgd is empty or corrupt (clearing it in the corrupt
 * case), 0 when it is usable.  The second condition is an unlikely()
 * around pgd_bad(), expanded by CONFIG_PROFILE_ALL_BRANCHES into nested
 * ftrace_branch_data instrumentation. */
static inline __attribute__((always_inline)) int pgd_none_or_clear_bad(pgd_t *pgd)
{
 if (__builtin_constant_p(((pgd_none(*pgd)))) ? !!((pgd_none(*pgd))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 257, }; ______r = !!((pgd_none(*pgd))); ______f.miss_hit[______r]++; ______r; }))
  return 1;
 if (__builtin_constant_p((((__builtin_constant_p(pgd_bad(*pgd)) ? !!(pgd_bad(*pgd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 259, }; ______r = __builtin_expect(!!(pgd_bad(*pgd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(pgd_bad(*pgd)) ? !!(pgd_bad(*pgd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 259, }; ______r = __builtin_expect(!!(pgd_bad(*pgd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 259, }; ______r = !!(((__builtin_constant_p(pgd_bad(*pgd)) ? !!(pgd_bad(*pgd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 259, }; ______r = __builtin_expect(!!(pgd_bad(*pgd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
  pgd_clear_bad(pgd);
  return 1;
 }
 return 0;
}
/* PUD analogue of pgd_none_or_clear_bad(): 1 when empty or corrupt (the
 * corrupt entry is cleared via pud_clear_bad()), 0 when usable.  Same
 * expanded branch-profiling wrappers as above. */
static inline __attribute__((always_inline)) int pud_none_or_clear_bad(pud_t *pud)
{
 if (__builtin_constant_p(((pud_none(*pud)))) ? !!((pud_none(*pud))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 268, }; ______r = !!((pud_none(*pud))); ______f.miss_hit[______r]++; ______r; }))
  return 1;
 if (__builtin_constant_p((((__builtin_constant_p(pud_bad(*pud)) ? !!(pud_bad(*pud)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 270, }; ______r = __builtin_expect(!!(pud_bad(*pud)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(pud_bad(*pud)) ? !!(pud_bad(*pud)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 270, }; ______r = __builtin_expect(!!(pud_bad(*pud)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 270, }; ______r = !!(((__builtin_constant_p(pud_bad(*pud)) ? !!(pud_bad(*pud)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 270, }; ______r = __builtin_expect(!!(pud_bad(*pud)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
  pud_clear_bad(pud);
  return 1;
 }
 return 0;
}
/* PMD analogue of pgd_none_or_clear_bad(): 1 when empty or corrupt (the
 * corrupt entry is cleared via pmd_clear_bad()), 0 when usable. */
static inline __attribute__((always_inline)) int pmd_none_or_clear_bad(pmd_t *pmd)
{
 if (__builtin_constant_p(((pmd_none(*pmd)))) ? !!((pmd_none(*pmd))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 279, }; ______r = !!((pmd_none(*pmd))); ______f.miss_hit[______r]++; ______r; }))
  return 1;
 if (__builtin_constant_p((((__builtin_constant_p(pmd_bad(*pmd)) ? !!(pmd_bad(*pmd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 281, }; ______r = __builtin_expect(!!(pmd_bad(*pmd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(pmd_bad(*pmd)) ? !!(pmd_bad(*pmd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 281, }; ______r = __builtin_expect(!!(pmd_bad(*pmd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 281, }; ______r = !!(((__builtin_constant_p(pmd_bad(*pmd)) ? !!(pmd_bad(*pmd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/asm-generic/pgtable.h", .line = 281, }; ______r = __builtin_expect(!!(pmd_bad(*pmd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
  pmd_clear_bad(pmd);
  return 1;
 }
 return 0;
}
/* Begin a protection-change transaction: atomically fetch-and-clear the PTE
 * so concurrent hardware A/D-bit updates cannot be lost. */
static inline __attribute__((always_inline)) pte_t __ptep_modify_prot_start(struct mm_struct *mm,
         unsigned long addr,
         pte_t *ptep)
{
 return ptep_get_and_clear(mm, addr, ptep);
}
/* Commit a protection-change transaction by installing the new PTE. */
static inline __attribute__((always_inline)) void __ptep_modify_prot_commit(struct mm_struct *mm,
         unsigned long addr,
         pte_t *ptep, pte_t pte)
{
 set_pte_at(mm, addr, ptep, pte);
}
/* PAT PFN-range tracking hooks and mm globals; defined elsewhere. */
extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
   unsigned long pfn, unsigned long size);
extern int track_pfn_vma_copy(struct vm_area_struct *vma);
extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
    unsigned long size);
extern struct kmem_cache *vm_area_cachep;
extern pgprot_t protection_map[16];
/* Test vm_flags bit 0x40000000 (presumably VM_PFN_AT_MMAP, i.e. a linear
 * remap_pfn_range() mapping -- TODO confirm against mm.h). */
static inline __attribute__((always_inline)) int is_linear_pfn_mapping(struct vm_area_struct *vma)
{
 return !!(vma->vm_flags & 0x40000000);
}
/* Test vm_flags bit 0x00000400 (presumably VM_PFNMAP: pages not backed by
 * struct page -- TODO confirm against mm.h). */
static inline __attribute__((always_inline)) int is_pfn_mapping(struct vm_area_struct *vma)
{
 return !!(vma->vm_flags & 0x00000400);
}
/* Fault descriptor passed to vm_operations_struct::fault / page_mkwrite. */
struct vm_fault {
 unsigned int flags;   /* fault disposition flags */
 unsigned long pgoff;   /* faulting page offset within the mapping */
 void *virtual_address;   /* faulting user virtual address */
 struct page *page;   /* page supplied by/returned from the handler */
};
/* Per-VMA callback table: open/close track VMA lifetime, fault demand-pages,
 * page_mkwrite notifies before a shared page becomes writable, access backs
 * ptrace-style peeks into special mappings. */
struct vm_operations_struct {
 void (*open)(struct vm_area_struct * area);
 void (*close)(struct vm_area_struct * area);
 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
 int (*access)(struct vm_area_struct *vma, unsigned long addr,
        void *buf, int len, int write);
};
struct mmu_gather;
struct inode;
/* Bit numbers within page->flags.  The trailing aliases reuse bits whose
 * primary meaning cannot co-occur with the aliased use (e.g. PG_checked
 * shares PG_owner_priv_1). */
enum pageflags {
 PG_locked,
 PG_error,
 PG_referenced,
 PG_uptodate,
 PG_dirty,
 PG_lru,
 PG_active,
 PG_slab,
 PG_owner_priv_1,
 PG_arch_1,
 PG_reserved,
 PG_private,
 PG_private_2,
 PG_writeback,
 PG_head,
 PG_tail,
 PG_swapcache,
 PG_mappedtodisk,
 PG_reclaim,
 PG_swapbacked,
 PG_unevictable,
 PG_mlocked,
 PG_uncached,
 PG_compound_lock,
 __NR_PAGEFLAGS,   /* number of real flag bits; aliases follow */
 PG_checked = PG_owner_priv_1,
 PG_fscache = PG_private_2,
 PG_pinned = PG_owner_priv_1,
 PG_savepinned = PG_dirty,
 PG_slob_free = PG_private,
 PG_slub_frozen = PG_active,
};
struct page;
/* Expanded TESTPAGEFLAG/PAGEFLAG accessor families.  Pattern per flag X:
 * PageX() tests the bit in page->flags (constant_/variable_test_bit picked
 * by __builtin_constant_p); SetPageX()/ClearPageX() are atomic updates;
 * __SetPageX()/__ClearPageX() are the non-atomic variants; TestSet/TestClear
 * combine test-and-modify.  Note PageReadahead/SetPageReadahead alias the
 * PG_reclaim bit. */
static inline __attribute__((always_inline)) int PageLocked(struct page *page) { return (__builtin_constant_p((PG_locked)) ? constant_test_bit((PG_locked), (&page->flags)) : variable_test_bit((PG_locked), (&page->flags))); }
static inline __attribute__((always_inline)) int PageError(struct page *page) { return (__builtin_constant_p((PG_error)) ? constant_test_bit((PG_error), (&page->flags)) : variable_test_bit((PG_error), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageError(struct page *page) { set_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) void ClearPageError(struct page *page) { clear_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageError(struct page *page) { return test_and_clear_bit(PG_error, &page->flags); }
static inline __attribute__((always_inline)) int PageReferenced(struct page *page) { return (__builtin_constant_p((PG_referenced)) ? constant_test_bit((PG_referenced), (&page->flags)) : variable_test_bit((PG_referenced), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageReferenced(struct page *page) { set_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReferenced(struct page *page) { clear_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReferenced(struct page *page) { return test_and_clear_bit(PG_referenced, &page->flags); }
static inline __attribute__((always_inline)) int PageDirty(struct page *page) { return (__builtin_constant_p((PG_dirty)) ? constant_test_bit((PG_dirty), (&page->flags)) : variable_test_bit((PG_dirty), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageDirty(struct page *page) { set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void ClearPageDirty(struct page *page) { clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageDirty(struct page *page) { return test_and_set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageDirty(struct page *page) { return test_and_clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageDirty(struct page *page) { __clear_bit(PG_dirty, &page->flags); }
static inline __attribute__((always_inline)) int PageLRU(struct page *page) { return (__builtin_constant_p((PG_lru)) ? constant_test_bit((PG_lru), (&page->flags)) : variable_test_bit((PG_lru), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageLRU(struct page *page) { set_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void ClearPageLRU(struct page *page) { clear_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageLRU(struct page *page) { __clear_bit(PG_lru, &page->flags); }
static inline __attribute__((always_inline)) int PageActive(struct page *page) { return (__builtin_constant_p((PG_active)) ? constant_test_bit((PG_active), (&page->flags)) : variable_test_bit((PG_active), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageActive(struct page *page) { set_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void ClearPageActive(struct page *page) { clear_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageActive(struct page *page) { __clear_bit(PG_active, &page->flags); }
 static inline __attribute__((always_inline)) int TestClearPageActive(struct page *page) { return test_and_clear_bit(PG_active, &page->flags); }
static inline __attribute__((always_inline)) int PageSlab(struct page *page) { return (__builtin_constant_p((PG_slab)) ? constant_test_bit((PG_slab), (&page->flags)) : variable_test_bit((PG_slab), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageSlab(struct page *page) { __set_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlab(struct page *page) { __clear_bit(PG_slab, &page->flags); }
static inline __attribute__((always_inline)) int PageChecked(struct page *page) { return (__builtin_constant_p((PG_checked)) ? constant_test_bit((PG_checked), (&page->flags)) : variable_test_bit((PG_checked), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageChecked(struct page *page) { set_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageChecked(struct page *page) { clear_bit(PG_checked, &page->flags); }
static inline __attribute__((always_inline)) int PagePinned(struct page *page) { return (__builtin_constant_p((PG_pinned)) ? constant_test_bit((PG_pinned), (&page->flags)) : variable_test_bit((PG_pinned), (&page->flags))); } static inline __attribute__((always_inline)) void SetPagePinned(struct page *page) { set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePinned(struct page *page) { clear_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestSetPagePinned(struct page *page) { return test_and_set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestClearPagePinned(struct page *page) { return test_and_clear_bit(PG_pinned, &page->flags); }
static inline __attribute__((always_inline)) int PageSavePinned(struct page *page) { return (__builtin_constant_p((PG_savepinned)) ? constant_test_bit((PG_savepinned), (&page->flags)) : variable_test_bit((PG_savepinned), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageSavePinned(struct page *page) { set_bit(PG_savepinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSavePinned(struct page *page) { clear_bit(PG_savepinned, &page->flags); };
static inline __attribute__((always_inline)) int PageReserved(struct page *page) { return (__builtin_constant_p((PG_reserved)) ? constant_test_bit((PG_reserved), (&page->flags)) : variable_test_bit((PG_reserved), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageReserved(struct page *page) { set_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReserved(struct page *page) { clear_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageReserved(struct page *page) { __clear_bit(PG_reserved, &page->flags); }
static inline __attribute__((always_inline)) int PageSwapBacked(struct page *page) { return (__builtin_constant_p((PG_swapbacked)) ? constant_test_bit((PG_swapbacked), (&page->flags)) : variable_test_bit((PG_swapbacked), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageSwapBacked(struct page *page) { set_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSwapBacked(struct page *page) { clear_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSwapBacked(struct page *page) { __clear_bit(PG_swapbacked, &page->flags); }
static inline __attribute__((always_inline)) int PageSlobFree(struct page *page) { return (__builtin_constant_p((PG_slob_free)) ? constant_test_bit((PG_slob_free), (&page->flags)) : variable_test_bit((PG_slob_free), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageSlobFree(struct page *page) { __set_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlobFree(struct page *page) { __clear_bit(PG_slob_free, &page->flags); }
static inline __attribute__((always_inline)) int PageSlubFrozen(struct page *page) { return (__builtin_constant_p((PG_slub_frozen)) ? constant_test_bit((PG_slub_frozen), (&page->flags)) : variable_test_bit((PG_slub_frozen), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageSlubFrozen(struct page *page) { __set_bit(PG_slub_frozen, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlubFrozen(struct page *page) { __clear_bit(PG_slub_frozen, &page->flags); }
static inline __attribute__((always_inline)) int PagePrivate(struct page *page) { return (__builtin_constant_p((PG_private)) ? constant_test_bit((PG_private), (&page->flags)) : variable_test_bit((PG_private), (&page->flags))); } static inline __attribute__((always_inline)) void SetPagePrivate(struct page *page) { set_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate(struct page *page) { clear_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void __SetPagePrivate(struct page *page) { __set_bit(PG_private, &page->flags); }
 static inline __attribute__((always_inline)) void __ClearPagePrivate(struct page *page) { __clear_bit(PG_private, &page->flags); }
static inline __attribute__((always_inline)) int PagePrivate2(struct page *page) { return (__builtin_constant_p((PG_private_2)) ? constant_test_bit((PG_private_2), (&page->flags)) : variable_test_bit((PG_private_2), (&page->flags))); } static inline __attribute__((always_inline)) void SetPagePrivate2(struct page *page) { set_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate2(struct page *page) { clear_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) int TestSetPagePrivate2(struct page *page) { return test_and_set_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) int TestClearPagePrivate2(struct page *page) { return test_and_clear_bit(PG_private_2, &page->flags); }
static inline __attribute__((always_inline)) int PageOwnerPriv1(struct page *page) { return (__builtin_constant_p((PG_owner_priv_1)) ? constant_test_bit((PG_owner_priv_1), (&page->flags)) : variable_test_bit((PG_owner_priv_1), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageOwnerPriv1(struct page *page) { set_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) void ClearPageOwnerPriv1(struct page *page) { clear_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageOwnerPriv1(struct page *page) { return test_and_clear_bit(PG_owner_priv_1, &page->flags); }
static inline __attribute__((always_inline)) int PageWriteback(struct page *page) { return (__builtin_constant_p((PG_writeback)) ? constant_test_bit((PG_writeback), (&page->flags)) : variable_test_bit((PG_writeback), (&page->flags))); } static inline __attribute__((always_inline)) int TestSetPageWriteback(struct page *page) { return test_and_set_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageWriteback(struct page *page) { return test_and_clear_bit(PG_writeback, &page->flags); }
static inline __attribute__((always_inline)) int PageMappedToDisk(struct page *page) { return (__builtin_constant_p((PG_mappedtodisk)) ? constant_test_bit((PG_mappedtodisk), (&page->flags)) : variable_test_bit((PG_mappedtodisk), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageMappedToDisk(struct page *page) { set_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) void ClearPageMappedToDisk(struct page *page) { clear_bit(PG_mappedtodisk, &page->flags); }
static inline __attribute__((always_inline)) int PageReclaim(struct page *page) { return (__builtin_constant_p((PG_reclaim)) ? constant_test_bit((PG_reclaim), (&page->flags)) : variable_test_bit((PG_reclaim), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageReclaim(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReclaim(struct page *page) { clear_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReclaim(struct page *page) { return test_and_clear_bit(PG_reclaim, &page->flags); }
static inline __attribute__((always_inline)) int PageReadahead(struct page *page) { return (__builtin_constant_p((PG_reclaim)) ? constant_test_bit((PG_reclaim), (&page->flags)) : variable_test_bit((PG_reclaim), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageReadahead(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReadahead(struct page *page) { clear_bit(PG_reclaim, &page->flags); }
static inline __attribute__((always_inline)) int PageSwapCache(struct page *page) { return (__builtin_constant_p((PG_swapcache)) ? constant_test_bit((PG_swapcache), (&page->flags)) : variable_test_bit((PG_swapcache), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageSwapCache(struct page *page) { set_bit(PG_swapcache, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSwapCache(struct page *page) { clear_bit(PG_swapcache, &page->flags); }
19827static inline __attribute__((always_inline)) int PageUnevictable(struct page *page) { return (__builtin_constant_p((PG_unevictable)) ? constant_test_bit((PG_unevictable), (&page->flags)) : variable_test_bit((PG_unevictable), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageUnevictable(struct page *page) { set_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void ClearPageUnevictable(struct page *page) { clear_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageUnevictable(struct page *page) { __clear_bit(PG_unevictable, &page->flags); }
19828 static inline __attribute__((always_inline)) int TestClearPageUnevictable(struct page *page) { return test_and_clear_bit(PG_unevictable, &page->flags); }
19829static inline __attribute__((always_inline)) int PageMlocked(struct page *page) { return (__builtin_constant_p((PG_mlocked)) ? constant_test_bit((PG_mlocked), (&page->flags)) : variable_test_bit((PG_mlocked), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageMlocked(struct page *page) { set_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageMlocked(struct page *page) { clear_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageMlocked(struct page *page) { __clear_bit(PG_mlocked, &page->flags); }
19830 static inline __attribute__((always_inline)) int TestSetPageMlocked(struct page *page) { return test_and_set_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageMlocked(struct page *page) { return test_and_clear_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) int __TestClearPageMlocked(struct page *page) { return __test_and_clear_bit(PG_mlocked, &page->flags); }
19831static inline __attribute__((always_inline)) int PageUncached(struct page *page) { return (__builtin_constant_p((PG_uncached)) ? constant_test_bit((PG_uncached), (&page->flags)) : variable_test_bit((PG_uncached), (&page->flags))); } static inline __attribute__((always_inline)) void SetPageUncached(struct page *page) { set_bit(PG_uncached, &page->flags); } static inline __attribute__((always_inline)) void ClearPageUncached(struct page *page) { clear_bit(PG_uncached, &page->flags); }
19832static inline __attribute__((always_inline)) int PageHWPoison(struct page *page) { return 0; }
19833u64 stable_page_flags(struct page *page);
19834static inline __attribute__((always_inline)) int PageUptodate(struct page *page)
19835{
19836 int ret = (__builtin_constant_p((PG_uptodate)) ? constant_test_bit((PG_uptodate), (&(page)->flags)) : variable_test_bit((PG_uptodate), (&(page)->flags)));
19837 if (__builtin_constant_p(((ret))) ? !!((ret)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 295, }; ______r = !!((ret)); ______f.miss_hit[______r]++; ______r; }))
19838 __asm__ __volatile__("": : :"memory");
19839 return ret;
19840}
19841static inline __attribute__((always_inline)) void __SetPageUptodate(struct page *page)
19842{
19843 __asm__ __volatile__("": : :"memory");
19844 __set_bit(PG_uptodate, &(page)->flags);
19845}
19846static inline __attribute__((always_inline)) void SetPageUptodate(struct page *page)
19847{
19848 __asm__ __volatile__("": : :"memory");
19849 set_bit(PG_uptodate, &(page)->flags);
19850}
19851static inline __attribute__((always_inline)) void ClearPageUptodate(struct page *page) { clear_bit(PG_uptodate, &page->flags); }
19852extern void cancel_dirty_page(struct page *page, unsigned int account_size);
19853int test_clear_page_writeback(struct page *page);
19854int test_set_page_writeback(struct page *page);
19855static inline __attribute__((always_inline)) void set_page_writeback(struct page *page)
19856{
19857 test_set_page_writeback(page);
19858}
19859static inline __attribute__((always_inline)) int PageHead(struct page *page) { return (__builtin_constant_p((PG_head)) ? constant_test_bit((PG_head), (&page->flags)) : variable_test_bit((PG_head), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageHead(struct page *page) { __set_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageHead(struct page *page) { __clear_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void ClearPageHead(struct page *page) { clear_bit(PG_head, &page->flags); }
19860static inline __attribute__((always_inline)) int PageTail(struct page *page) { return (__builtin_constant_p((PG_tail)) ? constant_test_bit((PG_tail), (&page->flags)) : variable_test_bit((PG_tail), (&page->flags))); } static inline __attribute__((always_inline)) void __SetPageTail(struct page *page) { __set_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageTail(struct page *page) { __clear_bit(PG_tail, &page->flags); }
19861static inline __attribute__((always_inline)) int PageCompound(struct page *page)
19862{
19863 return page->flags & ((1L << PG_head) | (1L << PG_tail));
19864}
19865static inline __attribute__((always_inline)) void ClearPageCompound(struct page *page)
19866{
19867 do { if (__builtin_constant_p((((__builtin_constant_p(!PageHead(page)) ? !!(!PageHead(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 356, }; ______r = __builtin_expect(!!(!PageHead(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(!PageHead(page)) ? !!(!PageHead(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 356, }; ______r = __builtin_expect(!!(!PageHead(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 356, }; ______r = !!(((__builtin_constant_p(!PageHead(page)) ? !!(!PageHead(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/page-flags.h", .line = 356, }; ______r = __builtin_expect(!!(!PageHead(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/page-flags.h"), "i" (356), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
19868 ClearPageHead(page);
19869}
19870static inline __attribute__((always_inline)) int PageTransHuge(struct page *page)
19871{
19872 do { (void)(PageTail(page)); } while (0);
19873 return PageHead(page);
19874}
19875static inline __attribute__((always_inline)) int PageTransCompound(struct page *page)
19876{
19877 return PageCompound(page);
19878}
19879static inline __attribute__((always_inline)) int page_has_private(struct page *page)
19880{
19881 return !!(page->flags & (1 << PG_private | 1 << PG_private_2));
19882}
19883extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
19884 struct vm_area_struct *vma,
19885 unsigned long address, pmd_t *pmd,
19886 unsigned int flags);
19887extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
19888 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
19889 struct vm_area_struct *vma);
19890extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
19891 unsigned long address, pmd_t *pmd,
19892 pmd_t orig_pmd);
19893extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm);
19894extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
19895 unsigned long addr,
19896 pmd_t *pmd,
19897 unsigned int flags);
19898extern int zap_huge_pmd(struct mmu_gather *tlb,
19899 struct vm_area_struct *vma,
19900 pmd_t *pmd);
19901extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
19902 unsigned long addr, unsigned long end,
19903 unsigned char *vec);
19904extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
19905 unsigned long addr, pgprot_t newprot);
19906enum transparent_hugepage_flag {
19907 TRANSPARENT_HUGEPAGE_FLAG,
19908 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
19909 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
19910 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
19911 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
19912};
19913enum page_check_address_pmd_flag {
19914 PAGE_CHECK_ADDRESS_PMD_FLAG,
19915 PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
19916 PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
19917};
19918extern pmd_t *page_check_address_pmd(struct page *page,
19919 struct mm_struct *mm,
19920 unsigned long address,
19921 enum page_check_address_pmd_flag flag);
19922extern unsigned long transparent_hugepage_flags;
19923extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
19924 pmd_t *dst_pmd, pmd_t *src_pmd,
19925 struct vm_area_struct *vma,
19926 unsigned long addr, unsigned long end);
19927extern int handle_pte_fault(struct mm_struct *mm,
19928 struct vm_area_struct *vma, unsigned long address,
19929 pte_t *pte, pmd_t *pmd, unsigned int flags);
19930extern int split_huge_page(struct page *page);
19931extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
19932extern int hugepage_madvise(struct vm_area_struct *vma,
19933 unsigned long *vm_flags, int advice);
19934extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
19935 unsigned long start,
19936 unsigned long end,
19937 long adjust_next);
19938static inline __attribute__((always_inline)) void vma_adjust_trans_huge(struct vm_area_struct *vma,
19939 unsigned long start,
19940 unsigned long end,
19941 long adjust_next)
19942{
19943 if (__builtin_constant_p(((!vma->anon_vma || vma->vm_ops))) ? !!((!vma->anon_vma || vma->vm_ops)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 116, }; ______r = !!((!vma->anon_vma || vma->vm_ops)); ______f.miss_hit[______r]++; ______r; }))
19944 return;
19945 __vma_adjust_trans_huge(vma, start, end, adjust_next);
19946}
19947static inline __attribute__((always_inline)) int hpage_nr_pages(struct page *page)
19948{
19949 if (__builtin_constant_p((((__builtin_constant_p(PageTransHuge(page)) ? !!(PageTransHuge(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 122, }; ______r = __builtin_expect(!!(PageTransHuge(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageTransHuge(page)) ? !!(PageTransHuge(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 122, }; ______r = __builtin_expect(!!(PageTransHuge(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 122, }; ______r = !!(((__builtin_constant_p(PageTransHuge(page)) ? !!(PageTransHuge(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 122, }; ______r = __builtin_expect(!!(PageTransHuge(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
19950 return (1<<(21 -12));
19951 return 1;
19952}
19953static inline __attribute__((always_inline)) struct page *compound_trans_head(struct page *page)
19954{
19955 if (__builtin_constant_p(((PageTail(page)))) ? !!((PageTail(page))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 128, }; ______r = !!((PageTail(page))); ______f.miss_hit[______r]++; ______r; })) {
19956 struct page *head;
19957 head = page->first_page;
19958 __asm__ __volatile__("": : :"memory");
19959 if (__builtin_constant_p(((PageTail(page)))) ? !!((PageTail(page))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/huge_mm.h", .line = 138, }; ______r = !!((PageTail(page))); ______f.miss_hit[______r]++; ______r; }))
19960 return head;
19961 }
19962 return page;
19963}
19964static inline __attribute__((always_inline)) int put_page_testzero(struct page *page)
19965{
19966 do { (void)(atomic_read(&page->_count) == 0); } while (0);
19967 return atomic_dec_and_test(&page->_count);
19968}
19969static inline __attribute__((always_inline)) int get_page_unless_zero(struct page *page)
19970{
19971 return atomic_add_unless((&page->_count), 1, 0);
19972}
19973extern int page_is_ram(unsigned long pfn);
19974struct page *vmalloc_to_page(const void *addr);
19975unsigned long vmalloc_to_pfn(const void *addr);
19976static inline __attribute__((always_inline)) int is_vmalloc_addr(const void *x)
19977{
19978 unsigned long addr = (unsigned long)x;
19979 return addr >= ((unsigned long)high_memory + (8 * 1024 * 1024)) && addr < ((((((unsigned long)__FIXADDR_TOP) - (__end_of_fixed_addresses << 12)) - ((1UL) << 12) * (512 + 1)) & (~((1UL << 21) - 1))) - 2 * ((1UL) << 12));
19980}
19981extern int is_vmalloc_or_module_addr(const void *x);
19982static inline __attribute__((always_inline)) void compound_lock(struct page *page)
19983{
19984 bit_spin_lock(PG_compound_lock, &page->flags);
19985}
19986static inline __attribute__((always_inline)) void compound_unlock(struct page *page)
19987{
19988 bit_spin_unlock(PG_compound_lock, &page->flags);
19989}
19990static inline __attribute__((always_inline)) unsigned long compound_lock_irqsave(struct page *page)
19991{
19992 unsigned long flags = flags;
19993 do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); trace_hardirqs_off(); } while (0);
19994 compound_lock(page);
19995 return flags;
19996}
19997static inline __attribute__((always_inline)) void compound_unlock_irqrestore(struct page *page,
19998 unsigned long flags)
19999{
20000 compound_unlock(page);
20001 do { if (__builtin_constant_p(((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })))) ? !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 347, }; ______r = !!((({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); }))); ______f.miss_hit[______r]++; ______r; })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); trace_hardirqs_off(); } else { trace_hardirqs_on(); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0);
20002}
20003static inline __attribute__((always_inline)) struct page *compound_head(struct page *page)
20004{
20005 if (__builtin_constant_p((((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 353, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 353, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 353, }; ______r = !!(((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 353, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
20006 return page->first_page;
20007 return page;
20008}
20009static inline __attribute__((always_inline)) int page_count(struct page *page)
20010{
20011 return atomic_read(&compound_head(page)->_count);
20012}
20013static inline __attribute__((always_inline)) void get_page(struct page *page)
20014{
20015 do { (void)(atomic_read(&page->_count) < !PageTail(page)); } while (0);
20016 atomic_inc(&page->_count);
20017 if (__builtin_constant_p((((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 379, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 379, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 379, }; ______r = !!(((__builtin_constant_p(PageTail(page)) ? !!(PageTail(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 379, }; ______r = __builtin_expect(!!(PageTail(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) {
20018 do { (void)(atomic_read(&page->first_page->_count) <= 0); } while (0);
20019 atomic_inc(&page->first_page->_count);
20020 }
20021}
20022static inline __attribute__((always_inline)) struct page *virt_to_head_page(const void *x)
20023{
20024 struct page *page = (mem_map + (((((unsigned long)(x)) - ((unsigned long)(0xC0000000UL))) >> 12) - (0UL)));
20025 return compound_head(page);
20026}
20027static inline __attribute__((always_inline)) void init_page_count(struct page *page)
20028{
20029 atomic_set(&page->_count, 1);
20030}
20031static inline __attribute__((always_inline)) int PageBuddy(struct page *page)
20032{
20033 return atomic_read(&page->_mapcount) == (-128);
20034}
20035static inline __attribute__((always_inline)) void __SetPageBuddy(struct page *page)
20036{
20037 do { (void)(atomic_read(&page->_mapcount) != -1); } while (0);
20038 atomic_set(&page->_mapcount, (-128));
20039}
20040static inline __attribute__((always_inline)) void __ClearPageBuddy(struct page *page)
20041{
20042 do { (void)(!PageBuddy(page)); } while (0);
20043 atomic_set(&page->_mapcount, -1);
20044}
20045void put_page(struct page *page);
20046void put_pages_list(struct list_head *pages);
20047void split_page(struct page *page, unsigned int order);
20048int split_free_page(struct page *page);
20049typedef void compound_page_dtor(struct page *);
20050static inline __attribute__((always_inline)) void set_compound_page_dtor(struct page *page,
20051 compound_page_dtor *dtor)
20052{
20053 page[1].lru.next = (void *)dtor;
20054}
20055static inline __attribute__((always_inline)) compound_page_dtor *get_compound_page_dtor(struct page *page)
20056{
20057 return (compound_page_dtor *)page[1].lru.next;
20058}
20059static inline __attribute__((always_inline)) int compound_order(struct page *page)
20060{
20061 if (__builtin_constant_p(((!PageHead(page)))) ? !!((!PageHead(page))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 459, }; ______r = !!((!PageHead(page))); ______f.miss_hit[______r]++; ______r; }))
20062 return 0;
20063 return (unsigned long)page[1].lru.prev;
20064}
20065static inline __attribute__((always_inline)) int compound_trans_order(struct page *page)
20066{
20067 int order;
20068 unsigned long flags;
20069 if (__builtin_constant_p(((!PageHead(page)))) ? !!((!PageHead(page))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 469, }; ______r = !!((!PageHead(page))); ______f.miss_hit[______r]++; ______r; }))
20070 return 0;
20071 flags = compound_lock_irqsave(page);
20072 order = compound_order(page);
20073 compound_unlock_irqrestore(page, flags);
20074 return order;
20075}
20076static inline __attribute__((always_inline)) void set_compound_order(struct page *page, unsigned long order)
20077{
20078 page[1].lru.prev = (void *)order;
20079}
20080static inline __attribute__((always_inline)) pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
20081{
20082 if (__builtin_constant_p((((__builtin_constant_p(vma->vm_flags & 0x00000002) ? !!(vma->vm_flags & 0x00000002) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 492, }; ______r = __builtin_expect(!!(vma->vm_flags & 0x00000002), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(vma->vm_flags & 0x00000002) ? !!(vma->vm_flags & 0x00000002) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 492, }; ______r = __builtin_expect(!!(vma->vm_flags & 0x00000002), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 492, }; ______r = !!(((__builtin_constant_p(vma->vm_flags & 0x00000002) ? !!(vma->vm_flags & 0x00000002) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 492, }; ______r = __builtin_expect(!!(vma->vm_flags & 0x00000002), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
20083 pte = pte_mkwrite(pte);
20084 return pte;
20085}
20086static inline __attribute__((always_inline)) enum zone_type page_zonenum(struct page *page)
20087{
20088 return (page->flags >> (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0))) & ((1UL << 2) - 1);
20089}
20090static inline __attribute__((always_inline)) int page_zone_id(struct page *page)
20091{
20092 return (page->flags >> ((((((sizeof(unsigned long)*8) - 0) - 0) < ((((sizeof(unsigned long)*8) - 0) - 0) - 2))? (((sizeof(unsigned long)*8) - 0) - 0) : ((((sizeof(unsigned long)*8) - 0) - 0) - 2)) * ((0 + 2) != 0))) & ((1UL << (0 + 2)) - 1);
20093}
20094static inline __attribute__((always_inline)) int zone_to_nid(struct zone *zone)
20095{
20096 return 0;
20097}
20098static inline __attribute__((always_inline)) int page_to_nid(struct page *page)
20099{
20100 return (page->flags >> ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0))) & ((1UL << 0) - 1);
20101}
20102static inline __attribute__((always_inline)) struct zone *page_zone(struct page *page)
20103{
20104 return &(&contig_page_data)->node_zones[page_zonenum(page)];
20105}
20106static inline __attribute__((always_inline)) void set_page_zone(struct page *page, enum zone_type zone)
20107{
20108 page->flags &= ~(((1UL << 2) - 1) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0)));
20109 page->flags |= (zone & ((1UL << 2) - 1)) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0));
20110}
20111static inline __attribute__((always_inline)) void set_page_node(struct page *page, unsigned long node)
20112{
20113 page->flags &= ~(((1UL << 0) - 1) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0)));
20114 page->flags |= (node & ((1UL << 0) - 1)) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0));
20115}
20116static inline __attribute__((always_inline)) void set_page_links(struct page *page, enum zone_type zone,
20117 unsigned long node, unsigned long pfn)
20118{
20119 set_page_zone(page, zone);
20120 set_page_node(page, node);
20121}
20122enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
20123 PGALLOC_DMA, PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE,
20124 PGFREE, PGACTIVATE, PGDEACTIVATE,
20125 PGFAULT, PGMAJFAULT,
20126 PGREFILL_DMA, PGREFILL_NORMAL , PGREFILL_HIGH , PGREFILL_MOVABLE,
20127 PGSTEAL_DMA, PGSTEAL_NORMAL , PGSTEAL_HIGH , PGSTEAL_MOVABLE,
20128 PGSCAN_KSWAPD_DMA, PGSCAN_KSWAPD_NORMAL , PGSCAN_KSWAPD_HIGH , PGSCAN_KSWAPD_MOVABLE,
20129 PGSCAN_DIRECT_DMA, PGSCAN_DIRECT_NORMAL , PGSCAN_DIRECT_HIGH , PGSCAN_DIRECT_MOVABLE,
20130 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
20131 KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
20132 KSWAPD_SKIP_CONGESTION_WAIT,
20133 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
20134 COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
20135 COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
20136 UNEVICTABLE_PGCULLED,
20137 UNEVICTABLE_PGSCANNED,
20138 UNEVICTABLE_PGRESCUED,
20139 UNEVICTABLE_PGMLOCKED,
20140 UNEVICTABLE_PGMUNLOCKED,
20141 UNEVICTABLE_PGCLEARED,
20142 UNEVICTABLE_PGSTRANDED,
20143 UNEVICTABLE_MLOCKFREED,
20144 THP_FAULT_ALLOC,
20145 THP_FAULT_FALLBACK,
20146 THP_COLLAPSE_ALLOC,
20147 THP_COLLAPSE_ALLOC_FAILED,
20148 THP_SPLIT,
20149 NR_VM_EVENT_ITEMS
20150};
20151extern int sysctl_stat_interval;
20152struct vm_event_state {
20153 unsigned long event[NR_VM_EVENT_ITEMS];
20154};
20155extern __attribute__((section(".data..percpu" ""))) __typeof__(struct vm_event_state) vm_event_states;
20156static inline __attribute__((always_inline)) void __count_vm_event(enum vm_event_item item)
20157{
20158 do { do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((vm_event_states.event[item])))) { case 1: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 32, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((vm_event_states.event[item])))))); (typeof(*(&((((vm_event_states.event[item])))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
20159}
20160static inline __attribute__((always_inline)) void count_vm_event(enum vm_event_item item)
20161{
20162 do { do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((vm_event_states.event[item])))) { case 1: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof((((vm_event_states.event[item])))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof((((vm_event_states.event[item]))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item]))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" ((((vm_event_states.event[item])))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((((vm_event_states.event[item])))))); (typeof(*(&((((vm_event_states.event[item])))))) *)tcp_ptr__; }) += ((1)); do { do { __asm__ 
__volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 37, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
20163}
20164static inline __attribute__((always_inline)) void __count_vm_events(enum vm_event_item item, long delta)
20165{
20166 do { do { const void *__vpp_verify = (typeof(&((vm_event_states.event[item]))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((vm_event_states.event[item]))) { case 1: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 42, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&(((vm_event_states.event[item]))))); (typeof(*(&(((vm_event_states.event[item]))))) *)tcp_ptr__; }) += ((delta)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
20167}
20168static inline __attribute__((always_inline)) void count_vm_events(enum vm_event_item item, long delta)
20169{
20170 do { do { const void *__vpp_verify = (typeof(&((vm_event_states.event[item]))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((vm_event_states.event[item]))) { case 1: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof(((vm_event_states.event[item]))) pao_T__; const int pao_ID__ = (__builtin_constant_p((delta)) && (((delta)) == 1 || ((delta)) == -1)) ? ((delta)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((delta)); (void)pao_tmp__; } switch (sizeof(((vm_event_states.event[item])))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "qi" ((pao_T__)((delta)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "ri" ((pao_T__)((delta)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item])))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((vm_event_states.event[item]))) : "re" ((pao_T__)((delta)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&(((vm_event_states.event[item]))))); (typeof(*(&(((vm_event_states.event[item]))))) *)tcp_ptr__; }) += ((delta)); do { do { __asm__ 
__volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 47, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
20171}
20172extern void all_vm_events(unsigned long *);
20173extern void vm_events_fold_cpu(int cpu);
20174extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
20175static inline __attribute__((always_inline)) void zone_page_state_add(long x, struct zone *zone,
20176 enum zone_stat_item item)
20177{
20178 atomic_long_add(x, &zone->vm_stat[item]);
20179 atomic_long_add(x, &vm_stat[item]);
20180}
20181static inline __attribute__((always_inline)) unsigned long global_page_state(enum zone_stat_item item)
20182{
20183 long x = atomic_long_read(&vm_stat[item]);
20184 if (__builtin_constant_p(((x < 0))) ? !!((x < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 103, }; ______r = !!((x < 0)); ______f.miss_hit[______r]++; ______r; }))
20185 x = 0;
20186 return x;
20187}
20188static inline __attribute__((always_inline)) unsigned long zone_page_state(struct zone *zone,
20189 enum zone_stat_item item)
20190{
20191 long x = atomic_long_read(&zone->vm_stat[item]);
20192 if (__builtin_constant_p(((x < 0))) ? !!((x < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 114, }; ______r = !!((x < 0)); ______f.miss_hit[______r]++; ______r; }))
20193 x = 0;
20194 return x;
20195}
20196static inline __attribute__((always_inline)) unsigned long zone_page_state_snapshot(struct zone *zone,
20197 enum zone_stat_item item)
20198{
20199 long x = atomic_long_read(&zone->vm_stat[item]);
20200 int cpu;
20201 for (((cpu)) = -1; ((cpu)) = cpumask_next(((cpu)), (cpu_online_mask)), ((cpu)) < nr_cpu_ids;)
20202 x += ({ do { const void *__vpp_verify = (typeof(((zone->pageset))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((zone->pageset))) *)((zone->pageset)))); (typeof((typeof(*((zone->pageset))) *)((zone->pageset)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->vm_stat_diff[item];
20203 if (__builtin_constant_p(((x < 0))) ? !!((x < 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/vmstat.h", .line = 136, }; ______r = !!((x < 0)); ______f.miss_hit[______r]++; ______r; }))
20204 x = 0;
20205 return x;
20206}
20207extern unsigned long global_reclaimable_pages(void);
20208extern unsigned long zone_reclaimable_pages(struct zone *zone);
20209static inline __attribute__((always_inline)) void zap_zone_vm_stats(struct zone *zone)
20210{
20211 __builtin_memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
20212}
20213extern void inc_zone_state(struct zone *, enum zone_stat_item);
20214void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
20215void __inc_zone_page_state(struct page *, enum zone_stat_item);
20216void __dec_zone_page_state(struct page *, enum zone_stat_item);
20217void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
20218void inc_zone_page_state(struct page *, enum zone_stat_item);
20219void dec_zone_page_state(struct page *, enum zone_stat_item);
20220extern void inc_zone_state(struct zone *, enum zone_stat_item);
20221extern void __inc_zone_state(struct zone *, enum zone_stat_item);
20222extern void dec_zone_state(struct zone *, enum zone_stat_item);
20223extern void __dec_zone_state(struct zone *, enum zone_stat_item);
20224void refresh_cpu_vm_stats(int);
20225void refresh_zone_stat_thresholds(void);
20226int calculate_pressure_threshold(struct zone *zone);
20227int calculate_normal_threshold(struct zone *zone);
20228void set_pgdat_percpu_threshold(pg_data_t *pgdat,
20229 int (*calculate_pressure)(struct zone *));
20230extern const char * const vmstat_text[];
20231static inline __attribute__((always_inline)) __attribute__((always_inline)) void *lowmem_page_address(struct page *page)
20232{
20233 return ((void *)((unsigned long)(((phys_addr_t)(((unsigned long)((page) - mem_map) + (0UL))) << 12))+((unsigned long)(0xC0000000UL))));
20234}
20235void *page_address(struct page *page);
20236void set_page_address(struct page *page, void *virtual);
20237void page_address_init(void);
20238extern struct address_space swapper_space;
20239static inline __attribute__((always_inline)) struct address_space *page_mapping(struct page *page)
20240{
20241 struct address_space *mapping = page->mapping;
20242 do { (void)(PageSlab(page)); } while (0);
20243 if (__builtin_constant_p((((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 776, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 776, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 776, }; ______r = !!(((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 776, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
20244 mapping = &swapper_space;
20245 else if (__builtin_constant_p((((unsigned long)mapping & 1))) ? !!(((unsigned long)mapping & 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 778, }; ______r = !!(((unsigned long)mapping & 1)); ______f.miss_hit[______r]++; ______r; }))
20246 mapping = ((void *)0);
20247 return mapping;
20248}
20249static inline __attribute__((always_inline)) void *page_rmapping(struct page *page)
20250{
20251 return (void *)((unsigned long)page->mapping & ~(1 | 2));
20252}
20253static inline __attribute__((always_inline)) int PageAnon(struct page *page)
20254{
20255 return ((unsigned long)page->mapping & 1) != 0;
20256}
/*
 * Return the page's index within its mapping.  For a swap-cache page the
 * index lives in page->private; otherwise it is page->index.  The huge
 * conditional below is the expanded ftrace branch profiler wrapped around
 * likely(PageSwapCache(page)) -- it records hit/miss counts in the
 * _ftrace_*branch sections and yields the plain boolean.
 */
static inline __attribute__((always_inline)) unsigned long page_index(struct page *page)
{
 if (__builtin_constant_p((((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 800, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 800, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 800, }; ______r = !!(((__builtin_constant_p(PageSwapCache(page)) ? !!(PageSwapCache(page)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 800, }; ______r = __builtin_expect(!!(PageSwapCache(page)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
 return ((page)->private);
 return page->index;
}
/*
 * _mapcount is stored biased by -1: the reset value below is -1, the
 * reported count adds 1 back, and "mapped" means the raw value is >= 0.
 */
static inline __attribute__((always_inline)) void reset_page_mapcount(struct page *page)
{
 atomic_set(&(page)->_mapcount, -1);
}
/* Number of user page-table mappings of this page (unbiases _mapcount). */
static inline __attribute__((always_inline)) int page_mapcount(struct page *page)
{
 return atomic_read(&(page)->_mapcount) + 1;
}
/* Nonzero iff the page has at least one user mapping. */
static inline __attribute__((always_inline)) int page_mapped(struct page *page)
{
 return atomic_read(&(page)->_mapcount) >= 0;
}
20275extern void pagefault_out_of_memory(void);
20276extern void show_free_areas(unsigned int flags);
20277extern bool skip_free_areas_node(unsigned int flags, int nid);
20278int shmem_lock(struct file *file, int lock, struct user_struct *user);
20279struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
20280int shmem_zero_setup(struct vm_area_struct *);
20281extern int can_do_mlock(void);
20282extern int user_shm_lock(size_t, struct user_struct *);
20283extern void user_shm_unlock(size_t, struct user_struct *);
/*
 * Parameter bundle for the unmap/zap routines declared below
 * (zap_page_range, unmap_vmas).  Field meanings inferred from names --
 * confirm against the callers, which are not visible in this chunk.
 */
struct zap_details {
 struct vm_area_struct *nonlinear_vma;
 struct address_space *check_mapping;
 unsigned long first_index;
 unsigned long last_index;
};
20290struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
20291 pte_t pte);
20292int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
20293 unsigned long size);
20294unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
20295 unsigned long size, struct zap_details *);
20296unsigned long unmap_vmas(struct mmu_gather *tlb,
20297 struct vm_area_struct *start_vma, unsigned long start_addr,
20298 unsigned long end_addr, unsigned long *nr_accounted,
20299 struct zap_details *);
/*
 * Callback table for walk_page_range(): one optional hook per page-table
 * level (pgd/pud/pmd/pte), a hole callback, and a hugetlb entry hook.
 * 'mm' is the address space being walked; 'private' is caller context
 * threaded through to every callback.
 */
struct mm_walk {
 int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
 int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
 int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
 int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
 int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
 int (*hugetlb_entry)(pte_t *, unsigned long,
 unsigned long, unsigned long, struct mm_walk *);
 struct mm_struct *mm;
 void *private;
};
20311int walk_page_range(unsigned long addr, unsigned long end,
20312 struct mm_walk *walk);
20313void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
20314 unsigned long end, unsigned long floor, unsigned long ceiling);
20315int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
20316 struct vm_area_struct *vma);
20317void unmap_mapping_range(struct address_space *mapping,
20318 loff_t const holebegin, loff_t const holelen, int even_cows);
20319int follow_pfn(struct vm_area_struct *vma, unsigned long address,
20320 unsigned long *pfn);
20321int follow_phys(struct vm_area_struct *vma, unsigned long address,
20322 unsigned int flags, unsigned long *prot, resource_size_t *phys);
20323int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
20324 void *buf, int len, int write);
/*
 * Convenience wrapper around unmap_mapping_range() with the final
 * 'even_cows' argument fixed to 0 (shared mappings only).
 */
static inline __attribute__((always_inline)) void unmap_shared_mapping_range(struct address_space *mapping,
 loff_t const holebegin, loff_t const holelen)
{
 unmap_mapping_range(mapping, holebegin, holelen, 0);
}
20330extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
20331extern void truncate_setsize(struct inode *inode, loff_t newsize);
20332extern int vmtruncate(struct inode *inode, loff_t offset);
20333extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
20334int truncate_inode_page(struct address_space *mapping, struct page *page);
20335int generic_error_remove_page(struct address_space *mapping, struct page *page);
20336int invalidate_inode_page(struct page *page);
20337extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
20338 unsigned long address, unsigned int flags);
20339extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
20340 unsigned long address, unsigned int fault_flags);
20341extern int make_pages_present(unsigned long addr, unsigned long end);
20342extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
20343extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
20344 void *buf, int len, int write);
20345int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
20346 unsigned long start, int len, unsigned int foll_flags,
20347 struct page **pages, struct vm_area_struct **vmas,
20348 int *nonblocking);
20349int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
20350 unsigned long start, int nr_pages, int write, int force,
20351 struct page **pages, struct vm_area_struct **vmas);
20352int get_user_pages_fast(unsigned long start, int nr_pages, int write,
20353 struct page **pages);
20354struct page *get_dump_page(unsigned long addr);
20355extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
20356extern void do_invalidatepage(struct page *page, unsigned long offset);
20357int __set_page_dirty_nobuffers(struct page *page);
20358int __set_page_dirty_no_writeback(struct page *page);
20359int redirty_page_for_writepage(struct writeback_control *wbc,
20360 struct page *page);
20361void account_page_dirtied(struct page *page, struct address_space *mapping);
20362void account_page_writeback(struct page *page);
20363int set_page_dirty(struct page *page);
20364int set_page_dirty_lock(struct page *page);
20365int clear_page_dirty_for_io(struct page *page);
/*
 * True if 'vma' ends exactly at 'addr' and has flag 0x00000100 set
 * (presumably VM_GROWSDOWN in this preprocessed output -- confirm).
 */
static inline __attribute__((always_inline)) int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
 return vma && (vma->vm_end == addr) && (vma->vm_flags & 0x00000100);
}
/*
 * 'addr' is the first page of a grows-down vma and the previous vma does
 * not grow down into it, i.e. 'addr' is the stack guard page at the start.
 */
static inline __attribute__((always_inline)) int stack_guard_page_start(struct vm_area_struct *vma,
 unsigned long addr)
{
 return (vma->vm_flags & 0x00000100) &&
 (vma->vm_start == addr) &&
 !vma_growsdown(vma->vm_prev, addr);
}
/*
 * NOTE(review): the mask here is 0x00000000 -- the grows-up VM flag
 * expands to 0 on this architecture -- so this function (and
 * stack_guard_page_end below) is constant-false by construction.
 * That is faithful preprocessor output, not a bug in this file.
 */
static inline __attribute__((always_inline)) int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
 return vma && (vma->vm_start == addr) && (vma->vm_flags & 0x00000000);
}
/* Grows-up counterpart of stack_guard_page_start; always 0 here (see above). */
static inline __attribute__((always_inline)) int stack_guard_page_end(struct vm_area_struct *vma,
 unsigned long addr)
{
 return (vma->vm_flags & 0x00000000) &&
 (vma->vm_end == addr) &&
 !vma_growsup(vma->vm_next, addr);
}
20388extern unsigned long move_page_tables(struct vm_area_struct *vma,
20389 unsigned long old_addr, struct vm_area_struct *new_vma,
20390 unsigned long new_addr, unsigned long len);
20391extern unsigned long do_mremap(unsigned long addr,
20392 unsigned long old_len, unsigned long new_len,
20393 unsigned long flags, unsigned long new_addr);
20394extern int mprotect_fixup(struct vm_area_struct *vma,
20395 struct vm_area_struct **pprev, unsigned long start,
20396 unsigned long end, unsigned long newflags);
20397int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
20398 struct page **pages);
/*
 * Thin accessors over the per-mm RSS statistics array
 * mm->rss_stat.count[], indexed by counter id (e.g. MM_FILEPAGES,
 * MM_ANONPAGES -- see get_mm_rss below).  All operate via atomic_long_*.
 */
static inline __attribute__((always_inline)) void set_mm_counter(struct mm_struct *mm, int member, long value)
{
 atomic_long_set(&mm->rss_stat.count[member], value);
}
/* Read one counter. */
static inline __attribute__((always_inline)) unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
 return atomic_long_read(&mm->rss_stat.count[member]);
}
/* Add 'value' (may be negative) to one counter. */
static inline __attribute__((always_inline)) void add_mm_counter(struct mm_struct *mm, int member, long value)
{
 atomic_long_add(value, &mm->rss_stat.count[member]);
}
/* Increment one counter by 1. */
static inline __attribute__((always_inline)) void inc_mm_counter(struct mm_struct *mm, int member)
{
 atomic_long_inc(&mm->rss_stat.count[member]);
}
/* Decrement one counter by 1. */
static inline __attribute__((always_inline)) void dec_mm_counter(struct mm_struct *mm, int member)
{
 atomic_long_dec(&mm->rss_stat.count[member]);
}
20419static inline __attribute__((always_inline)) unsigned long get_mm_rss(struct mm_struct *mm)
20420{
20421 return get_mm_counter(mm, MM_FILEPAGES) +
20422 get_mm_counter(mm, MM_ANONPAGES);
20423}
20424static inline __attribute__((always_inline)) unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
20425{
20426 return ({ typeof(mm->hiwater_rss) _max1 = (mm->hiwater_rss); typeof(get_mm_rss(mm)) _max2 = (get_mm_rss(mm)); (void) (&_max1 == &_max2); _max1 > _max2 ? _max1 : _max2; });
20427}
20428static inline __attribute__((always_inline)) unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
20429{
20430 return ({ typeof(mm->hiwater_vm) _max1 = (mm->hiwater_vm); typeof(mm->total_vm) _max2 = (mm->total_vm); (void) (&_max1 == &_max2); _max1 > _max2 ? _max1 : _max2; });
20431}
/*
 * Raise mm->hiwater_rss to the current RSS if it grew.  The conditional
 * is the expanded ftrace branch profiler around a plain
 * 'if (hiwater_rss < rss)' test.
 */
static inline __attribute__((always_inline)) void update_hiwater_rss(struct mm_struct *mm)
{
 unsigned long _rss = get_mm_rss(mm);
 if (__builtin_constant_p((((mm)->hiwater_rss < _rss))) ? !!(((mm)->hiwater_rss < _rss)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1107, }; ______r = !!(((mm)->hiwater_rss < _rss)); ______f.miss_hit[______r]++; ______r; }))
 (mm)->hiwater_rss = _rss;
}
/* Raise mm->hiwater_vm to the current total_vm if it grew. */
static inline __attribute__((always_inline)) void update_hiwater_vm(struct mm_struct *mm)
{
 if (__builtin_constant_p(((mm->hiwater_vm < mm->total_vm))) ? !!((mm->hiwater_vm < mm->total_vm)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1113, }; ______r = !!((mm->hiwater_vm < mm->total_vm)); ______f.miss_hit[______r]++; ______r; }))
 mm->hiwater_vm = mm->total_vm;
}
/* Fold this mm's high-water RSS into the caller-supplied running maximum. */
static inline __attribute__((always_inline)) void setmax_mm_hiwater_rss(unsigned long *maxrss,
 struct mm_struct *mm)
{
 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
 if (__builtin_constant_p(((*maxrss < hiwater_rss))) ? !!((*maxrss < hiwater_rss)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1122, }; ______r = !!((*maxrss < hiwater_rss)); ______f.miss_hit[______r]++; ______r; }))
 *maxrss = hiwater_rss;
}
/* No-op stub in this configuration (per-task RSS caching disabled). */
static inline __attribute__((always_inline)) void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
{
}
/* Arguments passed to a shrinker callback: allocation context and workload. */
struct shrink_control {
 gfp_t gfp_mask;
 unsigned long nr_to_scan;
};
/*
 * A registered cache shrinker (see register_shrinker/unregister_shrinker
 * below).  'shrink' is the callback, 'list' links all shrinkers.
 * Exact semantics of 'seeks' and 'nr' are not visible in this chunk.
 */
struct shrinker {
 int (*shrink)(struct shrinker *, struct shrink_control *sc);
 int seeks;
 struct list_head list;
 long nr;
};
20463extern void register_shrinker(struct shrinker *);
20464extern void unregister_shrinker(struct shrinker *);
20465int vma_wants_writenotify(struct vm_area_struct *vma);
20466extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
20467 spinlock_t **ptl);
20468static inline __attribute__((always_inline)) pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
20469 spinlock_t **ptl)
20470{
20471 pte_t *ptep;
20472 (ptep = __get_locked_pte(mm, addr, ptl));
20473 return ptep;
20474}
/*
 * Always succeeds without allocating: on this configuration the pud
 * level appears folded into the pgd, so there is nothing to allocate.
 */
static inline __attribute__((always_inline)) int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
 unsigned long address)
{
 return 0;
}
20480int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
20481int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
20482 pmd_t *pmd, unsigned long address);
20483int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
/*
 * Ensure the pud level under 'pgd' exists, then return the pud for
 * 'address'; NULL only if __pud_alloc() fails (it cannot here -- see
 * above).  The conditional is the expanded ftrace profiler around
 * unlikely(pgd_none(*pgd)).
 */
static inline __attribute__((always_inline)) pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
 return ((__builtin_constant_p(pgd_none(*pgd)) ? !!(pgd_none(*pgd)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1215, }; ______r = __builtin_expect(!!(pgd_none(*pgd)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __pud_alloc(mm, pgd, address))?
 ((void *)0): pud_offset(pgd, address);
}
/*
 * Same pattern one level down: allocate the pmd under 'pud' on demand
 * via __pmd_alloc(), returning NULL on allocation failure.
 */
static inline __attribute__((always_inline)) pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
 return ((__builtin_constant_p(pud_none(*pud)) ? !!(pud_none(*pud)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1221, }; ______r = __builtin_expect(!!(pud_none(*pud)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })) && __pmd_alloc(mm, pud, address))?
 ((void *)0): pmd_offset(pud, address);
}
/*
 * Account a newly allocated page-table page.  The empty do/while is a
 * compiled-out debug/lock-init macro in this configuration.
 */
static inline __attribute__((always_inline)) void pgtable_page_ctor(struct page *page)
{
 do {} while (0);
 inc_zone_page_state(page, NR_PAGETABLE);
}
/* Reverse of pgtable_page_ctor: drop the NR_PAGETABLE accounting. */
static inline __attribute__((always_inline)) void pgtable_page_dtor(struct page *page)
{
 do {} while (0);
 dec_zone_page_state(page, NR_PAGETABLE);
}
20504extern void free_area_init(unsigned long * zones_size);
20505extern void free_area_init_node(int nid, unsigned long * zones_size,
20506 unsigned long zone_start_pfn, unsigned long *zholes_size);
20507extern void free_area_init_nodes(unsigned long *max_zone_pfn);
20508extern void add_active_range(unsigned int nid, unsigned long start_pfn,
20509 unsigned long end_pfn);
20510extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
20511 unsigned long end_pfn);
20512extern void remove_all_active_ranges(void);
20513void sort_node_map(void);
20514unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
20515 unsigned long end_pfn);
20516extern unsigned long absent_pages_in_range(unsigned long start_pfn,
20517 unsigned long end_pfn);
20518extern void get_pfn_range_for_nid(unsigned int nid,
20519 unsigned long *start_pfn, unsigned long *end_pfn);
20520extern unsigned long find_min_pfn_with_active_regions(void);
20521extern void free_bootmem_with_active_regions(int nid,
20522 unsigned long max_low_pfn);
20523int add_from_early_node_map(struct range *range, int az,
20524 int nr_range, int nid);
20525u64 __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) find_memory_core_early(int nid, u64 size, u64 align,
20526 u64 goal, u64 limit);
20527typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
20528extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
20529extern void sparse_memory_present_with_active_regions(int nid);
20530extern int __attribute__ ((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) early_pfn_to_nid(unsigned long pfn);
20531extern void set_dma_reserve(unsigned long new_dma_reserve);
20532extern void memmap_init_zone(unsigned long, int, unsigned long,
20533 unsigned long, enum memmap_context);
20534extern void setup_per_zone_wmarks(void);
20535extern int __attribute__ ((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) init_per_zone_wmark_min(void);
20536extern void mem_init(void);
20537extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) mmap_init(void);
20538extern void show_mem(unsigned int flags);
20539extern void si_meminfo(struct sysinfo * val);
20540extern void si_meminfo_node(struct sysinfo *val, int nid);
20541extern int after_bootmem;
20542extern void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
20543extern void setup_per_cpu_pageset(void);
20544extern void zone_pcp_update(struct zone *zone);
20545extern atomic_long_t mmap_pages_allocated;
20546extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
20547void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
20548void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
20549void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
20550struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
20551 struct prio_tree_iter *iter);
20552static inline __attribute__((always_inline)) void vma_nonlinear_insert(struct vm_area_struct *vma,
20553 struct list_head *list)
20554{
20555 vma->shared.vm_set.parent = ((void *)0);
20556 list_add_tail(&vma->shared.vm_set.list, list);
20557}
20558extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
20559extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
20560 unsigned long end, unsigned long pgoff, struct vm_area_struct *insert);
20561extern struct vm_area_struct *vma_merge(struct mm_struct *,
20562 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
20563 unsigned long vm_flags, struct anon_vma *, struct file *, unsigned long,
20564 struct mempolicy *);
20565extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
20566extern int split_vma(struct mm_struct *,
20567 struct vm_area_struct *, unsigned long addr, int new_below);
20568extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
20569extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
20570 struct rb_node **, struct rb_node *);
20571extern void unlink_file_vma(struct vm_area_struct *);
20572extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
20573 unsigned long addr, unsigned long len, unsigned long pgoff);
20574extern void exit_mmap(struct mm_struct *);
20575extern int mm_take_all_locks(struct mm_struct *mm);
20576extern void mm_drop_all_locks(struct mm_struct *mm);
20577extern void added_exe_file_vma(struct mm_struct *mm);
20578extern void removed_exe_file_vma(struct mm_struct *mm);
20579extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
20580extern struct file *get_mm_exe_file(struct mm_struct *mm);
20581extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
20582extern int install_special_mapping(struct mm_struct *mm,
20583 unsigned long addr, unsigned long len,
20584 unsigned long flags, struct page **pages);
20585extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
20586extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
20587 unsigned long len, unsigned long prot,
20588 unsigned long flag, unsigned long pgoff);
20589extern unsigned long mmap_region(struct file *file, unsigned long addr,
20590 unsigned long len, unsigned long flags,
20591 vm_flags_t vm_flags, unsigned long pgoff);
/*
 * mmap with a byte offset: reject if page-aligning 'len' and adding it
 * to 'offset' overflows, or if 'offset' is not page-aligned (page size
 * 1UL << 12 here); otherwise forward to do_mmap_pgoff() with the offset
 * converted to a page number.  Returns -22 (-EINVAL) on rejection.
 * Both conditionals are expanded ftrace branch profilers around the
 * plain overflow and alignment tests.
 */
static inline __attribute__((always_inline)) unsigned long do_mmap(struct file *file, unsigned long addr,
 unsigned long len, unsigned long prot,
 unsigned long flag, unsigned long offset)
{
 unsigned long ret = -22;
 if (__builtin_constant_p((((offset + ((((len)) + ((typeof((len)))((((1UL) << 12))) - 1)) & ~((typeof((len)))((((1UL) << 12))) - 1))) < offset))) ? !!(((offset + ((((len)) + ((typeof((len)))((((1UL) << 12))) - 1)) & ~((typeof((len)))((((1UL) << 12))) - 1))) < offset)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1446, }; ______r = !!(((offset + ((((len)) + ((typeof((len)))((((1UL) << 12))) - 1)) & ~((typeof((len)))((((1UL) << 12))) - 1))) < offset)); ______f.miss_hit[______r]++; ______r; }))
 goto out;
 if (__builtin_constant_p(((!(offset & ~(~(((1UL) << 12)-1)))))) ? !!((!(offset & ~(~(((1UL) << 12)-1))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1448, }; ______r = !!((!(offset & ~(~(((1UL) << 12)-1))))); ______f.miss_hit[______r]++; ______r; }))
 ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> 12);
out:
 return ret;
}
20604extern int do_munmap(struct mm_struct *, unsigned long, size_t);
20605extern unsigned long do_brk(unsigned long, unsigned long);
20606extern unsigned long page_unuse(struct page *);
20607extern void truncate_inode_pages(struct address_space *, loff_t);
20608extern void truncate_inode_pages_range(struct address_space *,
20609 loff_t lstart, loff_t lend);
20610extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
20611int write_one_page(struct page *page, int wait);
20612void task_dirty_inc(struct task_struct *tsk);
20613int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
20614 unsigned long offset, unsigned long nr_to_read);
20615void page_cache_sync_readahead(struct address_space *mapping,
20616 struct file_ra_state *ra,
20617 struct file *filp,
20618 unsigned long offset,
20619 unsigned long size);
20620void page_cache_async_readahead(struct address_space *mapping,
20621 struct file_ra_state *ra,
20622 struct file *filp,
20623 struct page *pg,
20624 unsigned long offset,
20625 unsigned long size);
20626unsigned long max_sane_readahead(unsigned long nr);
20627unsigned long ra_submit(struct file_ra_state *ra,
20628 struct address_space *mapping,
20629 struct file *filp);
20630extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
20631extern int expand_downwards(struct vm_area_struct *vma,
20632 unsigned long address);
20633extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
20634extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
20635 struct vm_area_struct **pprev);
/*
 * First vma intersecting [start_addr, end_addr), or NULL.  find_vma()
 * returns the first vma ending above start_addr; the (profiled) check
 * discards it when it begins at or after end_addr.
 */
static inline __attribute__((always_inline)) struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
 struct vm_area_struct * vma = find_vma(mm,start_addr);
 if (__builtin_constant_p(((vma && end_addr <= vma->vm_start))) ? !!((vma && end_addr <= vma->vm_start)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/mm.h", .line = 1519, }; ______r = !!((vma && end_addr <= vma->vm_start)); ______f.miss_hit[______r]++; ______r; }))
 vma = ((void *)0);
 return vma;
}
20643static inline __attribute__((always_inline)) unsigned long vma_pages(struct vm_area_struct *vma)
20644{
20645 return (vma->vm_end - vma->vm_start) >> 12;
20646}
20647pgprot_t vm_get_page_prot(unsigned long vm_flags);
20648struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
20649int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
20650 unsigned long pfn, unsigned long size, pgprot_t);
20651int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
20652int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
20653 unsigned long pfn);
20654int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
20655 unsigned long pfn);
20656struct page *follow_page(struct vm_area_struct *, unsigned long address,
20657 unsigned int foll_flags);
20658typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
20659 void *data);
20660extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
20661 unsigned long size, pte_fn_t fn, void *data);
20662void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
/*
 * DEBUG_PAGEALLOC stubs for a configuration with the feature disabled:
 * mapping toggles are no-ops and every page is reported present.
 */
static inline __attribute__((always_inline)) void
kernel_map_pages(struct page *page, int numpages, int enable) {}
static inline __attribute__((always_inline)) void enable_debug_pagealloc(void)
{
}
static inline __attribute__((always_inline)) bool kernel_page_present(struct page *page) { return true; }
20669extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
20670int in_gate_area_no_mm(unsigned long addr);
20671int in_gate_area(struct mm_struct *mm, unsigned long addr);
20672int drop_caches_sysctl_handler(struct ctl_table *, int,
20673 void *, size_t *, loff_t *);
20674unsigned long shrink_slab(struct shrink_control *shrink,
20675 unsigned long nr_pages_scanned,
20676 unsigned long lru_pages);
20677extern int randomize_va_space;
20678const char * arch_vma_name(struct vm_area_struct *vma);
20679void print_vma_addr(char *prefix, unsigned long rip);
20680void sparse_mem_maps_populate_node(struct page **map_map,
20681 unsigned long pnum_begin,
20682 unsigned long pnum_end,
20683 unsigned long map_count,
20684 int nodeid);
20685struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
20686pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
20687pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
20688pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
20689pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
20690void *vmemmap_alloc_block(unsigned long size, int node);
20691void *vmemmap_alloc_block_buf(unsigned long size, int node);
20692void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
20693int vmemmap_populate_basepages(struct page *start_page,
20694 unsigned long pages, int node);
20695int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
20696void vmemmap_populate_print_last(void);
20697enum mf_flags {
20698 MF_COUNT_INCREASED = 1 << 0,
20699};
20700extern void memory_failure(unsigned long pfn, int trapno);
20701extern int __memory_failure(unsigned long pfn, int trapno, int flags);
20702extern int unpoison_memory(unsigned long pfn);
20703extern int sysctl_memory_failure_early_kill;
20704extern int sysctl_memory_failure_recovery;
20705extern void shake_page(struct page *p, int access);
20706extern atomic_long_t mce_bad_pages;
20707extern int soft_offline_page(struct page *page, int flags);
20708extern void dump_page(struct page *page);
20709extern void clear_huge_page(struct page *page,
20710 unsigned long addr,
20711 unsigned int pages_per_huge_page);
20712extern void copy_user_huge_page(struct page *dst, struct page *src,
20713 unsigned long addr, struct vm_area_struct *vma,
20714 unsigned int pages_per_huge_page);
/*
 * Decode the cache/memory type stashed in the PG_uncached and PG_arch_1
 * page flags (x86 PAT tracking, from asm/cacheflush.h): neither bit set
 * means "no memtype" (-1); PG_arch_1 alone maps to PTE attribute bit 3,
 * PG_uncached alone to PTE attribute bit 4, both to 0.  The chained
 * conditionals are expanded ftrace branch profilers around plain
 * equality tests on pg_flags.
 */
static inline __attribute__((always_inline)) unsigned long get_page_memtype(struct page *pg)
{
 unsigned long pg_flags = pg->flags & (1UL << PG_uncached | 1UL << PG_arch_1);
 if (__builtin_constant_p(((pg_flags == 0))) ? !!((pg_flags == 0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/cacheflush.h", .line = 28, }; ______r = !!((pg_flags == 0)); ______f.miss_hit[______r]++; ______r; }))
 return -1;
 else if (__builtin_constant_p(((pg_flags == (1UL << PG_arch_1)))) ? !!((pg_flags == (1UL << PG_arch_1))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/cacheflush.h", .line = 30, }; ______r = !!((pg_flags == (1UL << PG_arch_1))); ______f.miss_hit[______r]++; ______r; }))
 return ((((pteval_t)(1)) << 3));
 else if (__builtin_constant_p(((pg_flags == (1UL << PG_uncached)))) ? !!((pg_flags == (1UL << PG_uncached))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/cacheflush.h", .line = 32, }; ______r = !!((pg_flags == (1UL << PG_uncached))); ______f.miss_hit[______r]++; ______r; }))
 return ((((pteval_t)(1)) << 4));
 else
 return (0);
}
/*
 * Inverse of get_page_memtype: pick the PG_uncached/PG_arch_1 encoding
 * for 'memtype' (unknown values leave memtype_flags == 0, clearing
 * both bits), then install it with a lock-prefixed cmpxchg retry loop
 * so concurrent updates to the other bits in pg->flags are preserved.
 * The while-condition is the expanded x86 cmpxchg() macro: it picks the
 * 1/2/4-byte cmpxchg instruction by operand size and loops until the
 * compare-exchange of pg->flags from old_flags to new_flags succeeds.
 */
static inline __attribute__((always_inline)) void set_page_memtype(struct page *pg, unsigned long memtype)
{
 unsigned long memtype_flags = 0;
 unsigned long old_flags;
 unsigned long new_flags;
 switch (memtype) {
 case ((((pteval_t)(1)) << 3)):
 memtype_flags = (1UL << PG_arch_1);
 break;
 case ((((pteval_t)(1)) << 4)):
 memtype_flags = (1UL << PG_uncached);
 break;
 case (0):
 memtype_flags = (1UL << PG_uncached | 1UL << PG_arch_1);
 break;
 }
 do {
 old_flags = pg->flags;
 new_flags = (old_flags & (~(1UL << PG_uncached | 1UL << PG_arch_1))) | memtype_flags;
 } while (({ __typeof__(*(((&pg->flags)))) __ret; __typeof__(*(((&pg->flags)))) __old = (((old_flags))); __typeof__(*(((&pg->flags)))) __new = (((new_flags))); switch ((sizeof(*&pg->flags))) { case 1: { volatile u8 *__ptr = (volatile u8 *)(((&pg->flags))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgb %2,%1" : "=a" (__ret), "+m" (*__ptr) : "q" (__new), "0" (__old) : "memory"); break; } case 2: { volatile u16 *__ptr = (volatile u16 *)(((&pg->flags))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgw %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } case 4: { volatile u32 *__ptr = (volatile u32 *)(((&pg->flags))); asm volatile(".section .smp_locks,\"a\"\n" ".balign 4\n" ".long 671f - .\n" ".previous\n" "671:" "\n\tlock; " "cmpxchgl %2,%1" : "=a" (__ret), "+m" (*__ptr) : "r" (__new), "0" (__old) : "memory"); break; } default: __cmpxchg_wrong_size(); } __ret; }) != old_flags);
}
20748int _set_memory_uc(unsigned long addr, int numpages);
20749int _set_memory_wc(unsigned long addr, int numpages);
20750int _set_memory_wb(unsigned long addr, int numpages);
20751int set_memory_uc(unsigned long addr, int numpages);
20752int set_memory_wc(unsigned long addr, int numpages);
20753int set_memory_wb(unsigned long addr, int numpages);
20754int set_memory_x(unsigned long addr, int numpages);
20755int set_memory_nx(unsigned long addr, int numpages);
20756int set_memory_ro(unsigned long addr, int numpages);
20757int set_memory_rw(unsigned long addr, int numpages);
20758int set_memory_np(unsigned long addr, int numpages);
20759int set_memory_4k(unsigned long addr, int numpages);
20760int set_memory_array_uc(unsigned long *addr, int addrinarray);
20761int set_memory_array_wc(unsigned long *addr, int addrinarray);
20762int set_memory_array_wb(unsigned long *addr, int addrinarray);
20763int set_pages_array_uc(struct page **pages, int addrinarray);
20764int set_pages_array_wc(struct page **pages, int addrinarray);
20765int set_pages_array_wb(struct page **pages, int addrinarray);
20766int set_pages_uc(struct page *page, int numpages);
20767int set_pages_wb(struct page *page, int numpages);
20768int set_pages_x(struct page *page, int numpages);
20769int set_pages_nx(struct page *page, int numpages);
20770int set_pages_ro(struct page *page, int numpages);
20771int set_pages_rw(struct page *page, int numpages);
20772void clflush_cache_range(void *addr, unsigned int size);
20773void mark_rodata_ro(void);
20774extern const int rodata_test_data;
20775extern int kernel_set_to_readonly;
20776void set_kernel_text_rw(void);
20777void set_kernel_text_ro(void);
/*
 * rodata_test - stub used when the rodata write-test is compiled out.
 * Always reports success (0) without performing any check.
 */
static inline __attribute__((always_inline)) int rodata_test(void)
{
 int rc = 0;
 return rc;
}
/* Intentionally empty on this architecture: no explicit cache maintenance
 * is required for anonymous pages, so all arguments are ignored. */
static inline __attribute__((always_inline)) void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
/* Intentionally empty: no d-cache maintenance needed here for kernel
 * mappings of a page on this architecture. */
static inline __attribute__((always_inline)) void flush_kernel_dcache_page(struct page *page)
{
}
/* Intentionally empty: no cache flush is required for kernel vmap ranges
 * on this architecture; vaddr/size are ignored. */
static inline __attribute__((always_inline)) void flush_kernel_vmap_range(void *vaddr, int size)
{
}
/* Intentionally empty: no cache invalidation is required for kernel vmap
 * ranges on this architecture; vaddr/size are ignored. */
static inline __attribute__((always_inline)) void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
/*
 * __native_flush_tlb - flush all non-global TLB entries.
 * Writing CR3 back with its current value forces the CPU to discard the
 * TLB contents for the active address space.
 */
static inline __attribute__((always_inline)) void __native_flush_tlb(void)
{
 unsigned long cr3 = native_read_cr3();
 native_write_cr3(cr3);
}
/*
 * __native_flush_tlb_global - flush the entire TLB including global pages.
 *
 * Toggles bit 0x80 of CR4 (PGE) off and back on; clearing PGE invalidates
 * all TLB entries, global ones included. Interrupts are disabled around
 * the toggle (the do/while blocks are the expanded
 * raw_local_irq_save/restore macros) so no interrupt can run between the
 * two CR4 writes. NOTE(review): statement order is essential here — do
 * not reorder the reads/writes.
 */
static inline __attribute__((always_inline)) void __native_flush_tlb_global(void)
{
 unsigned long flags;
 unsigned long cr4;
 /* expanded raw_local_irq_save(flags) */
 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0);
 cr4 = native_read_cr4();
 native_write_cr4(cr4 & ~0x00000080);
 native_write_cr4(cr4);
 /* expanded raw_local_irq_restore(flags) */
 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0);
}
/* Invalidate the single TLB entry that maps @addr using the INVLPG
 * instruction; the "memory" clobber orders it against memory accesses. */
static inline __attribute__((always_inline)) void __native_flush_tlb_single(unsigned long addr)
{
 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
/*
 * __flush_tlb_all - flush the whole TLB, picking the cheapest sufficient
 * method. The giant condition below is the expanded cpu_has_pge test
 * (feature bit 0*32+13 in boot_cpu_data->x86_capability) wrapped in the
 * ftrace branch profiler: if the CPU supports global pages, the global
 * flush (CR4.PGE toggle) is needed; otherwise a plain CR3 reload suffices.
 */
static inline __attribute__((always_inline)) void __flush_tlb_all(void)
{
 if (__builtin_constant_p((((__builtin_constant_p((0*32+13)) && ( ((((0*32+13))>>5)==0 && (1UL<<(((0*32+13))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+13))>>5)==1 && (1UL<<(((0*32+13))&31) & (0|0))) || ((((0*32+13))>>5)==2 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==3 && (1UL<<(((0*32+13))&31) & (0))) || ((((0*32+13))>>5)==4 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==5 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==6 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==7 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==8 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==9 && (1UL<<(((0*32+13))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+13))) ? constant_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!(((__builtin_constant_p((0*32+13)) && ( ((((0*32+13))>>5)==0 && (1UL<<(((0*32+13))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+13))>>5)==1 && (1UL<<(((0*32+13))&31) & (0|0))) || ((((0*32+13))>>5)==2 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==3 && (1UL<<(((0*32+13))&31) & (0))) || ((((0*32+13))>>5)==4 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==5 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==6 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==7 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==8 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==9 && (1UL<<(((0*32+13))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+13))) ? 
constant_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/tlbflush.h", .line = 51, }; ______r = !!(((__builtin_constant_p((0*32+13)) && ( ((((0*32+13))>>5)==0 && (1UL<<(((0*32+13))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((0*32+13))>>5)==1 && (1UL<<(((0*32+13))&31) & (0|0))) || ((((0*32+13))>>5)==2 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==3 && (1UL<<(((0*32+13))&31) & (0))) || ((((0*32+13))>>5)==4 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==5 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==6 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==7 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==8 && (1UL<<(((0*32+13))&31) & 0)) || ((((0*32+13))>>5)==9 && (1UL<<(((0*32+13))&31) & 0)) ) ? 1 : (__builtin_constant_p(((0*32+13))) ? constant_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((0*32+13)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; }))
  __flush_tlb_global();
 else
  __flush_tlb();
}
/*
 * __flush_tlb_one - flush a single address from the TLB.
 * The condition is the branch profiler (ftrace_branch_data) wrapped around
 * a constant-true value, so this always takes __flush_tlb_single(); the
 * __flush_tlb() arm is dead code left over from the unexpanded macro.
 */
static inline __attribute__((always_inline)) void __flush_tlb_one(unsigned long addr)
{
 if (__builtin_constant_p(((1))) ? !!((1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/tlbflush.h", .line = 59, }; ______r = !!((1)); ______f.miss_hit[______r]++; ______r; }))
  __flush_tlb_single(addr);
 else
  __flush_tlb();
}
20826extern void flush_tlb_all(void);
20827extern void flush_tlb_current_task(void);
20828extern void flush_tlb_mm(struct mm_struct *);
20829extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
20830static inline __attribute__((always_inline)) void flush_tlb_range(struct vm_area_struct *vma,
20831 unsigned long start, unsigned long end)
20832{
20833 flush_tlb_mm(vma->vm_mm);
20834}
20835void native_flush_tlb_others(const struct cpumask *cpumask,
20836 struct mm_struct *mm, unsigned long va);
/* Per-CPU lazy-TLB bookkeeping: the mm whose translations currently live
 * in this CPU's TLB, plus an integer state word. */
struct tlb_state {
 struct mm_struct *active_mm;
 int state;
};
20841extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tlb_state) cpu_tlbstate __attribute__((__aligned__((1 << (6)))));
/*
 * reset_lazy_tlbstate - reinitialize this CPU's lazy-TLB bookkeeping:
 * set percpu cpu_tlbstate.state to 0 and cpu_tlbstate.active_mm to
 * &init_mm. Each statement below is an expanded this_cpu_write()
 * (percpu_to_op): a %fs-segment-relative mov sized by the field being
 * written; the dead "if (0)" arm only type-checks the value.
 */
static inline __attribute__((always_inline)) void reset_lazy_tlbstate(void)
{
 do { typedef typeof(cpu_tlbstate.state) pto_T__; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/tlbflush.h", .line = 159, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pto_T__ pto_tmp__; pto_tmp__ = (0); (void)pto_tmp__; } switch (sizeof(cpu_tlbstate.state)) { case 1: asm("mov" "b %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.state) : "qi" ((pto_T__)(0))); break; case 2: asm("mov" "w %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.state) : "ri" ((pto_T__)(0))); break; case 4: asm("mov" "l %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.state) : "ri" ((pto_T__)(0))); break; case 8: asm("mov" "q %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.state) : "re" ((pto_T__)(0))); break; default: __bad_percpu_size(); } } while (0);
 do { typedef typeof(cpu_tlbstate.active_mm) pto_T__; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "/data/exp/linux-3.0.4/arch/x86/include/asm/tlbflush.h", .line = 160, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pto_T__ pto_tmp__; pto_tmp__ = (&init_mm); (void)pto_tmp__; } switch (sizeof(cpu_tlbstate.active_mm)) { case 1: asm("mov" "b %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.active_mm) : "qi" ((pto_T__)(&init_mm))); break; case 2: asm("mov" "w %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.active_mm) : "ri" ((pto_T__)(&init_mm))); break; case 4: asm("mov" "l %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.active_mm) : "ri" ((pto_T__)(&init_mm))); break; case 8: asm("mov" "q %1,""%%""fs"":" "%P" "0" : "+m" (cpu_tlbstate.active_mm) : "re" ((pto_T__)(&init_mm))); break; default: __bad_percpu_size(); } } while (0);
}
/*
 * flush_tlb_kernel_range - flush TLB entries for a kernel address range.
 * There is no per-range primitive in use here, so everything is flushed
 * regardless of the bounds; the range arguments are deliberately unused.
 */
static inline __attribute__((always_inline)) void flush_tlb_kernel_range(unsigned long start,
 unsigned long end)
{
 (void)start;
 (void)end;
 flush_tlb_all();
}
20852extern unsigned long highstart_pfn, highend_pfn;
20853extern void *kmap_high(struct page *page);
20854extern void kunmap_high(struct page *page);
20855void *kmap(struct page *page);
20856void kunmap(struct page *page);
20857void *kmap_atomic_prot(struct page *page, pgprot_t prot);
20858void *__kmap_atomic(struct page *page);
20859void __kunmap_atomic(void *kvaddr);
20860void *kmap_atomic_pfn(unsigned long pfn);
20861void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
20862struct page *kmap_atomic_to_page(void *ptr);
20863extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
20864 unsigned long end_pfn);
20865unsigned int nr_free_highpages(void);
20866extern unsigned long totalhigh_pages;
20867void kmap_flush_unused(void);
20868extern __attribute__((section(".data..percpu" ""))) __typeof__(int) __kmap_atomic_idx;
/*
 * kmap_atomic_idx_push - claim the next per-CPU kmap_atomic slot and
 * return its index.
 *
 * The initializer below is the expanded this_cpu_inc_return(
 * __kmap_atomic_idx) with 1 subtracted: for sizeof(int) the case-4 arm is
 * taken, performing a %fs-relative xaddl on the percpu counter and
 * yielding the post-increment value. The case-8 arm (with its explicit
 * preempt_count manipulation and reschedule check) is dead code for this
 * type, retained only by the size-dispatching macro structure.
 * NOTE(review): do not restructure — the expansion is order-sensitive.
 */
static inline __attribute__((always_inline)) int kmap_atomic_idx_push(void)
{
 int idx = ({ typeof(__kmap_atomic_idx) pscr2_ret__; do { const void *__vpp_verify = (typeof(&(__kmap_atomic_idx)))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(__kmap_atomic_idx)) { case 1: pscr2_ret__ = ({ typeof(__kmap_atomic_idx) paro_ret__ = 1; switch (sizeof(__kmap_atomic_idx)) { case 1: asm("xaddb %0, ""%%""fs"":" "%P" "1" : "+q" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 2: asm("xaddw %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 4: asm("xaddl %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 8: asm("xaddq %0, ""%%""fs"":" "%P" "1" : "+re" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; default: __bad_percpu_size(); } paro_ret__ += 1; paro_ret__; }); break; case 2: pscr2_ret__ = ({ typeof(__kmap_atomic_idx) paro_ret__ = 1; switch (sizeof(__kmap_atomic_idx)) { case 1: asm("xaddb %0, ""%%""fs"":" "%P" "1" : "+q" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 2: asm("xaddw %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 4: asm("xaddl %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 8: asm("xaddq %0, ""%%""fs"":" "%P" "1" : "+re" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; default: __bad_percpu_size(); } paro_ret__ += 1; paro_ret__; }); break; case 4: pscr2_ret__ = ({ typeof(__kmap_atomic_idx) paro_ret__ = 1; switch (sizeof(__kmap_atomic_idx)) { case 1: asm("xaddb %0, ""%%""fs"":" "%P" "1" : "+q" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 2: asm("xaddw %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 4: asm("xaddl %0, ""%%""fs"":" "%P" "1" : "+r" (paro_ret__), "+m" (__kmap_atomic_idx) : : "memory"); break; case 8: asm("xaddq %0, ""%%""fs"":" "%P" "1" : "+re" (paro_ret__), "+m" (__kmap_atomic_idx) : : 
"memory"); break; default: __bad_percpu_size(); } paro_ret__ += 1; paro_ret__; }); break; case 8: pscr2_ret__ = ({ typeof(__kmap_atomic_idx) ret__; do { add_preempt_count(1); __asm__ __volatile__("": : :"memory"); } while (0); do { do { const void *__vpp_verify = (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((__kmap_atomic_idx))) { case 1: do { typedef typeof(((__kmap_atomic_idx))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof(((__kmap_atomic_idx)))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof(((__kmap_atomic_idx))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof(((__kmap_atomic_idx)))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof(((__kmap_atomic_idx))) pao_T__; const int pao_ID__ = (__builtin_constant_p((1)) && (((1)) == 1 || ((1)) == -1)) ? ((1)) : 0; if (__builtin_constant_p(((0))) ? 
!!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((1)); (void)pao_tmp__; } switch (sizeof(((__kmap_atomic_idx)))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "qi" ((pao_T__)((1)))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "ri" ((pao_T__)((1)))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx)))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((__kmap_atomic_idx))) : "re" ((pao_T__)((1)))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&(((__kmap_atomic_idx)))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&(((__kmap_atomic_idx))))); (typeof(*(&(((__kmap_atomic_idx))))) *)tcp_ptr__; }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); ret__ = ({ typeof((__kmap_atomic_idx)) pscr_ret__; do { const void *__vpp_verify = (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((__kmap_atomic_idx))) { case 1: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 2: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : 
"m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 4: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 8: pscr_ret__ = (*({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((__kmap_atomic_idx)))); (typeof(*(&((__kmap_atomic_idx)))) *)tcp_ptr__; }));break; default: __bad_size_call_parameter();break; } pscr_ret__; }); do { do { __asm__ __volatile__("": : :"memory"); sub_preempt_count(1); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_constant_p((((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? 
!!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = !!(((__builtin_constant_p(test_ti_thread_flag(current_thread_info(), 3)) ? !!(test_ti_thread_flag(current_thread_info(), 3)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 84, }; ______r = __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 3)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) preempt_schedule(); } while (0); } while (0); ret__; }); break; default: __bad_size_call_parameter(); break; } pscr2_ret__; }) - 1;
 return idx;
}
/*
 * kmap_atomic_idx - return the index of the current (topmost) kmap_atomic
 * slot without changing it: the expanded this_cpu_read(__kmap_atomic_idx)
 * minus 1. For sizeof(int) the case-4 arm is taken, a single
 * %fs-relative movl of the percpu counter; the other size arms are dead
 * code retained by the size-dispatching macro.
 */
static inline __attribute__((always_inline)) int kmap_atomic_idx(void)
{
 return ({ typeof((__kmap_atomic_idx)) pscr_ret__; do { const void *__vpp_verify = (typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((__kmap_atomic_idx))) { case 1: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 2: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 4: pscr_ret__ = ({ typeof(((__kmap_atomic_idx))) pfo_ret__; switch (sizeof(((__kmap_atomic_idx)))) { case 1: asm("mov" "b ""%%""fs"":" "%P" "1"",%0" : "=q" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 2: asm("mov" "w ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 4: asm("mov" "l ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; case 8: asm("mov" "q ""%%""fs"":" "%P" "1"",%0" : "=r" (pfo_ret__) : "m"((__kmap_atomic_idx))); break; default: __bad_percpu_size(); } pfo_ret__; });break; case 8: pscr_ret__ = (*({ unsigned long tcp_ptr__; do { const void *__vpp_verify = 
(typeof(&((__kmap_atomic_idx))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&((__kmap_atomic_idx)))); (typeof(*(&((__kmap_atomic_idx)))) *)tcp_ptr__; }));break; default: __bad_size_call_parameter();break; } pscr_ret__; }) - 1;
}
20878static inline __attribute__((always_inline)) void kmap_atomic_idx_pop(void)
20879{
20880 do { do { const void *__vpp_verify = (typeof(&((((__kmap_atomic_idx))))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((((__kmap_atomic_idx))))) { case 1: do { typedef typeof(((((__kmap_atomic_idx))))) pao_T__; const int pao_ID__ = (__builtin_constant_p((-(1))) && (((-(1))) == 1 || ((-(1))) == -1)) ? ((-(1))) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((-(1))); (void)pao_tmp__; } switch (sizeof(((((__kmap_atomic_idx)))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "qi" ((pao_T__)((-(1))))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "re" ((pao_T__)((-(1))))); break; default: __bad_percpu_size(); } } while (0);break; case 2: do { typedef typeof(((((__kmap_atomic_idx))))) pao_T__; const int pao_ID__ = (__builtin_constant_p((-(1))) && (((-(1))) == 1 || ((-(1))) == -1)) ? ((-(1))) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((-(1))); (void)pao_tmp__; } switch (sizeof(((((__kmap_atomic_idx)))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "qi" ((pao_T__)((-(1))))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? 
!!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "re" ((pao_T__)((-(1))))); break; default: __bad_percpu_size(); } } while (0);break; case 4: do { typedef typeof(((((__kmap_atomic_idx))))) pao_T__; const int pao_ID__ = (__builtin_constant_p((-(1))) && (((-(1))) == 1 || ((-(1))) == -1)) ? ((-(1))) : 0; if (__builtin_constant_p(((0))) ? !!((0)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((0)); ______f.miss_hit[______r]++; ______r; })) { pao_T__ pao_tmp__; pao_tmp__ = ((-(1))); (void)pao_tmp__; } switch (sizeof(((((__kmap_atomic_idx)))))) { case 1: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decb ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addb %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "qi" ((pao_T__)((-(1))))); break; case 2: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decw ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addw %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 4: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? 
!!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decl ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addl %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "ri" ((pao_T__)((-(1))))); break; case 8: if (__builtin_constant_p(((pao_ID__ == 1))) ? !!((pao_ID__ == 1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == 1)); ______f.miss_hit[______r]++; ______r; })) asm("incq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else if (__builtin_constant_p(((pao_ID__ == -1))) ? !!((pao_ID__ == -1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 105, }; ______r = !!((pao_ID__ == -1)); ______f.miss_hit[______r]++; ______r; })) asm("decq ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx)))))); else asm("addq %1, ""%%""fs"":" "%P" "0" : "+m" (((((__kmap_atomic_idx))))) : "re" ((pao_T__)((-(1))))); break; default: __bad_percpu_size(); } } while (0);break; case 8: do { *({ unsigned long tcp_ptr__; do { const void *__vpp_verify = (typeof(&(((((__kmap_atomic_idx)))))))((void *)0); (void)__vpp_verify; } while (0); asm volatile("add " "%%""fs"":" "%P" "1" ", %0" : "=r" (tcp_ptr__) : "m" (this_cpu_off), "0" (&(((((__kmap_atomic_idx))))))); (typeof(*(&(((((__kmap_atomic_idx))))))) *)tcp_ptr__; }) += ((-(1))); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
20881}
20882static inline __attribute__((always_inline)) void clear_user_highpage(struct page *page, unsigned long vaddr)
20883{
20884 void *addr = __kmap_atomic(page);
20885 clear_user_page(addr, vaddr, page);
20886 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((addr)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((addr)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((addr)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 132, }; ______r = !!((__builtin_types_compatible_p(typeof((addr)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(addr); } while (0);
20887}
20888static inline __attribute__((always_inline)) struct page *
20889alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
20890 unsigned long vaddr)
20891{
20892 return alloc_pages_node(numa_node_id(), ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( gfp_t)0x20000u) | (( gfp_t)0x02u)) | (( gfp_t)0x8000u) | (( gfp_t)0x08u), 0);
20893}
20894static inline __attribute__((always_inline)) void clear_highpage(struct page *page)
20895{
20896 void *kaddr = __kmap_atomic(page);
20897 clear_page(kaddr);
20898 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 185, }; ______r = !!((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(kaddr); } while (0);
20899}
20900static inline __attribute__((always_inline)) void zero_user_segments(struct page *page,
20901 unsigned start1, unsigned end1,
20902 unsigned start2, unsigned end2)
20903{
20904 void *kaddr = __kmap_atomic(page);
20905 do { if (__builtin_constant_p((((__builtin_constant_p(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) ? !!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 194, }; ______r = __builtin_expect(!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) ? !!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 194, }; ______r = __builtin_expect(!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 194, }; ______r = !!(((__builtin_constant_p(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) ? 
!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 194, }; ______r = __builtin_expect(!!(end1 > ((1UL) << 12) || end2 > ((1UL) << 12)), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/highmem.h"), "i" (194), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
20906 if (__builtin_constant_p(((end1 > start1))) ? !!((end1 > start1)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 196, }; ______r = !!((end1 > start1)); ______f.miss_hit[______r]++; ______r; }))
20907 __builtin_memset(kaddr + start1, 0, end1 - start1);
20908 if (__builtin_constant_p(((end2 > start2))) ? !!((end2 > start2)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 199, }; ______r = !!((end2 > start2)); ______f.miss_hit[______r]++; ______r; }))
20909 __builtin_memset(kaddr + start2, 0, end2 - start2);
20910 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 202, }; ______r = !!((__builtin_types_compatible_p(typeof((kaddr)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(kaddr); } while (0);
20911 do { } while (0);
20912}
20913static inline __attribute__((always_inline)) void zero_user_segment(struct page *page,
20914 unsigned start, unsigned end)
20915{
20916 zero_user_segments(page, start, end, 0, 0);
20917}
20918static inline __attribute__((always_inline)) void zero_user(struct page *page,
20919 unsigned start, unsigned size)
20920{
20921 zero_user_segments(page, start, start + size, 0, 0);
20922}
20923static inline __attribute__((always_inline)) void __attribute__((deprecated)) memclear_highpage_flush(struct page *page,
20924 unsigned int offset, unsigned int size)
20925{
20926 zero_user(page, offset, size);
20927}
20928static inline __attribute__((always_inline)) void copy_user_highpage(struct page *to, struct page *from,
20929 unsigned long vaddr, struct vm_area_struct *vma)
20930{
20931 char *vfrom, *vto;
20932 vfrom = __kmap_atomic(from);
20933 vto = __kmap_atomic(to);
20934 copy_user_page(vto, vfrom, vaddr, to);
20935 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 234, }; ______r = !!((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vto); } while (0);
20936 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 235, }; ______r = !!((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vfrom); } while (0);
20937}
20938static inline __attribute__((always_inline)) void copy_highpage(struct page *to, struct page *from)
20939{
20940 char *vfrom, *vto;
20941 vfrom = __kmap_atomic(from);
20942 vto = __kmap_atomic(to);
20943 copy_page(vto, vfrom);
20944 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 247, }; ______r = !!((__builtin_types_compatible_p(typeof((vto)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vto); } while (0);
20945 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/highmem.h", .line = 248, }; ______r = !!((__builtin_types_compatible_p(typeof((vfrom)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vfrom); } while (0);
20946}
20947struct scatterlist {
20948 unsigned long page_link;
20949 unsigned int offset;
20950 unsigned int length;
20951 dma_addr_t dma_address;
20952 unsigned int dma_length;
20953};
20954struct sg_table {
20955 struct scatterlist *sgl;
20956 unsigned int nents;
20957 unsigned int orig_nents;
20958};
20959static inline __attribute__((always_inline)) void sg_assign_page(struct scatterlist *sg, struct page *page)
20960{
20961 unsigned long page_link = sg->page_link & 0x3;
20962 do { if (__builtin_constant_p((((__builtin_constant_p((unsigned long) page & 0x03) ? !!((unsigned long) page & 0x03) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/scatterlist.h", .line = 63, }; ______r = __builtin_expect(!!((unsigned long) page & 0x03), 1); ftrace_likely_update(&______f, ______r, 0); ______r; }))))) ? !!(((__builtin_constant_p((unsigned long) page & 0x03) ? !!((unsigned long) page & 0x03) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/scatterlist.h", .line = 63, }; ______r = __builtin_expect(!!((unsigned long) page & 0x03), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/linux/scatterlist.h", .line = 63, }; ______r = !!(((__builtin_constant_p((unsigned long) page & 0x03) ? !!((unsigned long) page & 0x03) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/linux/scatterlist.h", .line = 63, }; ______r = __builtin_expect(!!((unsigned long) page & 0x03), 1); ftrace_likely_update(&______f, ______r, 0); ______r; })))); ______f.miss_hit[______r]++; ______r; })) do { asm volatile("1:\tud2\n" ".pushsection __bug_table,\"a\"\n" "2:\t.long 1b, %c0\n" "\t.word %c1, 0\n" "\t.org 2b+%c2\n" ".popsection" : : "i" ("include/linux/scatterlist.h"), "i" (63), "i" (sizeof(struct bug_entry))); __builtin_unreachable(); } while (0); } while(0);
20963 sg->page_link = page_link | (unsigned long) page;
20964}
20965static inline __attribute__((always_inline)) void sg_set_page(struct scatterlist *sg, struct page *page,
20966 unsigned int len, unsigned int offset)
20967{
20968 sg_assign_page(sg, page);
20969 sg->offset = offset;
20970 sg->length = len;
20971}
20972static inline __attribute__((always_inline)) struct page *sg_page(struct scatterlist *sg)
20973{
20974 return (struct page *)((sg)->page_link & ~0x3);
20975}
20976static inline __attribute__((always_inline)) void sg_set_buf(struct scatterlist *sg, const void *buf,
20977 unsigned int buflen)
20978{
20979 sg_set_page(sg, (mem_map + (((((unsigned long)(buf)) - ((unsigned long)(0xC0000000UL))) >> 12) - (0UL))), buflen, ((unsigned long)(buf) & ~(~(((1UL) << 12)-1))));
20980}
20981static inline __attribute__((always_inline)) void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
20982 struct scatterlist *sgl)
20983{
20984 prv[prv_nents - 1].offset = 0;
20985 prv[prv_nents - 1].length = 0;
20986 prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
20987}
20988static inline __attribute__((always_inline)) void sg_mark_end(struct scatterlist *sg)
20989{
20990 sg->page_link |= 0x02;
20991 sg->page_link &= ~0x01;
20992}
20993static inline __attribute__((always_inline)) dma_addr_t sg_phys(struct scatterlist *sg)
20994{
20995 return ((dma_addr_t)((unsigned long)((sg_page(sg)) - mem_map) + (0UL)) << 12) + sg->offset;
20996}
20997static inline __attribute__((always_inline)) void *sg_virt(struct scatterlist *sg)
20998{
20999 return page_address(sg_page(sg)) + sg->offset;
21000}
21001struct scatterlist *sg_next(struct scatterlist *);
21002struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
21003void sg_init_table(struct scatterlist *, unsigned int);
21004void sg_init_one(struct scatterlist *, const void *, unsigned int);
21005typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
21006typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
21007void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
21008void sg_free_table(struct sg_table *);
21009int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
21010 sg_alloc_fn *);
21011int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
21012size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
21013 void *buf, size_t buflen);
21014size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
21015 void *buf, size_t buflen);
21016struct sg_mapping_iter {
21017 struct page *page;
21018 void *addr;
21019 size_t length;
21020 size_t consumed;
21021 struct scatterlist *__sg;
21022 unsigned int __nents;
21023 unsigned int __offset;
21024 unsigned int __flags;
21025};
21026void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
21027 unsigned int nents, unsigned int flags);
21028bool sg_miter_next(struct sg_mapping_iter *miter);
21029void sg_miter_stop(struct sg_mapping_iter *miter);
21030static inline __attribute__((always_inline)) enum km_type crypto_kmap_type(int out)
21031{
21032 enum km_type type;
21033 if (__builtin_constant_p((((((current_thread_info()->preempt_count) & (((1UL << (8))-1) << (0 + 8))))))) ? !!(((((current_thread_info()->preempt_count) & (((1UL << (8))-1) << (0 + 8)))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 32, }; ______r = !!(((((current_thread_info()->preempt_count) & (((1UL << (8))-1) << (0 + 8)))))); ______f.miss_hit[______r]++; ______r; }))
21034 type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
21035 else
21036 type = out * (KM_USER1 - KM_USER0) + KM_USER0;
21037 return type;
21038}
21039static inline __attribute__((always_inline)) void *crypto_kmap(struct page *page, int out)
21040{
21041 return __kmap_atomic(page);
21042}
21043static inline __attribute__((always_inline)) void crypto_kunmap(void *vaddr, int out)
21044{
21045 do { do { ((void)sizeof(char[1 - 2*!!(__builtin_types_compatible_p(typeof((vaddr)), typeof(struct page *)))])); if (__builtin_constant_p(((__builtin_types_compatible_p(typeof((vaddr)), typeof(struct page *))))) ? !!((__builtin_types_compatible_p(typeof((vaddr)), typeof(struct page *)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 47, }; ______r = !!((__builtin_types_compatible_p(typeof((vaddr)), typeof(struct page *)))); ______f.miss_hit[______r]++; ______r; })) __build_bug_on_failed = 1; } while(0); __kunmap_atomic(vaddr); } while (0);
21046}
21047static inline __attribute__((always_inline)) void crypto_yield(u32 flags)
21048{
21049 if (__builtin_constant_p(((flags & 0x00000200))) ? !!((flags & 0x00000200)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 52, }; ______r = !!((flags & 0x00000200)); ______f.miss_hit[______r]++; ______r; }))
21050 ({ __might_sleep("include/crypto/scatterwalk.h", 53, 0); _cond_resched(); });
21051}
21052static inline __attribute__((always_inline)) void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
21053 struct scatterlist *sg2)
21054{
21055 sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
21056 sg1[num - 1].page_link &= ~0x02;
21057}
/* Advance to the next scatterlist entry: NULL at the end marker (bit 0x02),
 * otherwise the adjacent entry, or -- when that entry's length is 0 -- the
 * chained list stored in its page_link. */
static inline __attribute__((always_inline)) struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
{
 if (__builtin_constant_p(((((sg)->page_link & 0x02)))) ? !!((((sg)->page_link & 0x02))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 65, }; ______r = !!((((sg)->page_link & 0x02))); ______f.miss_hit[______r]++; ______r; }))
  return ((void *)0);
 return (++sg)->length ? sg : (void *)sg_page(sg);
}
/* Attach @sg after @head. With @chain set, merge the first entry's length
 * into @head and skip past it; if nothing remains to attach, mark @head as
 * the end of the list instead. */
static inline __attribute__((always_inline)) void scatterwalk_crypto_chain(struct scatterlist *head,
         struct scatterlist *sg,
         int chain, int num)
{
 if (__builtin_constant_p(((chain))) ? !!((chain)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 75, }; ______r = !!((chain)); ______f.miss_hit[______r]++; ______r; })) {
  head->length += sg->length;
  sg = scatterwalk_sg_next(sg);
 }
 if (__builtin_constant_p(((sg))) ? !!((sg)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/scatterwalk.h", .line = 80, }; ______r = !!((sg)); ______f.miss_hit[______r]++; ______r; }))
  scatterwalk_sg_chain(head, num, sg);
 else
  sg_mark_end(head);
}
21077static inline __attribute__((always_inline)) unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
21078 struct scatter_walk *walk_out)
21079{
21080 return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << 12) +
21081 (int)(walk_in->offset - walk_out->offset));
21082}
21083static inline __attribute__((always_inline)) unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
21084{
21085 unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
21086 unsigned int len_this_page = ((unsigned long)(~walk->offset) & ~(~(((1UL) << 12)-1))) + 1;
21087 return len_this_page > len ? len : len_this_page;
21088}
/* Clamp @nbytes to what fits in the current page/scatterlist entry. */
static inline __attribute__((always_inline)) unsigned int scatterwalk_clamp(struct scatter_walk *walk,
        unsigned int nbytes)
{
 unsigned int page_room = scatterwalk_pagelen(walk);

 if (nbytes > page_room)
  return page_room;
 return nbytes;
}
21095static inline __attribute__((always_inline)) void scatterwalk_advance(struct scatter_walk *walk,
21096 unsigned int nbytes)
21097{
21098 walk->offset += nbytes;
21099}
21100static inline __attribute__((always_inline)) unsigned int scatterwalk_aligned(struct scatter_walk *walk,
21101 unsigned int alignmask)
21102{
21103 return !(walk->offset & alignmask);
21104}
21105static inline __attribute__((always_inline)) struct page *scatterwalk_page(struct scatter_walk *walk)
21106{
21107 return sg_page(walk->sg) + (walk->offset >> 12);
21108}
/* Release a mapping obtained via scatterwalk_map()/crypto_kmap(). */
static inline __attribute__((always_inline)) void scatterwalk_unmap(void *vaddr, int out)
{
 crypto_kunmap(vaddr, out);
}
21113void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
21114void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
21115 size_t nbytes, int out);
21116void *scatterwalk_map(struct scatter_walk *walk, int out);
21117void scatterwalk_done(struct scatter_walk *walk, int out, int more);
21118void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
21119 unsigned int start, unsigned int nbytes, int out);
/* AEAD request variant for transforms that generate their own IV. */
struct aead_givcrypt_request {
 u64 seq;                  /* sequence number supplied with the IV buffer */
 u8 *giv;                  /* destination buffer for the generated IV */
 struct aead_request areq; /* embedded plain AEAD request */
};
21125static inline __attribute__((always_inline)) struct crypto_aead *aead_givcrypt_reqtfm(
21126 struct aead_givcrypt_request *req)
21127{
21128 return crypto_aead_reqtfm(&req->areq);
21129}
21130static inline __attribute__((always_inline)) int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
21131{
21132 struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
21133 return crt->givencrypt(req);
21134};
21135static inline __attribute__((always_inline)) int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
21136{
21137 struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
21138 return crt->givdecrypt(req);
21139};
21140static inline __attribute__((always_inline)) void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
21141 struct crypto_aead *tfm)
21142{
21143 req->areq.base.tfm = crypto_aead_tfm(tfm);
21144}
/* Allocate a givcrypt request plus the transform's private request area;
 * on success the transform is bound, on allocation failure NULL is
 * returned with no binding done. */
static inline __attribute__((always_inline)) struct aead_givcrypt_request *aead_givcrypt_alloc(
 struct crypto_aead *tfm, gfp_t gfp)
{
 struct aead_givcrypt_request *req;
 req = kmalloc(sizeof(struct aead_givcrypt_request) +
        crypto_aead_reqsize(tfm), gfp);
 /* Expanded likely(req): bind the tfm only when kmalloc succeeded. */
 if (__builtin_constant_p((((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/aead.h", .line = 65, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; }))))) ? !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/aead.h", .line = 65, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "include/crypto/aead.h", .line = 65, }; ______r = !!(((__builtin_constant_p(req) ? !!(req) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_annotated_branch"))) ______f = { .func = __func__, .file = "include/crypto/aead.h", .line = 65, }; ______r = __builtin_expect(!!(req), 1); ftrace_likely_update(&______f, ______r, 1); ______r; })))); ______f.miss_hit[______r]++; ______r; }))
  aead_givcrypt_set_tfm(req, tfm);
 return req;
}
/* Free a request obtained from aead_givcrypt_alloc(); NULL is a no-op. */
static inline __attribute__((always_inline)) void aead_givcrypt_free(struct aead_givcrypt_request *req)
{
 kfree(req);
}
21159static inline __attribute__((always_inline)) void aead_givcrypt_set_callback(
21160 struct aead_givcrypt_request *req, u32 flags,
21161 crypto_completion_t complete, void *data)
21162{
21163 aead_request_set_callback(&req->areq, flags, complete, data);
21164}
21165static inline __attribute__((always_inline)) void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
21166 struct scatterlist *src,
21167 struct scatterlist *dst,
21168 unsigned int nbytes, void *iv)
21169{
21170 aead_request_set_crypt(&req->areq, src, dst, nbytes, iv);
21171}
21172static inline __attribute__((always_inline)) void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req,
21173 struct scatterlist *assoc,
21174 unsigned int assoclen)
21175{
21176 aead_request_set_assoc(&req->areq, assoc, assoclen);
21177}
21178static inline __attribute__((always_inline)) void aead_givcrypt_set_giv(struct aead_givcrypt_request *req,
21179 u8 *giv, u64 seq)
21180{
21181 req->giv = giv;
21182 req->seq = seq;
21183}
21184struct rtattr;
/* Spawn handle tying an AEAD algorithm instance to its template. */
struct crypto_aead_spawn {
 struct crypto_spawn base; /* generic spawn this wrapper specializes */
};
21188extern const struct crypto_type crypto_nivaead_type;
21189static inline __attribute__((always_inline)) void crypto_set_aead_spawn(
21190 struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
21191{
21192 crypto_set_spawn(&spawn->base, inst);
21193}
21194int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
21195 u32 type, u32 mask);
21196static inline __attribute__((always_inline)) void crypto_drop_aead(struct crypto_aead_spawn *spawn)
21197{
21198 crypto_drop_spawn(&spawn->base);
21199}
21200static inline __attribute__((always_inline)) struct crypto_alg *crypto_aead_spawn_alg(
21201 struct crypto_aead_spawn *spawn)
21202{
21203 return spawn->base.alg;
21204}
21205static inline __attribute__((always_inline)) struct crypto_aead *crypto_spawn_aead(
21206 struct crypto_aead_spawn *spawn)
21207{
21208 return __crypto_aead_cast(
21209 crypto_spawn_tfm(&spawn->base, 0x00000003,
21210 0x0000000f));
21211}
21212struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
21213 struct rtattr **tb, u32 type,
21214 u32 mask);
21215void aead_geniv_free(struct crypto_instance *inst);
21216int aead_geniv_init(struct crypto_tfm *tfm);
21217void aead_geniv_exit(struct crypto_tfm *tfm);
21218static inline __attribute__((always_inline)) struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv)
21219{
21220 return crypto_aead_crt(geniv)->base;
21221}
21222static inline __attribute__((always_inline)) void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req)
21223{
21224 return aead_request_ctx(&req->areq);
21225}
21226static inline __attribute__((always_inline)) void aead_givcrypt_complete(struct aead_givcrypt_request *req,
21227 int err)
21228{
21229 aead_request_complete(&req->areq, err);
21230}
/* Per-tfm context for the async (cryptd-backed) AES wrappers. */
struct async_aes_ctx {
 struct cryptd_ablkcipher *cryptd_tfm; /* cryptd child doing the real work */
};
/* Context for RFC 4106 (AES-GCM for IPsec) built on AES-NI. */
struct aesni_rfc4106_gcm_ctx {
 u8 hash_subkey[16];                     /* GHASH subkey material */
 struct crypto_aes_ctx aes_key_expanded; /* expanded AES round keys */
 u8 nonce[4];                            /* salt portion of the RFC 4106 IV */
 struct cryptd_aead *cryptd_tfm;         /* async cryptd backend */
};
/* Completion carrier used while deriving the GCM hash subkey. */
struct aesni_gcm_set_hash_subkey_result {
 int err;                      /* status reported by the async operation */
 struct completion completion; /* signalled when the derivation finishes */
};
/* Scratch data for the hash-subkey derivation request. */
struct aesni_hash_subkey_req_data {
 u8 iv[16];                                      /* IV for the derivation */
 struct aesni_gcm_set_hash_subkey_result result; /* completion + status */
 struct scatterlist sg;                          /* single-entry data sg */
};
21249 __attribute__((regparm(0))) int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
21250 unsigned int key_len);
21251 __attribute__((regparm(0))) void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
21252 const u8 *in);
21253 __attribute__((regparm(0))) void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
21254 const u8 *in);
21255 __attribute__((regparm(0))) void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
21256 const u8 *in, unsigned int len);
21257 __attribute__((regparm(0))) void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
21258 const u8 *in, unsigned int len);
21259 __attribute__((regparm(0))) void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
21260 const u8 *in, unsigned int len, u8 *iv);
21261 __attribute__((regparm(0))) void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
21262 const u8 *in, unsigned int len, u8 *iv);
21263int crypto_fpu_init(void);
21264void crypto_fpu_exit(void);
/* Return the 16-byte-aligned AES context inside @raw_ctx. The raw tfm
 * context is over-allocated by 15 bytes (see the cra_ctxsize fields) so
 * this rounding always stays inside the allocation. */
static inline __attribute__((always_inline)) struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
 unsigned long addr = (unsigned long)raw_ctx;
 unsigned long align = (16);
 /* If the API already guarantees at least 16-byte alignment, skip rounding. */
 if (__builtin_constant_p(((align <= crypto_tfm_ctx_alignment()))) ? !!((align <= crypto_tfm_ctx_alignment())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 162, }; ______r = !!((align <= crypto_tfm_ctx_alignment())); ______f.miss_hit[______r]++; ______r; }))
  align = 1;
 /* Round addr up to the chosen alignment. */
 return (struct crypto_aes_ctx *)((((addr)) + ((typeof((addr)))((align)) - 1)) & ~((typeof((addr)))((align)) - 1));
}
/* Expand an AES key into @raw_ctx. Uses the AES-NI instruction when the
 * FPU is usable in the current context, otherwise falls back to the
 * generic C expansion. Returns 0 or a negative errno. */
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
         const u8 *in_key, unsigned int key_len)
{
 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
 u32 *flags = &tfm->crt_flags;
 int err;
 /* Only AES-128/192/256 key sizes are valid. */
 if (__builtin_constant_p(((key_len != 16 && key_len != 24 && key_len != 32))) ? !!((key_len != 16 && key_len != 24 && key_len != 32)) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file =
 "arch/x86/crypto/aesni-intel_glue.c"
 , .line =
 175
 , }; ______r = !!((key_len != 16 && key_len != 24 && key_len != 32)); ______f.miss_hit[______r]++; ______r; }))
 {
  /* Report the bad key length on the tfm and fail with -EINVAL. */
  *flags |= 0x00200000;
  return -22;
 }
 if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 180, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; }))
  err = crypto_aes_expand_key(ctx, in_key, key_len);
 else {
  /* SSE/AES-NI registers need the kernel-FPU region. */
  kernel_fpu_begin();
  err = aesni_set_key(ctx, in_key, key_len);
  kernel_fpu_end();
 }
 return err;
}
21297static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
21298 unsigned int key_len)
21299{
21300 return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
21301}
/* Encrypt one 16-byte block. Uses AES-NI when the FPU is available,
 * otherwise the x86 assembly fallback that avoids SSE state. */
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 201, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; }))
  crypto_aes_encrypt_x86(ctx, dst, src);
 else {
  kernel_fpu_begin();
  aesni_enc(ctx, dst, src);
  kernel_fpu_end();
 }
}
/* Decrypt one 16-byte block; mirror of aes_encrypt() with the same
 * FPU-availability fallback. */
static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 214, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; }))
  crypto_aes_decrypt_x86(ctx, dst, src);
 else {
  kernel_fpu_begin();
  aesni_dec(ctx, dst, src);
  kernel_fpu_end();
 }
}
/* Synchronous single-block "aes" cipher with FPU-availability fallback. */
static struct crypto_alg aesni_alg = {
 .cra_name = "aes",
 .cra_driver_name = "aes-aesni",
 .cra_priority = 300,
 .cra_flags = 0x00000001,
 .cra_blocksize = 16,
 /* +15 bytes so aes_ctx() can realign the context to 16 bytes. */
 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+(16)-1,
 .cra_alignmask = 0,
 .cra_module = (&__this_module),
 /* Statically self-linked list head (expanded LIST_HEAD_INIT). */
 .cra_list = { &(aesni_alg.cra_list), &(aesni_alg.cra_list) },
 .cra_u = {
  .cipher = {
   .cia_min_keysize = 16,
   .cia_max_keysize = 32,
   .cia_setkey = aes_set_key,
   .cia_encrypt = aes_encrypt,
   .cia_decrypt = aes_decrypt
  }
 }
};
21344static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
21345{
21346 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
21347 aesni_enc(ctx, dst, src);
21348}
21349static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
21350{
21351 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
21352 aesni_dec(ctx, dst, src);
21353}
/* Internal-only cipher variant (priority 0, "__" names) whose encrypt and
 * decrypt hooks require the FPU to be held by the caller. */
static struct crypto_alg __aesni_alg = {
 .cra_name = "__aes-aesni",
 .cra_driver_name = "__driver-aes-aesni",
 .cra_priority = 0,
 .cra_flags = 0x00000001,
 .cra_blocksize = 16,
 /* +15 bytes so aes_ctx() can realign the context to 16 bytes. */
 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+(16)-1,
 .cra_alignmask = 0,
 .cra_module = (&__this_module),
 .cra_list = { &(__aesni_alg.cra_list), &(__aesni_alg.cra_list) },
 .cra_u = {
  .cipher = {
   .cia_min_keysize = 16,
   .cia_max_keysize = 32,
   .cia_setkey = aes_set_key,
   .cia_encrypt = __aes_encrypt,
   .cia_decrypt = __aes_decrypt
  }
 }
};
21374static int ecb_encrypt(struct blkcipher_desc *desc,
21375 struct scatterlist *dst, struct scatterlist *src,
21376 unsigned int nbytes)
21377{
21378 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
21379 struct blkcipher_walk walk;
21380 int err;
21381 blkcipher_walk_init(&walk, dst, src, nbytes);
21382 err = blkcipher_walk_virt(desc, &walk);
21383 desc->flags &= ~0x00000200;
21384 kernel_fpu_begin();
21385 while ((nbytes = walk.nbytes)) {
21386 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
21387 nbytes & (~(16 -1)));
21388 nbytes &= 16 - 1;
21389 err = blkcipher_walk_done(desc, &walk, nbytes);
21390 }
21391 kernel_fpu_end();
21392 return err;
21393}
21394static int ecb_decrypt(struct blkcipher_desc *desc,
21395 struct scatterlist *dst, struct scatterlist *src,
21396 unsigned int nbytes)
21397{
21398 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
21399 struct blkcipher_walk walk;
21400 int err;
21401 blkcipher_walk_init(&walk, dst, src, nbytes);
21402 err = blkcipher_walk_virt(desc, &walk);
21403 desc->flags &= ~0x00000200;
21404 kernel_fpu_begin();
21405 while ((nbytes = walk.nbytes)) {
21406 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
21407 nbytes & (~(16 -1)));
21408 nbytes &= 16 - 1;
21409 err = blkcipher_walk_done(desc, &walk, nbytes);
21410 }
21411 kernel_fpu_end();
21412 return err;
21413}
/* Internal synchronous ECB blkcipher; wrapped by the async ecb(aes) alg. */
static struct crypto_alg blk_ecb_alg = {
 .cra_name = "__ecb-aes-aesni",
 .cra_driver_name = "__driver-ecb-aes-aesni",
 .cra_priority = 0,
 .cra_flags = 0x00000004,
 .cra_blocksize = 16,
 /* +15 bytes so aes_ctx() can realign the context to 16 bytes. */
 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+(16)-1,
 .cra_alignmask = 0,
 .cra_type = &crypto_blkcipher_type,
 .cra_module = (&__this_module),
 .cra_list = { &(blk_ecb_alg.cra_list), &(blk_ecb_alg.cra_list) },
 .cra_u = {
  .blkcipher = {
   .min_keysize = 16,
   .max_keysize = 32,
   .setkey = aes_set_key,
   .encrypt = ecb_encrypt,
   .decrypt = ecb_decrypt,
  },
 },
};
21435static int cbc_encrypt(struct blkcipher_desc *desc,
21436 struct scatterlist *dst, struct scatterlist *src,
21437 unsigned int nbytes)
21438{
21439 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
21440 struct blkcipher_walk walk;
21441 int err;
21442 blkcipher_walk_init(&walk, dst, src, nbytes);
21443 err = blkcipher_walk_virt(desc, &walk);
21444 desc->flags &= ~0x00000200;
21445 kernel_fpu_begin();
21446 while ((nbytes = walk.nbytes)) {
21447 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
21448 nbytes & (~(16 -1)), walk.iv);
21449 nbytes &= 16 - 1;
21450 err = blkcipher_walk_done(desc, &walk, nbytes);
21451 }
21452 kernel_fpu_end();
21453 return err;
21454}
21455static int cbc_decrypt(struct blkcipher_desc *desc,
21456 struct scatterlist *dst, struct scatterlist *src,
21457 unsigned int nbytes)
21458{
21459 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
21460 struct blkcipher_walk walk;
21461 int err;
21462 blkcipher_walk_init(&walk, dst, src, nbytes);
21463 err = blkcipher_walk_virt(desc, &walk);
21464 desc->flags &= ~0x00000200;
21465 kernel_fpu_begin();
21466 while ((nbytes = walk.nbytes)) {
21467 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
21468 nbytes & (~(16 -1)), walk.iv);
21469 nbytes &= 16 - 1;
21470 err = blkcipher_walk_done(desc, &walk, nbytes);
21471 }
21472 kernel_fpu_end();
21473 return err;
21474}
/* Internal synchronous CBC blkcipher; wrapped by the async cbc(aes) alg. */
static struct crypto_alg blk_cbc_alg = {
 .cra_name = "__cbc-aes-aesni",
 .cra_driver_name = "__driver-cbc-aes-aesni",
 .cra_priority = 0,
 .cra_flags = 0x00000004,
 .cra_blocksize = 16,
 /* +15 bytes so aes_ctx() can realign the context to 16 bytes. */
 .cra_ctxsize = sizeof(struct crypto_aes_ctx)+(16)-1,
 .cra_alignmask = 0,
 .cra_type = &crypto_blkcipher_type,
 .cra_module = (&__this_module),
 .cra_list = { &(blk_cbc_alg.cra_list), &(blk_cbc_alg.cra_list) },
 .cra_u = {
  .blkcipher = {
   .min_keysize = 16,
   .max_keysize = 32,
   .setkey = aes_set_key,
   .encrypt = cbc_encrypt,
   .decrypt = cbc_decrypt,
  },
 },
};
21496static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
21497 unsigned int key_len)
21498{
21499 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
21500 struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
21501 int err;
21502 crypto_ablkcipher_clear_flags(child, 0x000fff00);
21503 crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
21504 & 0x000fff00);
21505 err = crypto_ablkcipher_setkey(child, key, key_len);
21506 crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
21507 & 0xfff00000);
21508 return err;
21509}
/* Async encrypt entry point: if the FPU is unavailable here, defer the
 * request to cryptd; otherwise run the synchronous child inline. */
static int ablk_encrypt(struct ablkcipher_request *req)
{
 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 508, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; })) {
  /* Re-target a copy of the request at the cryptd transform;
   * the copy lives in this request's context area (see
   * ablk_init_common() which sized it for this). */
  struct ablkcipher_request *cryptd_req =
   ablkcipher_request_ctx(req);
  __builtin_memcpy(cryptd_req, req, sizeof(*req));
  ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
  return crypto_ablkcipher_encrypt(cryptd_req);
 } else {
  /* Call the synchronous child blkcipher directly. */
  struct blkcipher_desc desc;
  desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
  desc.info = req->info;
  desc.flags = 0;
  return crypto_blkcipher_crt(desc.tfm)->encrypt(
   &desc, req->dst, req->src, req->nbytes);
 }
}
/* Async decrypt entry point; mirror of ablk_encrypt() (cryptd deferral
 * when the FPU is unavailable, inline synchronous child otherwise). */
static int ablk_decrypt(struct ablkcipher_request *req)
{
 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 if (__builtin_constant_p(((!irq_fpu_usable()))) ? !!((!irq_fpu_usable())) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 529, }; ______r = !!((!irq_fpu_usable())); ______f.miss_hit[______r]++; ______r; })) {
  struct ablkcipher_request *cryptd_req =
   ablkcipher_request_ctx(req);
  __builtin_memcpy(cryptd_req, req, sizeof(*req));
  ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
  return crypto_ablkcipher_decrypt(cryptd_req);
 } else {
  struct blkcipher_desc desc;
  desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
  desc.info = req->info;
  desc.flags = 0;
  return crypto_blkcipher_crt(desc.tfm)->decrypt(
   &desc, req->dst, req->src, req->nbytes);
 }
}
21548static void ablk_exit(struct crypto_tfm *tfm)
21549{
21550 struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
21551 cryptd_free_ablkcipher(ctx->cryptd_tfm);
21552}
21553static void ablk_init_common(struct crypto_tfm *tfm,
21554 struct cryptd_ablkcipher *cryptd_tfm)
21555{
21556 struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
21557 ctx->cryptd_tfm = cryptd_tfm;
21558 tfm->crt_u.ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
21559 crypto_ablkcipher_reqsize(&cryptd_tfm->base);
21560}
/* Allocate the internal ECB driver via cryptd and hook it up to this tfm. */
static int ablk_ecb_init(struct crypto_tfm *tfm)
{
 struct cryptd_ablkcipher *cryptd_tfm;
 cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
 if (__builtin_constant_p(((IS_ERR(cryptd_tfm)))) ? !!((IS_ERR(cryptd_tfm))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 567, }; ______r = !!((IS_ERR(cryptd_tfm))); ______f.miss_hit[______r]++; ______r; }))
  return PTR_ERR(cryptd_tfm);
 ablk_init_common(tfm, cryptd_tfm);
 return 0;
}
/* Public async ecb(aes) algorithm backed by the internal ECB driver. */
static struct crypto_alg ablk_ecb_alg = {
 .cra_name = "ecb(aes)",
 .cra_driver_name = "ecb-aes-aesni",
 .cra_priority = 400,
 .cra_flags = 0x00000005|0x00000080,
 .cra_blocksize = 16,
 .cra_ctxsize = sizeof(struct async_aes_ctx),
 .cra_alignmask = 0,
 .cra_type = &crypto_ablkcipher_type,
 .cra_module = (&__this_module),
 .cra_list = { &(ablk_ecb_alg.cra_list), &(ablk_ecb_alg.cra_list) },
 .cra_init = ablk_ecb_init,
 .cra_exit = ablk_exit,
 .cra_u = {
  .ablkcipher = {
   .min_keysize = 16,
   .max_keysize = 32,
   .setkey = ablk_set_key,
   .encrypt = ablk_encrypt,
   .decrypt = ablk_decrypt,
  },
 },
};
/* Allocate the internal CBC driver via cryptd and hook it up to this tfm. */
static int ablk_cbc_init(struct crypto_tfm *tfm)
{
 struct cryptd_ablkcipher *cryptd_tfm;
 cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
 if (__builtin_constant_p(((IS_ERR(cryptd_tfm)))) ? !!((IS_ERR(cryptd_tfm))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 602, }; ______r = !!((IS_ERR(cryptd_tfm))); ______f.miss_hit[______r]++; ______r; }))
  return PTR_ERR(cryptd_tfm);
 ablk_init_common(tfm, cryptd_tfm);
 return 0;
}
/* Public async cbc(aes) algorithm backed by the internal CBC driver. */
static struct crypto_alg ablk_cbc_alg = {
 .cra_name = "cbc(aes)",
 .cra_driver_name = "cbc-aes-aesni",
 .cra_priority = 400,
 .cra_flags = 0x00000005|0x00000080,
 .cra_blocksize = 16,
 .cra_ctxsize = sizeof(struct async_aes_ctx),
 .cra_alignmask = 0,
 .cra_type = &crypto_ablkcipher_type,
 .cra_module = (&__this_module),
 .cra_list = { &(ablk_cbc_alg.cra_list), &(ablk_cbc_alg.cra_list) },
 .cra_init = ablk_cbc_init,
 .cra_exit = ablk_exit,
 .cra_u = {
  .ablkcipher = {
   .min_keysize = 16,
   .max_keysize = 32,
   .ivsize = 16,
   .setkey = ablk_set_key,
   .encrypt = ablk_encrypt,
   .decrypt = ablk_decrypt,
  },
 },
};
/* Allocate a cryptd-backed PCBC instance built from the generic pcbc
 * template over the internal "__driver-aes-aesni" cipher, with the fpu
 * template supplying the kernel-FPU bracketing. */
static int ablk_pcbc_init(struct crypto_tfm *tfm)
{
 struct cryptd_ablkcipher *cryptd_tfm;
 cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
          0, 0);
 if (__builtin_constant_p(((IS_ERR(cryptd_tfm)))) ? !!((IS_ERR(cryptd_tfm))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 758, }; ______r = !!((IS_ERR(cryptd_tfm))); ______f.miss_hit[______r]++; ______r; }))
  return PTR_ERR(cryptd_tfm);
 ablk_init_common(tfm, cryptd_tfm);
 return 0;
}
/* Public async pcbc(aes) algorithm backed by the fpu(pcbc(...)) instance. */
static struct crypto_alg ablk_pcbc_alg = {
 .cra_name = "pcbc(aes)",
 .cra_driver_name = "pcbc-aes-aesni",
 .cra_priority = 400,
 .cra_flags = 0x00000005|0x00000080,
 .cra_blocksize = 16,
 .cra_ctxsize = sizeof(struct async_aes_ctx),
 .cra_alignmask = 0,
 .cra_type = &crypto_ablkcipher_type,
 .cra_module = (&__this_module),
 .cra_list = { &(ablk_pcbc_alg.cra_list), &(ablk_pcbc_alg.cra_list) },
 .cra_init = ablk_pcbc_init,
 .cra_exit = ablk_exit,
 .cra_u = {
  .ablkcipher = {
   .min_keysize = 16,
   .max_keysize = 32,
   .ivsize = 16,
   .setkey = ablk_set_key,
   .encrypt = ablk_encrypt,
   .decrypt = ablk_decrypt,
  },
 },
};
21660static int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) aesni_init(void)
21661{
21662 int err;
21663 if (__builtin_constant_p(((!(__builtin_constant_p((4*32+25)) && ( ((((4*32+25))>>5)==0 && (1UL<<(((4*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+25))>>5)==1 && (1UL<<(((4*32+25))&31) & (0|0))) || ((((4*32+25))>>5)==2 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==3 && (1UL<<(((4*32+25))&31) & (0))) || ((((4*32+25))>>5)==4 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==5 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==6 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==7 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==8 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==9 && (1UL<<(((4*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+25))) ? constant_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability)))))))) ? !!((!(__builtin_constant_p((4*32+25)) && ( ((((4*32+25))>>5)==0 && (1UL<<(((4*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+25))>>5)==1 && (1UL<<(((4*32+25))&31) & (0|0))) || ((((4*32+25))>>5)==2 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==3 && (1UL<<(((4*32+25))&31) & (0))) || ((((4*32+25))>>5)==4 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==5 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==6 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==7 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==8 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==9 && (1UL<<(((4*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+25))) ? 
constant_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1259, }; ______r = !!((!(__builtin_constant_p((4*32+25)) && ( ((((4*32+25))>>5)==0 && (1UL<<(((4*32+25))&31) & ((1<<((0*32+ 0) & 31))|0|0|(1<<((0*32+ 6) & 31))| (1<<((0*32+ 8) & 31))|0|0|(1<<((0*32+15) & 31))| 0|0))) || ((((4*32+25))>>5)==1 && (1UL<<(((4*32+25))&31) & (0|0))) || ((((4*32+25))>>5)==2 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==3 && (1UL<<(((4*32+25))&31) & (0))) || ((((4*32+25))>>5)==4 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==5 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==6 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==7 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==8 && (1UL<<(((4*32+25))&31) & 0)) || ((((4*32+25))>>5)==9 && (1UL<<(((4*32+25))&31) & 0)) ) ? 1 : (__builtin_constant_p(((4*32+25))) ? constant_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))) : variable_test_bit(((4*32+25)), ((unsigned long *)((&boot_cpu_data)->x86_capability))))))); ______f.miss_hit[______r]++; ______r; })) {
21664 printk("<6>" "Intel AES-NI instructions are not detected.\n");
21665 return -19;
21666 }
21667 if (__builtin_constant_p((((err = crypto_fpu_init())))) ? !!(((err = crypto_fpu_init()))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1264, }; ______r = !!(((err = crypto_fpu_init()))); ______f.miss_hit[______r]++; ______r; }))
21668 goto fpu_err;
21669 if (__builtin_constant_p((((err = crypto_register_alg(&aesni_alg))))) ? !!(((err = crypto_register_alg(&aesni_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1266, }; ______r = !!(((err = crypto_register_alg(&aesni_alg)))); ______f.miss_hit[______r]++; ______r; }))
21670 goto aes_err;
21671 if (__builtin_constant_p((((err = crypto_register_alg(&__aesni_alg))))) ? !!(((err = crypto_register_alg(&__aesni_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1268, }; ______r = !!(((err = crypto_register_alg(&__aesni_alg)))); ______f.miss_hit[______r]++; ______r; }))
21672 goto __aes_err;
21673 if (__builtin_constant_p((((err = crypto_register_alg(&blk_ecb_alg))))) ? !!(((err = crypto_register_alg(&blk_ecb_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1270, }; ______r = !!(((err = crypto_register_alg(&blk_ecb_alg)))); ______f.miss_hit[______r]++; ______r; }))
21674 goto blk_ecb_err;
21675 if (__builtin_constant_p((((err = crypto_register_alg(&blk_cbc_alg))))) ? !!(((err = crypto_register_alg(&blk_cbc_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1272, }; ______r = !!(((err = crypto_register_alg(&blk_cbc_alg)))); ______f.miss_hit[______r]++; ______r; }))
21676 goto blk_cbc_err;
21677 if (__builtin_constant_p((((err = crypto_register_alg(&ablk_ecb_alg))))) ? !!(((err = crypto_register_alg(&ablk_ecb_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1274, }; ______r = !!(((err = crypto_register_alg(&ablk_ecb_alg)))); ______f.miss_hit[______r]++; ______r; }))
21678 goto ablk_ecb_err;
21679 if (__builtin_constant_p((((err = crypto_register_alg(&ablk_cbc_alg))))) ? !!(((err = crypto_register_alg(&ablk_cbc_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1276, }; ______r = !!(((err = crypto_register_alg(&ablk_cbc_alg)))); ______f.miss_hit[______r]++; ______r; }))
21680 goto ablk_cbc_err;
21681 if (__builtin_constant_p((((err = crypto_register_alg(&ablk_pcbc_alg))))) ? !!(((err = crypto_register_alg(&ablk_pcbc_alg)))) : ({ int ______r; static struct ftrace_branch_data __attribute__((__aligned__(4))) __attribute__((section("_ftrace_branch"))) ______f = { .func = __func__, .file = "arch/x86/crypto/aesni-intel_glue.c", .line = 1297, }; ______r = !!(((err = crypto_register_alg(&ablk_pcbc_alg)))); ______f.miss_hit[______r]++; ______r; }))
21682 goto ablk_pcbc_err;
21683 return err;
21684 crypto_unregister_alg(&ablk_pcbc_alg);
21685ablk_pcbc_err:
21686 crypto_unregister_alg(&ablk_cbc_alg);
21687ablk_cbc_err:
21688 crypto_unregister_alg(&ablk_ecb_alg);
21689ablk_ecb_err:
21690 crypto_unregister_alg(&blk_cbc_alg);
21691blk_cbc_err:
21692 crypto_unregister_alg(&blk_ecb_alg);
21693blk_ecb_err:
21694 crypto_unregister_alg(&__aesni_alg);
21695__aes_err:
21696 crypto_unregister_alg(&aesni_alg);
21697aes_err:
21698fpu_err:
21699 return err;
21700}
/*
 * Module exit handler (preprocessor-expanded form of an __exit function:
 * placed in .exit.text, marked cold and not instrumented).
 *
 * Tears the module down by unregistering every algorithm in the exact
 * reverse order of their registration in aesni_init() above, then
 * releases the FPU crypto template via crypto_fpu_exit().  The reverse
 * order mirrors the init path's error-unwind labels.
 */
static void __attribute__ ((__section__(".exit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) aesni_exit(void)
{
	crypto_unregister_alg(&ablk_pcbc_alg);
	crypto_unregister_alg(&ablk_cbc_alg);
	crypto_unregister_alg(&ablk_ecb_alg);
	crypto_unregister_alg(&blk_cbc_alg);
	crypto_unregister_alg(&blk_ecb_alg);
	crypto_unregister_alg(&__aesni_alg);
	crypto_unregister_alg(&aesni_alg);
	/* Release the "fpu" crypto template acquired by crypto_fpu_init(). */
	crypto_fpu_exit();
}
/*
 * Preprocessor-expanded module_init(aesni_init) / module_exit(aesni_exit):
 * __inittest/__exittest exist only to type-check that aesni_init matches
 * initcall_t and aesni_exit matches exitcall_t at compile time; the real
 * entry points are init_module/cleanup_module, aliased to the handlers.
 */
static inline __attribute__((always_inline)) initcall_t __inittest(void) { return aesni_init; } int init_module(void) __attribute__((alias("aesni_init")));;
static inline __attribute__((always_inline)) exitcall_t __exittest(void) { return aesni_exit; } void cleanup_module(void) __attribute__((alias("aesni_exit")));;
/*
 * Preprocessor-expanded MODULE_DESCRIPTION/MODULE_LICENSE/MODULE_ALIAS:
 * "key=value" strings placed in the .modinfo section for modinfo(8) and
 * module autoloading (the "aes" alias).
 */
static const char __mod_description1380[] __attribute__((__used__)) __attribute__((section(".modinfo"), unused, aligned(1))) = "description" "=" "Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized";
static const char __mod_license1381[] __attribute__((__used__)) __attribute__((section(".modinfo"), unused, aligned(1))) = "license" "=" "GPL";
static const char __mod_alias1382[] __attribute__((__used__)) __attribute__((section(".modinfo"), unused, aligned(1))) = "alias" "=" "aes";