1 # 1 "kernel/trace/trace.c"
2 # 1 "/usr/local/src/blackfin/git/linux-kernel//"
3 # 1 "<built-in>"
4 # 1 "<command-line>"
5 # 1 "./include/linux/autoconf.h" 1
6 # 1 "<command-line>" 2
7 # 1 "kernel/trace/trace.c"
8 # 14 "kernel/trace/trace.c"
9 # 1 "include/linux/utsrelease.h" 1
10 # 15 "kernel/trace/trace.c" 2
11 # 1 "include/linux/kallsyms.h" 1
12
13
14
15
16
17
18
19 # 1 "include/linux/errno.h" 1
20
21
22
23 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/errno.h" 1
24
25
26
27 # 1 "include/asm-generic/errno.h" 1
28
29
30
31 # 1 "include/asm-generic/errno-base.h" 1
32 # 5 "include/asm-generic/errno.h" 2
33 # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/errno.h" 2
34 # 5 "include/linux/errno.h" 2
35 # 9 "include/linux/kallsyms.h" 2
36 # 1 "include/linux/kernel.h" 1
37 # 10 "include/linux/kernel.h"
38 # 1 "/usr/local/src/blackfin/toolchains/20090128/bfin-uclinux/lib/gcc/bfin-uclinux/4.3.3/include/stdarg.h" 1 3 4
39 # 43 "/usr/local/src/blackfin/toolchains/20090128/bfin-uclinux/lib/gcc/bfin-uclinux/4.3.3/include/stdarg.h" 3 4
40 typedef __builtin_va_list __gnuc_va_list;
41 # 105 "/usr/local/src/blackfin/toolchains/20090128/bfin-uclinux/lib/gcc/bfin-uclinux/4.3.3/include/stdarg.h" 3 4
42 typedef __gnuc_va_list va_list;
43 # 11 "include/linux/kernel.h" 2
44 # 1 "include/linux/linkage.h" 1
45
46
47
48 # 1 "include/linux/compiler.h" 1
49 # 40 "include/linux/compiler.h"
50 # 1 "include/linux/compiler-gcc4.h" 1
51
52
53
54
55
56 # 1 "include/linux/compiler-gcc.h" 1
57 # 7 "include/linux/compiler-gcc4.h" 2
58 # 41 "include/linux/compiler.h" 2
59 # 5 "include/linux/linkage.h" 2
60 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/linkage.h" 1
61 # 6 "include/linux/linkage.h" 2
62 # 12 "include/linux/kernel.h" 2
63 # 1 "include/linux/stddef.h" 1
64 # 15 "include/linux/stddef.h"
65 enum {
66 false = 0,
67 true = 1
68 };
69 # 13 "include/linux/kernel.h" 2
70 # 1 "include/linux/types.h" 1
71 # 11 "include/linux/types.h"
72 # 1 "include/linux/posix_types.h" 1
73 # 36 "include/linux/posix_types.h"
74 typedef struct {
75 unsigned long fds_bits [(1024/(8 * sizeof(unsigned long)))];
76 } __kernel_fd_set;
77
78
79 typedef void (*__kernel_sighandler_t)(int);
80
81
82 typedef int __kernel_key_t;
83 typedef int __kernel_mqd_t;
84
85 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/posix_types.h" 1
86 # 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/posix_types.h"
87 typedef unsigned long __kernel_ino_t;
88 typedef unsigned short __kernel_mode_t;
89 typedef unsigned short __kernel_nlink_t;
90 typedef long __kernel_off_t;
91 typedef int __kernel_pid_t;
92 typedef unsigned int __kernel_ipc_pid_t;
93 typedef unsigned int __kernel_uid_t;
94 typedef unsigned int __kernel_gid_t;
95 typedef unsigned long __kernel_size_t;
96 typedef long __kernel_ssize_t;
97 typedef int __kernel_ptrdiff_t;
98 typedef long __kernel_time_t;
99 typedef long __kernel_suseconds_t;
100 typedef long __kernel_clock_t;
101 typedef int __kernel_timer_t;
102 typedef int __kernel_clockid_t;
103 typedef int __kernel_daddr_t;
104 typedef char *__kernel_caddr_t;
105 typedef unsigned short __kernel_uid16_t;
106 typedef unsigned short __kernel_gid16_t;
107 typedef unsigned int __kernel_uid32_t;
108 typedef unsigned int __kernel_gid32_t;
109
110 typedef unsigned short __kernel_old_uid_t;
111 typedef unsigned short __kernel_old_gid_t;
112 typedef unsigned short __kernel_old_dev_t;
113
114
115 typedef long long __kernel_loff_t;
116
117
118 typedef struct {
119 int val[2];
120 } __kernel_fsid_t;
121 # 48 "include/linux/posix_types.h" 2
122 # 12 "include/linux/types.h" 2
123 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
124 # 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h"
125 # 1 "include/asm-generic/int-ll64.h" 1
126 # 17 "include/asm-generic/int-ll64.h"
127 typedef __signed__ char __s8;
128 typedef unsigned char __u8;
129
130 typedef __signed__ short __s16;
131 typedef unsigned short __u16;
132
133 typedef __signed__ int __s32;
134 typedef unsigned int __u32;
135
136
137 __extension__ typedef __signed__ long long __s64;
138 __extension__ typedef unsigned long long __u64;
139 # 40 "include/asm-generic/int-ll64.h"
140 typedef signed char s8;
141 typedef unsigned char u8;
142
143 typedef signed short s16;
144 typedef unsigned short u16;
145
146 typedef signed int s32;
147 typedef unsigned int u32;
148
149 typedef signed long long s64;
150 typedef unsigned long long u64;
151 # 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 2
152
153
154
155 typedef unsigned short umode_t;
156 # 29 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h"
157 typedef u32 dma_addr_t;
158 typedef u64 dma64_addr_t;
159 # 13 "include/linux/types.h" 2
160
161
162
163 typedef __u32 __kernel_dev_t;
164
165 typedef __kernel_fd_set fd_set;
166 typedef __kernel_dev_t dev_t;
167 typedef __kernel_ino_t ino_t;
168 typedef __kernel_mode_t mode_t;
169 typedef __kernel_nlink_t nlink_t;
170 typedef __kernel_off_t off_t;
171 typedef __kernel_pid_t pid_t;
172 typedef __kernel_daddr_t daddr_t;
173 typedef __kernel_key_t key_t;
174 typedef __kernel_suseconds_t suseconds_t;
175 typedef __kernel_timer_t timer_t;
176 typedef __kernel_clockid_t clockid_t;
177 typedef __kernel_mqd_t mqd_t;
178
179
180 typedef _Bool bool;
181
182 typedef __kernel_uid32_t uid_t;
183 typedef __kernel_gid32_t gid_t;
184 typedef __kernel_uid16_t uid16_t;
185 typedef __kernel_gid16_t gid16_t;
186
187 typedef unsigned long uintptr_t;
188
189
190
191 typedef __kernel_old_uid_t old_uid_t;
192 typedef __kernel_old_gid_t old_gid_t;
193 # 57 "include/linux/types.h"
194 typedef __kernel_loff_t loff_t;
195 # 66 "include/linux/types.h"
196 typedef __kernel_size_t size_t;
197
198
199
200
201 typedef __kernel_ssize_t ssize_t;
202
203
204
205
206 typedef __kernel_ptrdiff_t ptrdiff_t;
207
208
209
210
211 typedef __kernel_time_t time_t;
212
213
214
215
216 typedef __kernel_clock_t clock_t;
217
218
219
220
221 typedef __kernel_caddr_t caddr_t;
222
223
224
225 typedef unsigned char u_char;
226 typedef unsigned short u_short;
227 typedef unsigned int u_int;
228 typedef unsigned long u_long;
229
230
231 typedef unsigned char unchar;
232 typedef unsigned short ushort;
233 typedef unsigned int uint;
234 typedef unsigned long ulong;
235
236
237
238
239 typedef __u8 u_int8_t;
240 typedef __s8 int8_t;
241 typedef __u16 u_int16_t;
242 typedef __s16 int16_t;
243 typedef __u32 u_int32_t;
244 typedef __s32 int32_t;
245
246
247
248 typedef __u8 uint8_t;
249 typedef __u16 uint16_t;
250 typedef __u32 uint32_t;
251
252
253 typedef __u64 uint64_t;
254 typedef __u64 u_int64_t;
255 typedef __s64 int64_t;
256 # 140 "include/linux/types.h"
257 typedef u64 sector_t;
258 # 151 "include/linux/types.h"
259 typedef unsigned long blkcnt_t;
260 # 180 "include/linux/types.h"
261 typedef __u16 __le16;
262 typedef __u16 __be16;
263 typedef __u32 __le32;
264 typedef __u32 __be32;
265
266 typedef __u64 __le64;
267 typedef __u64 __be64;
268
269 typedef __u16 __sum16;
270 typedef __u32 __wsum;
271
272
273 typedef unsigned gfp_t;
274 typedef unsigned fmode_t;
275
276
277
278
279 typedef u32 phys_addr_t;
280
281
282 typedef phys_addr_t resource_size_t;
283
284 struct ustat {
285 __kernel_daddr_t f_tfree;
286 __kernel_ino_t f_tinode;
287 char f_fname[6];
288 char f_fpack[6];
289 };
290 # 14 "include/linux/kernel.h" 2
291
292 # 1 "include/linux/bitops.h" 1
293 # 17 "include/linux/bitops.h"
294 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 1
295 # 9 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h"
296 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h" 1
297 # 9 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h"
298 static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u32 ___arch__swahb32(__u32 xx)
299 {
300 __u32 tmp;
301 __asm__("%1 = %0 >> 8 (V);\n\t"
302 "%0 = %0 << 8 (V);\n\t"
303 "%0 = %0 | %1;\n\t"
304 : "+d"(xx), "=&d"(tmp));
305 return xx;
306 }
307
308 static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u32 ___arch__swahw32(__u32 xx)
309 {
310 __u32 rv;
311 __asm__("%0 = PACK(%1.L, %1.H);\n\t": "=d"(rv): "d"(xx));
312 return rv;
313 }
314
315
316
317
318
319 static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u16 ___arch__swab16(__u16 xx)
320 {
321 __u32 xw = xx;
322 __asm__("%0 <<= 8;\n %0.L = %0.L + %0.H (NS);\n": "+d"(xw));
323 return (__u16)xw;
324 }
325 # 46 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h"
326 # 1 "include/linux/byteorder/little_endian.h" 1
327 # 12 "include/linux/byteorder/little_endian.h"
328 # 1 "include/linux/byteorder/swab.h" 1
329 # 64 "include/linux/byteorder/swab.h"
330 static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u16 ___swab16(__u16 x)
331 {
332 return x<<8 | x>>8;
333 }
334 static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u32 ___swab32(__u32 x)
335 {
336 return x<<24 | x>>24 |
337 (x & (__u32)0x0000ff00UL)<<8 |
338 (x & (__u32)0x00ff0000UL)>>8;
339 }
340 static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u64 ___swab64(__u64 x)
341 {
342 return x<<56 | x>>56 |
343 (x & (__u64)0x000000000000ff00ULL)<<40 |
344 (x & (__u64)0x0000000000ff0000ULL)<<24 |
345 (x & (__u64)0x00000000ff000000ULL)<< 8 |
346 (x & (__u64)0x000000ff00000000ULL)>> 8 |
347 (x & (__u64)0x0000ff0000000000ULL)>>24 |
348 (x & (__u64)0x00ff000000000000ULL)>>40;
349 }
350 # 163 "include/linux/byteorder/swab.h"
351 static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u16 __fswab16(__u16 x)
352 {
353 return ___arch__swab16(x);
354 }
355 static __inline__ __attribute__((always_inline)) __u16 __swab16p(const __u16 *x)
356 {
357 return ___arch__swab16(*(x));
358 }
359 static __inline__ __attribute__((always_inline)) void __swab16s(__u16 *addr)
360 {
361 ((void)(*(addr) = ___arch__swab16(*(addr))));
362 }
363
364 static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswab32(__u32 x)
365 {
366 return ___arch__swahb32(___arch__swahw32(x));
367 }
368 static __inline__ __attribute__((always_inline)) __u32 __swab32p(const __u32 *x)
369 {
370 return ___arch__swahb32(___arch__swahw32(*(x)));
371 }
372 static __inline__ __attribute__((always_inline)) void __swab32s(__u32 *addr)
373 {
374 ((void)(*(addr) = ___arch__swahb32(___arch__swahw32(*(addr)))));
375 }
376
377
378 static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u64 __fswab64(__u64 x)
379 {
380
381 __u32 h = x >> 32;
382 __u32 l = x & ((1ULL<<32)-1);
383 return (((__u64)(__builtin_constant_p((__u32)(l)) ? ((__u32)( (((__u32)((l)) & (__u32)0x000000ffUL) << 24) | (((__u32)((l)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((l)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((l)) & (__u32)0xff000000UL) >> 24) )) : __fswab32((l)))) << 32) | ((__u64)((__builtin_constant_p((__u32)(h)) ? ((__u32)( (((__u32)((h)) & (__u32)0x000000ffUL) << 24) | (((__u32)((h)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((h)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((h)) & (__u32)0xff000000UL) >> 24) )) : __fswab32((h)))));
384
385
386
387 }
388 static __inline__ __attribute__((always_inline)) __u64 __swab64p(const __u64 *x)
389 {
390 return ___swab64(*(x));
391 }
392 static __inline__ __attribute__((always_inline)) void __swab64s(__u64 *addr)
393 {
394 ((void)(*(addr) = ___swab64(*(addr))));
395 }
396 # 13 "include/linux/byteorder/little_endian.h" 2
397 # 1 "include/linux/byteorder/swabb.h" 1
398 # 92 "include/linux/byteorder/swabb.h"
399 static inline __attribute__((always_inline)) __u32 __fswahw32(__u32 x)
400 {
401 return ___arch__swahw32(x);
402 }
403
404 static inline __attribute__((always_inline)) __u32 __swahw32p(__u32 *x)
405 {
406 return (__builtin_constant_p((__u32)(*(x))) ? ({ __u32 __x = ((*(x))); ((__u32)( (((__u32)(__x) & (__u32)0x0000ffffUL) << 16) | (((__u32)(__x) & (__u32)0xffff0000UL) >> 16) )); }) : __fswahw32((*(x))));
407 }
408
409 static inline __attribute__((always_inline)) void __swahw32s(__u32 *addr)
410 {
411 do { *(addr) = __swahw32p((addr)); } while (0);
412 }
413
414 static inline __attribute__((always_inline)) __u32 __fswahb32(__u32 x)
415 {
416 return ___arch__swahb32(x);
417 }
418
419 static inline __attribute__((always_inline)) __u32 __swahb32p(__u32 *x)
420 {
421 return (__builtin_constant_p((__u32)(*(x))) ? ({ __u32 __x = ((*(x))); ((__u32)( (((__u32)(__x) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(__x) & (__u32)0xff00ff00UL) >> 8) )); }) : __fswahb32((*(x))));
422 }
423
424 static inline __attribute__((always_inline)) void __swahb32s(__u32 *addr)
425 {
426 do { *(addr) = __swahb32p((addr)); } while (0);
427 }
428 # 14 "include/linux/byteorder/little_endian.h" 2
429 # 44 "include/linux/byteorder/little_endian.h"
430 static inline __attribute__((always_inline)) __le64 __cpu_to_le64p(const __u64 *p)
431 {
432 return ( __le64)*p;
433 }
434 static inline __attribute__((always_inline)) __u64 __le64_to_cpup(const __le64 *p)
435 {
436 return ( __u64)*p;
437 }
438 static inline __attribute__((always_inline)) __le32 __cpu_to_le32p(const __u32 *p)
439 {
440 return ( __le32)*p;
441 }
442 static inline __attribute__((always_inline)) __u32 __le32_to_cpup(const __le32 *p)
443 {
444 return ( __u32)*p;
445 }
446 static inline __attribute__((always_inline)) __le16 __cpu_to_le16p(const __u16 *p)
447 {
448 return ( __le16)*p;
449 }
450 static inline __attribute__((always_inline)) __u16 __le16_to_cpup(const __le16 *p)
451 {
452 return ( __u16)*p;
453 }
454 static inline __attribute__((always_inline)) __be64 __cpu_to_be64p(const __u64 *p)
455 {
456 return ( __be64)__swab64p(p);
457 }
458 static inline __attribute__((always_inline)) __u64 __be64_to_cpup(const __be64 *p)
459 {
460 return __swab64p((__u64 *)p);
461 }
462 static inline __attribute__((always_inline)) __be32 __cpu_to_be32p(const __u32 *p)
463 {
464 return ( __be32)__swab32p(p);
465 }
466 static inline __attribute__((always_inline)) __u32 __be32_to_cpup(const __be32 *p)
467 {
468 return __swab32p((__u32 *)p);
469 }
470 static inline __attribute__((always_inline)) __be16 __cpu_to_be16p(const __u16 *p)
471 {
472 return ( __be16)__swab16p(p);
473 }
474 static inline __attribute__((always_inline)) __u16 __be16_to_cpup(const __be16 *p)
475 {
476 return __swab16p((__u16 *)p);
477 }
478 # 106 "include/linux/byteorder/little_endian.h"
479 # 1 "include/linux/byteorder/generic.h" 1
480 # 143 "include/linux/byteorder/generic.h"
481 static inline __attribute__((always_inline)) void le16_add_cpu(__le16 *var, u16 val)
482 {
483 *var = (( __le16)(__u16)((( __u16)(__le16)(*var)) + val));
484 }
485
486 static inline __attribute__((always_inline)) void le32_add_cpu(__le32 *var, u32 val)
487 {
488 *var = (( __le32)(__u32)((( __u32)(__le32)(*var)) + val));
489 }
490
491 static inline __attribute__((always_inline)) void le64_add_cpu(__le64 *var, u64 val)
492 {
493 *var = (( __le64)(__u64)((( __u64)(__le64)(*var)) + val));
494 }
495
496 static inline __attribute__((always_inline)) void be16_add_cpu(__be16 *var, u16 val)
497 {
498 *var = (( __be16)(__builtin_constant_p((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)((( __u16)(__be16)(*var))) & (__u16)0x00ffU) << 8) | (((__u16)((( __u16)(__be16)(*var))) & (__u16)0xff00U) >> 8) )) : __fswab16((( __u16)(__be16)(*var)))) + val))) ? ((__u16)( (((__u16)((((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)((( __u16)(__be16)(*var))) & (__u16)0x00ffU) << 8) | (((__u16)((( __u16)(__be16)(*var))) & (__u16)0xff00U) >> 8) )) : __fswab16((( __u16)(__be16)(*var)))) + val))) & (__u16)0x00ffU) << 8) | (((__u16)((((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)((( __u16)(__be16)(*var))) & (__u16)0x00ffU) << 8) | (((__u16)((( __u16)(__be16)(*var))) & (__u16)0xff00U) >> 8) )) : __fswab16((( __u16)(__be16)(*var)))) + val))) & (__u16)0xff00U) >> 8) )) : __fswab16((((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)((( __u16)(__be16)(*var))) & (__u16)0x00ffU) << 8) | (((__u16)((( __u16)(__be16)(*var))) & (__u16)0xff00U) >> 8) )) : __fswab16((( __u16)(__be16)(*var)))) + val)))));
499 }
500
501 static inline __attribute__((always_inline)) void be32_add_cpu(__be32 *var, u32 val)
502 {
503 *var = (( __be32)(__builtin_constant_p((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x000000ffUL) << 24) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((( __u32)(__be32)(*var)))) + val))) ? ((__u32)( (((__u32)((((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x000000ffUL) << 24) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((( __u32)(__be32)(*var)))) + val))) & (__u32)0x000000ffUL) << 24) | (((__u32)((((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x000000ffUL) << 24) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((( __u32)(__be32)(*var)))) + val))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x000000ffUL) << 24) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((( __u32)(__be32)(*var)))) + val))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x000000ffUL) << 24) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((( __u32)(__be32)(*var)))) + val))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x000000ffUL) << 24) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((( __u32)(__be32)(*var)))) + val)))));
504 }
505
506 static inline __attribute__((always_inline)) void be64_add_cpu(__be64 *var, u64 val)
507 {
508 *var = (( __be64)(__builtin_constant_p((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) ? ((__u64)( (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val)))));
509 }
510 # 107 "include/linux/byteorder/little_endian.h" 2
511 # 47 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h" 2
512 # 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
513
514
515
516
517
518
519
520 # 1 "include/asm-generic/bitops/ffs.h" 1
521 # 12 "include/asm-generic/bitops/ffs.h"
522 static inline __attribute__((always_inline)) int ffs(int x)
523 {
524 int r = 1;
525
526 if (!x)
527 return 0;
528 if (!(x & 0xffff)) {
529 x >>= 16;
530 r += 16;
531 }
532 if (!(x & 0xff)) {
533 x >>= 8;
534 r += 8;
535 }
536 if (!(x & 0xf)) {
537 x >>= 4;
538 r += 4;
539 }
540 if (!(x & 3)) {
541 x >>= 2;
542 r += 2;
543 }
544 if (!(x & 1)) {
545 x >>= 1;
546 r += 1;
547 }
548 return r;
549 }
550 # 18 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
551 # 1 "include/asm-generic/bitops/__ffs.h" 1
552 # 12 "include/asm-generic/bitops/__ffs.h"
553 static inline __attribute__((always_inline)) unsigned long __ffs(unsigned long word)
554 {
555 int num = 0;
556
557
558
559
560
561
562
563 if ((word & 0xffff) == 0) {
564 num += 16;
565 word >>= 16;
566 }
567 if ((word & 0xff) == 0) {
568 num += 8;
569 word >>= 8;
570 }
571 if ((word & 0xf) == 0) {
572 num += 4;
573 word >>= 4;
574 }
575 if ((word & 0x3) == 0) {
576 num += 2;
577 word >>= 2;
578 }
579 if ((word & 0x1) == 0)
580 num += 1;
581 return num;
582 }
583 # 19 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
584 # 1 "include/asm-generic/bitops/sched.h" 1
585 # 12 "include/asm-generic/bitops/sched.h"
586 static inline __attribute__((always_inline)) int sched_find_first_bit(const unsigned long *b)
587 {
588
589
590
591
592
593 if (b[0])
594 return __ffs(b[0]);
595 if (b[1])
596 return __ffs(b[1]) + 32;
597 if (b[2])
598 return __ffs(b[2]) + 64;
599 return __ffs(b[3]) + 96;
600
601
602
603 }
604 # 20 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
605 # 1 "include/asm-generic/bitops/ffz.h" 1
606 # 21 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
607 # 84 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h"
608 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 1
609 # 39 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h"
610 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/anomaly.h" 1
611 # 40 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
612 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pda.h" 1
613 # 30 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pda.h"
614 struct blackfin_pda {
615 struct blackfin_pda *next;
616
617 unsigned long syscfg;
618
619
620
621
622 unsigned long *ipdt;
623 unsigned long *ipdt_swapcount;
624 unsigned long *dpdt;
625 unsigned long *dpdt_swapcount;
626
627
628
629
630
631
632
633 unsigned long ex_iptr;
634 unsigned long ex_optr;
635 unsigned long ex_buf[4];
636 unsigned long ex_imask;
637 unsigned long *ex_stack;
638
639
640 unsigned long last_cplb_fault_retx;
641
642 unsigned long dcplb_fault_addr;
643 unsigned long icplb_fault_addr;
644 unsigned long retx;
645 unsigned long seqstat;
646 unsigned int __nmi_count;
647 };
648
649 extern struct blackfin_pda cpu_pda[];
650
651 void reserve_pda(void);
652 # 41 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
653 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" 1
654 # 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h"
655 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" 1
656 # 14 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h"
657 static inline __attribute__((always_inline)) void SSYNC(void)
658 {
659 int _tmp;
660 if ((0x0003 < 6))
661 __asm__ __volatile__(
662 "cli %0;"
663 "nop;"
664 "nop;"
665 "ssync;"
666 "sti %0;"
667 : "=d" (_tmp)
668 );
669 else if ((0x0003 < 5))
670 __asm__ __volatile__(
671 "nop;"
672 "nop;"
673 "nop;"
674 "ssync;"
675 );
676 else
677 __asm__ __volatile__("ssync;");
678 }
679
680
681 static inline __attribute__((always_inline)) void CSYNC(void)
682 {
683 int _tmp;
684 if ((0x0003 < 6))
685 __asm__ __volatile__(
686 "cli %0;"
687 "nop;"
688 "nop;"
689 "csync;"
690 "sti %0;"
691 : "=d" (_tmp)
692 );
693 else if ((0x0003 < 5))
694 __asm__ __volatile__(
695 "nop;"
696 "nop;"
697 "nop;"
698 "csync;"
699 );
700 else
701 __asm__ __volatile__("csync;");
702 }
703 # 89 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h"
704 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 1
705 # 36 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h"
706 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/bf533.h" 1
707 # 37 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2
708 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/mem_map.h" 1
709 # 38 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2
710 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/defBF532.h" 1
711 # 51 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/defBF532.h"
712 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/def_LPBlackfin.h" 1
713 # 52 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/defBF532.h" 2
714 # 39 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2
715 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/anomaly.h" 1
716 # 40 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2
717
718
719 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" 1
720 # 34 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h"
721 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" 1
722 # 35 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" 2
723
724
725
726
727
728 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_LPBlackfin.h" 1
729 # 41 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" 2
730 # 680 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h"
731 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h" 1
732 # 21 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h"
733 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/irq.h" 1
734 # 22 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h" 2
735
736 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" 1
737 # 24 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h" 2
738
739
740
741
742
743
744
745 extern unsigned long bfin_irq_flags;
746 # 283 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h"
747 static inline __attribute__((always_inline)) int irq_canonicalize(int irq)
748 {
749 return irq;
750 }
751 # 681 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" 2
752 # 692 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h"
753 static inline __attribute__((always_inline)) void bfin_write_FIO_FLAG_D(unsigned short val) { unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00700), "d" ((uint16_t)(val)) : "memory" ); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); }
754 static inline __attribute__((always_inline)) void bfin_write_FIO_FLAG_C(unsigned short val) { unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00704), "d" ((uint16_t)(val)) : "memory" ); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); }
755 static inline __attribute__((always_inline)) void bfin_write_FIO_FLAG_S(unsigned short val) { unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00708), "d" ((uint16_t)(val)) : "memory" ); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); }
756 static inline __attribute__((always_inline)) void bfin_write_FIO_FLAG_T(unsigned short val) { unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC0070C), "d" ((uint16_t)(val)) : "memory" ); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); }
757 # 708 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h"
758 static inline __attribute__((always_inline)) u16 bfin_read_FIO_FLAG_D(void) { unsigned long flags; u16 ret; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); ret = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00700) ); __v; }); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return ret; }
759 static inline __attribute__((always_inline)) u16 bfin_read_FIO_FLAG_C(void) { unsigned long flags; u16 ret; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); ret = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00704) ); __v; }); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return ret; }
760 static inline __attribute__((always_inline)) u16 bfin_read_FIO_FLAG_S(void) { unsigned long flags; u16 ret; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); ret = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00708) ); __v; }); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return ret; }
761 static inline __attribute__((always_inline)) u16 bfin_read_FIO_FLAG_T(void) { unsigned long flags; u16 ret; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); ret = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC0070C) ); __v; }); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return ret; }
762 # 725 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h"
763 static __inline__ __attribute__((always_inline)) void bfin_write_PLL_CTL(unsigned int val)
764 {
765 unsigned long flags, iwr;
766
767 if (val == ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00000) ); __v; }))
768 return;
769
770 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
771
772 iwr = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00124) ); __v; });
773
774 __asm__ __volatile__( "nop;" "[%0] = %1;" : : "a" (0xFFC00124), "d" ((1 << (0))) : "memory" );
775
776 __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00000), "d" ((uint16_t)(val)) : "memory" );
777 SSYNC();
778 asm("IDLE;");
779
780 __asm__ __volatile__( "nop;" "[%0] = %1;" : : "a" (0xFFC00124), "d" (iwr) : "memory" );
781 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
782 }
783
784
785 static __inline__ __attribute__((always_inline)) void bfin_write_VR_CTL(unsigned int val)
786 {
787 unsigned long flags, iwr;
788
789 if (val == ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00008) ); __v; }))
790 return;
791
792 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
793
794 iwr = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00124) ); __v; });
795
796 __asm__ __volatile__( "nop;" "[%0] = %1;" : : "a" (0xFFC00124), "d" ((1 << (0))) : "memory" );
797
798 __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00008), "d" ((uint16_t)(val)) : "memory" );
799 SSYNC();
800 asm("IDLE;");
801
802 __asm__ __volatile__( "nop;" "[%0] = %1;" : : "a" (0xFFC00124), "d" (iwr) : "memory" );
803 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
804 }
805 # 43 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2
806 # 90 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" 2
807 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h" 1
808 # 34 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h"
809 # 1 "include/asm-generic/sections.h" 1
810
811
812
813
814
815 extern char _text[], _stext[], _etext[];
816 extern char _data[], _sdata[], _edata[];
817 extern char __bss_start[], __bss_stop[];
818 extern char __init_begin[], __init_end[];
819 extern char _sinittext[], _einittext[];
820 extern char _end[];
821 extern char __per_cpu_start[], __per_cpu_end[];
822 extern char __kprobes_text_start[], __kprobes_text_end[];
823 extern char __initdata_begin[], __initdata_end[];
824 extern char __start_rodata[], __end_rodata[];
825 # 35 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h" 2
826 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ptrace.h" 1
827 # 24 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ptrace.h"
828 struct pt_regs {
829 long orig_pc;
830 long ipend;
831 long seqstat;
832 long rete;
833 long retn;
834 long retx;
835 long pc;
836 long rets;
837 long reserved;
838 long astat;
839 long lb1;
840 long lb0;
841 long lt1;
842 long lt0;
843 long lc1;
844 long lc0;
845 long a1w;
846 long a1x;
847 long a0w;
848 long a0x;
849 long b3;
850 long b2;
851 long b1;
852 long b0;
853 long l3;
854 long l2;
855 long l1;
856 long l0;
857 long m3;
858 long m2;
859 long m1;
860 long m0;
861 long i3;
862 long i2;
863 long i1;
864 long i0;
865 long usp;
866 long fp;
867 long p5;
868 long p4;
869 long p3;
870 long p2;
871 long p1;
872 long p0;
873 long r7;
874 long r6;
875 long r5;
876 long r4;
877 long r3;
878 long r2;
879 long r1;
880 long r0;
881 long orig_r0;
882 long orig_p0;
883 long syscfg;
884 };
885 # 99 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ptrace.h"
886 extern void show_regs(struct pt_regs *);
887 # 36 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h" 2
888 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/user.h" 1
889 # 36 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/user.h"
890 struct user_bfinfp_struct {
891 };
892
893
894
895
896 struct user_regs_struct {
897 long r0, r1, r2, r3, r4, r5, r6, r7;
898 long p0, p1, p2, p3, p4, p5, usp, fp;
899 long i0, i1, i2, i3;
900 long l0, l1, l2, l3;
901 long b0, b1, b2, b3;
902 long m0, m1, m2, m3;
903 long a0w, a1w;
904 long a0x, a1x;
905 unsigned long rets;
906 unsigned long astat;
907 unsigned long pc;
908 unsigned long orig_p0;
909 };
910
911
912
913
914
915 struct user {
916
917
918
919 struct user_regs_struct regs;
920
921
922 unsigned long int u_tsize;
923 unsigned long int u_dsize;
924 unsigned long int u_ssize;
925 unsigned long start_code;
926 unsigned long start_stack;
927
928
929
930 long int signal;
931 int reserved;
932 unsigned long u_ar0;
933
934
935 unsigned long magic;
936 char u_comm[32];
937 };
938 # 37 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h" 2
939 # 50 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h"
940 extern void bfin_setup_caches(unsigned int cpu);
941 extern void bfin_setup_cpudata(unsigned int cpu);
942
943 extern unsigned long get_cclk(void);
944 extern unsigned long get_sclk(void);
945 extern unsigned long sclk_to_usecs(unsigned long sclk);
946 extern unsigned long usecs_to_sclk(unsigned long usecs);
947
948 extern void dump_bfin_process(struct pt_regs *regs);
949 extern void dump_bfin_mem(struct pt_regs *regs);
950 extern void dump_bfin_trace_buffer(void);
951
952
953 extern int init_arch_irq(void);
954 extern void init_exception_vectors(void);
955 extern void program_IAR(void);
956
957 extern void lower_to_irq14(void);
958 extern void bfin_return_from_exception(void);
959 extern void evt14_softirq(void);
960 extern void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
961 extern int bfin_internal_set_wake(unsigned int irq, unsigned int state);
962
963 extern void *l1_data_A_sram_alloc(size_t);
964 extern void *l1_data_B_sram_alloc(size_t);
965 extern void *l1_inst_sram_alloc(size_t);
966 extern void *l1_data_sram_alloc(size_t);
967 extern void *l1_data_sram_zalloc(size_t);
968 extern void *l2_sram_alloc(size_t);
969 extern void *l2_sram_zalloc(size_t);
970 extern int l1_data_A_sram_free(const void*);
971 extern int l1_data_B_sram_free(const void*);
972 extern int l1_inst_sram_free(const void*);
973 extern int l1_data_sram_free(const void*);
974 extern int l2_sram_free(const void *);
975 extern int sram_free(const void*);
976
977
978
979
980
981
982 extern void *sram_alloc_with_lsl(size_t, unsigned long);
983 extern int sram_free_with_lsl(const void*);
984
985 extern void *isram_memcpy(void *dest, const void *src, size_t n);
986
987 extern const char bfin_board_name[];
988
989 extern unsigned long bfin_sic_iwr[];
990 extern unsigned vr_wakeup;
991 extern u16 _bfin_swrst;
992 extern unsigned long _ramstart, _ramend, _rambase;
993 extern unsigned long memory_start, memory_end, physical_mem_end;
994 extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
995 _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
996 _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
997 _ebss_l2[], _l2_lma_start[];
998
999
1000 extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
1001
1002
1003 extern void cache_grab_lock(int way);
1004 extern void bfin_cache_lock(int way);
1005 # 91 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" 2
1006 # 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" 2
1007 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/segment.h" 1
1008 # 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" 2
1009
1010
1011 static inline __attribute__((always_inline)) unsigned long rdusp(void)
1012 {
1013 unsigned long usp;
1014
1015 __asm__ __volatile__("%0 = usp;\n\t":"=da"(usp));
1016 return usp;
1017 }
1018
1019 static inline __attribute__((always_inline)) void wrusp(unsigned long usp)
1020 {
1021 __asm__ __volatile__("usp = %0;\n\t"::"da"(usp));
1022 }
1023
1024 static inline __attribute__((always_inline)) unsigned long __get_SP(void)
1025 {
1026 unsigned long sp;
1027
1028 __asm__ __volatile__("%0 = sp;\n\t" : "=da"(sp));
1029 return sp;
1030 }
1031 # 48 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h"
1032 struct thread_struct {
1033 unsigned long ksp;
1034 unsigned long usp;
1035 unsigned short seqstat;
1036 unsigned long esp0;
1037 unsigned long pc;
1038 void * debuggerinfo;
1039 };
1040 # 94 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h"
1041 struct task_struct;
1042
1043
1044 static inline __attribute__((always_inline)) void release_thread(struct task_struct *dead_task)
1045 {
1046 }
1047
1048
1049
1050 extern int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags);
1051
1052
1053
1054
1055 static inline __attribute__((always_inline)) void exit_thread(void)
1056 {
1057 }
1058
1059
1060
1061
1062
1063
1064 unsigned long get_wchan(struct task_struct *p);
1065 # 132 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h"
1066 static inline __attribute__((always_inline)) uint32_t __attribute__((pure)) bfin_revid(void)
1067 {
1068
1069 uint32_t revid = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }) >> 28;
1070 # 154 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h"
1071 return revid;
1072 }
1073
1074 static inline __attribute__((always_inline)) uint16_t __attribute__((pure)) bfin_cpuid(void)
1075 {
1076 return (({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }) & 0x0FFFF000) >> 12;
1077 }
1078
1079 static inline __attribute__((always_inline)) uint32_t __attribute__((pure)) bfin_dspid(void)
1080 {
1081 return ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFE05000) ); __v; });
1082 }
1083
1084 static inline __attribute__((always_inline)) uint32_t __attribute__((pure)) bfin_compiled_revid(void)
1085 {
1086
1087
1088
1089
1090
1091
1092
1093 return 3;
1094 # 188 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h"
1095 }
1096 # 42 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
1097 # 133 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h"
1098 struct __xchg_dummy {
1099 unsigned long a[100];
1100 };
1101
1102
1103 static inline __attribute__((always_inline)) unsigned long __xchg(unsigned long x, volatile void *ptr,
1104 int size)
1105 {
1106 unsigned long tmp = 0;
1107 unsigned long flags = 0;
1108
1109 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
1110
1111 switch (size) {
1112 case 1:
1113 __asm__ __volatile__
1114 ("%0 = b%2 (z);\n\t"
1115 "b%2 = %1;\n\t"
1116 : "=&d" (tmp) : "d" (x), "m" (*((volatile struct __xchg_dummy *)(ptr))) : "memory");
1117 break;
1118 case 2:
1119 __asm__ __volatile__
1120 ("%0 = w%2 (z);\n\t"
1121 "w%2 = %1;\n\t"
1122 : "=&d" (tmp) : "d" (x), "m" (*((volatile struct __xchg_dummy *)(ptr))) : "memory");
1123 break;
1124 case 4:
1125 __asm__ __volatile__
1126 ("%0 = %2;\n\t"
1127 "%2 = %1;\n\t"
1128 : "=&d" (tmp) : "d" (x), "m" (*((volatile struct __xchg_dummy *)(ptr))) : "memory");
1129 break;
1130 }
1131 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
1132 return tmp;
1133 }
1134
1135 # 1 "include/asm-generic/cmpxchg-local.h" 1
1136
1137
1138
1139
1140
1141 extern unsigned long wrong_size_cmpxchg(volatile void *ptr);
1142
1143
1144
1145
1146
1147 static inline __attribute__((always_inline)) unsigned long __cmpxchg_local_generic(volatile void *ptr,
1148 unsigned long old, unsigned long new, int size)
1149 {
1150 unsigned long flags, prev;
1151
1152
1153
1154
1155 if (size == 8 && sizeof(unsigned long) != 8)
1156 wrong_size_cmpxchg(ptr);
1157
1158 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
1159 switch (size) {
1160 case 1: prev = *(u8 *)ptr;
1161 if (prev == old)
1162 *(u8 *)ptr = (u8)new;
1163 break;
1164 case 2: prev = *(u16 *)ptr;
1165 if (prev == old)
1166 *(u16 *)ptr = (u16)new;
1167 break;
1168 case 4: prev = *(u32 *)ptr;
1169 if (prev == old)
1170 *(u32 *)ptr = (u32)new;
1171 break;
1172 case 8: prev = *(u64 *)ptr;
1173 if (prev == old)
1174 *(u64 *)ptr = (u64)new;
1175 break;
1176 default:
1177 wrong_size_cmpxchg(ptr);
1178 }
1179 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
1180 return prev;
1181 }
1182
1183
1184
1185
1186 static inline __attribute__((always_inline)) u64 __cmpxchg64_local_generic(volatile void *ptr,
1187 u64 old, u64 new)
1188 {
1189 u64 prev;
1190 unsigned long flags;
1191
1192 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
1193 prev = *(u64 *)ptr;
1194 if (prev == old)
1195 *(u64 *)ptr = new;
1196 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
1197 return prev;
1198 }
1199 # 171 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
1200 # 181 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h"
1201 # 1 "include/asm-generic/cmpxchg.h" 1
1202 # 182 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
1203 # 195 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h"
1204 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/l1layout.h" 1
1205 # 17 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/l1layout.h"
1206 struct l1_scratch_task_info
1207 {
1208
1209 void *stack_start;
1210
1211
1212
1213 void *lowest_sp;
1214 };
1215 # 196 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
1216 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mem_map.h" 1
1217 # 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mem_map.h"
1218 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/mem_map.h" 1
1219 # 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mem_map.h" 2
1220 # 51 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mem_map.h"
1221 static inline __attribute__((always_inline)) ulong get_l1_scratch_start_cpu(int cpu)
1222 {
1223 return 0xFFB00000;
1224 }
1225 static inline __attribute__((always_inline)) ulong get_l1_code_start_cpu(int cpu)
1226 {
1227 return 0xFFA00000;
1228 }
1229 static inline __attribute__((always_inline)) ulong get_l1_data_a_start_cpu(int cpu)
1230 {
1231 return 0xFF800000;
1232 }
1233 static inline __attribute__((always_inline)) ulong get_l1_data_b_start_cpu(int cpu)
1234 {
1235 return 0xFF900000;
1236 }
1237 static inline __attribute__((always_inline)) ulong get_l1_scratch_start(void)
1238 {
1239 return get_l1_scratch_start_cpu(0);
1240 }
1241 static inline __attribute__((always_inline)) ulong get_l1_code_start(void)
1242 {
1243 return get_l1_code_start_cpu(0);
1244 }
1245 static inline __attribute__((always_inline)) ulong get_l1_data_a_start(void)
1246 {
1247 return get_l1_data_a_start_cpu(0);
1248 }
1249 static inline __attribute__((always_inline)) ulong get_l1_data_b_start(void)
1250 {
1251 return get_l1_data_b_start_cpu(0);
1252 }
1253 # 197 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
1254
1255 struct task_struct *resume(struct task_struct *prev, struct task_struct *next);
1256 # 85 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
1257
1258 static inline __attribute__((always_inline)) void set_bit(int nr, volatile unsigned long *addr)
1259 {
1260 int *a = (int *)addr;
1261 int mask;
1262 unsigned long flags;
1263 a += nr >> 5;
1264 mask = 1 << (nr & 0x1f);
1265 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
1266 *a |= mask;
1267 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
1268 }
1269
1270 static inline __attribute__((always_inline)) void clear_bit(int nr, volatile unsigned long *addr)
1271 {
1272 int *a = (int *)addr;
1273 int mask;
1274 unsigned long flags;
1275 a += nr >> 5;
1276 mask = 1 << (nr & 0x1f);
1277 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
1278 *a &= ~mask;
1279 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
1280 }
1281
1282 static inline __attribute__((always_inline)) void change_bit(int nr, volatile unsigned long *addr)
1283 {
1284 int mask, flags;
1285 unsigned long *ADDR = (unsigned long *)addr;
1286
1287 ADDR += nr >> 5;
1288 mask = 1 << (nr & 31);
1289 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
1290 *ADDR ^= mask;
1291 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
1292 }
1293
1294 static inline __attribute__((always_inline)) int test_and_set_bit(int nr, volatile unsigned long *addr)
1295 {
1296 int mask, retval;
1297 volatile unsigned int *a = (volatile unsigned int *)addr;
1298 unsigned long flags;
1299
1300 a += nr >> 5;
1301 mask = 1 << (nr & 0x1f);
1302 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
1303 retval = (mask & *a) != 0;
1304 *a |= mask;
1305 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
1306
1307 return retval;
1308 }
1309
1310 static inline __attribute__((always_inline)) int test_and_clear_bit(int nr, volatile unsigned long *addr)
1311 {
1312 int mask, retval;
1313 volatile unsigned int *a = (volatile unsigned int *)addr;
1314 unsigned long flags;
1315
1316 a += nr >> 5;
1317 mask = 1 << (nr & 0x1f);
1318 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
1319 retval = (mask & *a) != 0;
1320 *a &= ~mask;
1321 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
1322
1323 return retval;
1324 }
1325
1326 static inline __attribute__((always_inline)) int test_and_change_bit(int nr, volatile unsigned long *addr)
1327 {
1328 int mask, retval;
1329 volatile unsigned int *a = (volatile unsigned int *)addr;
1330 unsigned long flags;
1331
1332 a += nr >> 5;
1333 mask = 1 << (nr & 0x1f);
1334 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
1335 retval = (mask & *a) != 0;
1336 *a ^= mask;
1337 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
1338 return retval;
1339 }
1340 # 177 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h"
1341 static inline __attribute__((always_inline)) void __set_bit(int nr, volatile unsigned long *addr)
1342 {
1343 int *a = (int *)addr;
1344 int mask;
1345
1346 a += nr >> 5;
1347 mask = 1 << (nr & 0x1f);
1348 *a |= mask;
1349 }
1350
1351 static inline __attribute__((always_inline)) void __clear_bit(int nr, volatile unsigned long *addr)
1352 {
1353 int *a = (int *)addr;
1354 int mask;
1355
1356 a += nr >> 5;
1357 mask = 1 << (nr & 0x1f);
1358 *a &= ~mask;
1359 }
1360
1361 static inline __attribute__((always_inline)) void __change_bit(int nr, volatile unsigned long *addr)
1362 {
1363 int mask;
1364 unsigned long *ADDR = (unsigned long *)addr;
1365
1366 ADDR += nr >> 5;
1367 mask = 1 << (nr & 31);
1368 *ADDR ^= mask;
1369 }
1370
1371 static inline __attribute__((always_inline)) int __test_and_set_bit(int nr, volatile unsigned long *addr)
1372 {
1373 int mask, retval;
1374 volatile unsigned int *a = (volatile unsigned int *)addr;
1375
1376 a += nr >> 5;
1377 mask = 1 << (nr & 0x1f);
1378 retval = (mask & *a) != 0;
1379 *a |= mask;
1380 return retval;
1381 }
1382
1383 static inline __attribute__((always_inline)) int __test_and_clear_bit(int nr, volatile unsigned long *addr)
1384 {
1385 int mask, retval;
1386 volatile unsigned int *a = (volatile unsigned int *)addr;
1387
1388 a += nr >> 5;
1389 mask = 1 << (nr & 0x1f);
1390 retval = (mask & *a) != 0;
1391 *a &= ~mask;
1392 return retval;
1393 }
1394
1395 static inline __attribute__((always_inline)) int __test_and_change_bit(int nr,
1396 volatile unsigned long *addr)
1397 {
1398 int mask, retval;
1399 volatile unsigned int *a = (volatile unsigned int *)addr;
1400
1401 a += nr >> 5;
1402 mask = 1 << (nr & 0x1f);
1403 retval = (mask & *a) != 0;
1404 *a ^= mask;
1405 return retval;
1406 }
1407
1408 static inline __attribute__((always_inline)) int __test_bit(int nr, const void *addr)
1409 {
1410 int *a = (int *)addr;
1411 int mask;
1412
1413 a += nr >> 5;
1414 mask = 1 << (nr & 0x1f);
1415 return ((mask & *a) != 0);
1416 }
1417
1418
1419
1420
1421
1422
1423 static inline __attribute__((always_inline)) int test_bit(int nr, const void *addr)
1424 {
1425 return __test_bit(nr, addr);
1426 }
1427
1428
1429 # 1 "include/asm-generic/bitops/find.h" 1
1430 # 266 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
1431 # 1 "include/asm-generic/bitops/hweight.h" 1
1432
1433
1434
1435
1436
1437 extern unsigned int hweight32(unsigned int w);
1438 extern unsigned int hweight16(unsigned int w);
1439 extern unsigned int hweight8(unsigned int w);
1440 extern unsigned long hweight64(__u64 w);
1441 # 267 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
1442 # 1 "include/asm-generic/bitops/lock.h" 1
1443 # 268 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
1444
1445 # 1 "include/asm-generic/bitops/ext2-atomic.h" 1
1446 # 270 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
1447 # 1 "include/asm-generic/bitops/ext2-non-atomic.h" 1
1448
1449
1450
1451 # 1 "include/asm-generic/bitops/le.h" 1
1452 # 5 "include/asm-generic/bitops/ext2-non-atomic.h" 2
1453 # 271 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
1454
1455 # 1 "include/asm-generic/bitops/minix.h" 1
1456 # 273 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
1457
1458
1459
1460 # 1 "include/asm-generic/bitops/fls.h" 1
1461 # 12 "include/asm-generic/bitops/fls.h"
1462 static inline __attribute__((always_inline)) int fls(int x)
1463 {
1464 int r = 32;
1465
1466 if (!x)
1467 return 0;
1468 if (!(x & 0xffff0000u)) {
1469 x <<= 16;
1470 r -= 16;
1471 }
1472 if (!(x & 0xff000000u)) {
1473 x <<= 8;
1474 r -= 8;
1475 }
1476 if (!(x & 0xf0000000u)) {
1477 x <<= 4;
1478 r -= 4;
1479 }
1480 if (!(x & 0xc0000000u)) {
1481 x <<= 2;
1482 r -= 2;
1483 }
1484 if (!(x & 0x80000000u)) {
1485 x <<= 1;
1486 r -= 1;
1487 }
1488 return r;
1489 }
1490 # 277 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
1491 # 1 "include/asm-generic/bitops/fls64.h" 1
1492 # 18 "include/asm-generic/bitops/fls64.h"
1493 static inline __attribute__((always_inline)) int fls64(__u64 x)
1494 {
1495 __u32 h = x >> 32;
1496 if (h)
1497 return fls(h) + 32;
1498 return fls(x);
1499 }
1500 # 278 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
1501 # 18 "include/linux/bitops.h" 2
1502
1503
1504
1505
1506
1507
1508
1509 static __inline__ __attribute__((always_inline)) int get_bitmask_order(unsigned int count)
1510 {
1511 int order;
1512
1513 order = fls(count);
1514 return order;
1515 }
1516
1517 static __inline__ __attribute__((always_inline)) int get_count_order(unsigned int count)
1518 {
1519 int order;
1520
1521 order = fls(count) - 1;
1522 if (count & (count - 1))
1523 order++;
1524 return order;
1525 }
1526
1527 static inline __attribute__((always_inline)) unsigned long hweight_long(unsigned long w)
1528 {
1529 return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
1530 }
1531
1532
1533
1534
1535
1536
1537 static inline __attribute__((always_inline)) __u32 rol32(__u32 word, unsigned int shift)
1538 {
1539 return (word << shift) | (word >> (32 - shift));
1540 }
1541
1542
1543
1544
1545
1546
1547 static inline __attribute__((always_inline)) __u32 ror32(__u32 word, unsigned int shift)
1548 {
1549 return (word >> shift) | (word << (32 - shift));
1550 }
1551
1552
1553
1554
1555
1556
1557 static inline __attribute__((always_inline)) __u16 rol16(__u16 word, unsigned int shift)
1558 {
1559 return (word << shift) | (word >> (16 - shift));
1560 }
1561
1562
1563
1564
1565
1566
1567 static inline __attribute__((always_inline)) __u16 ror16(__u16 word, unsigned int shift)
1568 {
1569 return (word >> shift) | (word << (16 - shift));
1570 }
1571
1572
1573
1574
1575
1576
1577 static inline __attribute__((always_inline)) __u8 rol8(__u8 word, unsigned int shift)
1578 {
1579 return (word << shift) | (word >> (8 - shift));
1580 }
1581
1582
1583
1584
1585
1586
1587 static inline __attribute__((always_inline)) __u8 ror8(__u8 word, unsigned int shift)
1588 {
1589 return (word >> shift) | (word << (8 - shift));
1590 }
1591
1592 static inline __attribute__((always_inline)) unsigned fls_long(unsigned long l)
1593 {
1594 if (sizeof(l) == 4)
1595 return fls(l);
1596 return fls64(l);
1597 }
1598 # 148 "include/linux/bitops.h"
1599 extern unsigned long find_next_bit(const unsigned long *addr,
1600 unsigned long size, unsigned long offset);
1601 # 158 "include/linux/bitops.h"
1602 extern unsigned long find_next_zero_bit(const unsigned long *addr,
1603 unsigned long size,
1604 unsigned long offset);
1605 # 16 "include/linux/kernel.h" 2
1606 # 1 "include/linux/log2.h" 1
1607 # 21 "include/linux/log2.h"
1608 extern __attribute__((const, noreturn))
1609 int ____ilog2_NaN(void);
1610 # 31 "include/linux/log2.h"
1611 static inline __attribute__((always_inline)) __attribute__((const))
1612 int __ilog2_u32(u32 n)
1613 {
1614 return fls(n) - 1;
1615 }
1616
1617
1618
1619 static inline __attribute__((always_inline)) __attribute__((const))
1620 int __ilog2_u64(u64 n)
1621 {
1622 return fls64(n) - 1;
1623 }
1624
1625
1626
1627
1628
1629
1630
1631 static inline __attribute__((always_inline)) __attribute__((const))
1632 bool is_power_of_2(unsigned long n)
1633 {
1634 return (n != 0 && ((n & (n - 1)) == 0));
1635 }
1636
1637
1638
1639
1640 static inline __attribute__((always_inline)) __attribute__((const))
1641 unsigned long __roundup_pow_of_two(unsigned long n)
1642 {
1643 return 1UL << fls_long(n - 1);
1644 }
1645
1646
1647
1648
1649 static inline __attribute__((always_inline)) __attribute__((const))
1650 unsigned long __rounddown_pow_of_two(unsigned long n)
1651 {
1652 return 1UL << (fls_long(n) - 1);
1653 }
1654 # 17 "include/linux/kernel.h" 2
1655 # 1 "include/linux/typecheck.h" 1
1656 # 18 "include/linux/kernel.h" 2
1657 # 1 "include/linux/ratelimit.h" 1
1658
1659
1660 # 1 "include/linux/param.h" 1
1661
1662
1663
1664 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/param.h" 1
1665 # 5 "include/linux/param.h" 2
1666 # 4 "include/linux/ratelimit.h" 2
1667
1668
1669
1670
1671 struct ratelimit_state {
1672 int interval;
1673 int burst;
1674 int printed;
1675 int missed;
1676 unsigned long begin;
1677 };
1678
1679
1680
1681
1682 extern int __ratelimit(struct ratelimit_state *rs);
1683 # 19 "include/linux/kernel.h" 2
1684 # 1 "include/linux/dynamic_printk.h" 1
1685 # 13 "include/linux/dynamic_printk.h"
1686 extern int dynamic_enabled;
1687
1688
1689
1690
1691
1692 extern long long dynamic_printk_enabled;
1693 extern long long dynamic_printk_enabled2;
1694
1695 struct mod_debug {
1696 char *modname;
1697 char *logical_modname;
1698 char *flag_names;
1699 int type;
1700 int hash;
1701 int hash2;
1702 } __attribute__((aligned(8)));
1703
1704 int register_dynamic_debug_module(char *mod_name, int type, char *share_name,
1705 char *flags, int hash, int hash2);
1706 # 78 "include/linux/dynamic_printk.h"
1707 static inline __attribute__((always_inline)) int unregister_dynamic_debug_module(const char *mod_name)
1708 {
1709 return 0;
1710 }
1711 static inline __attribute__((always_inline)) int __dynamic_dbg_enabled_helper(char *modname, int type,
1712 int value, int hash)
1713 {
1714 return 0;
1715 }
1716 # 20 "include/linux/kernel.h" 2
1717
1718 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bug.h" 1
1719 # 15 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bug.h"
1720 # 1 "include/asm-generic/bug.h" 1
1721 # 36 "include/asm-generic/bug.h"
1722 extern void warn_on_slowpath(const char *file, const int line);
1723 extern void warn_slowpath(const char *file, const int line,
1724 const char *fmt, ...) __attribute__((format(printf, 3, 4)));
1725 # 16 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bug.h" 2
1726 # 22 "include/linux/kernel.h" 2
1727
1728 extern const char linux_banner[];
1729 extern const char linux_proc_banner[];
1730 # 56 "include/linux/kernel.h"
1731 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/div64.h" 1
1732 # 1 "include/asm-generic/div64.h" 1
1733 # 35 "include/asm-generic/div64.h"
1734 extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
1735 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/div64.h" 2
1736 # 57 "include/linux/kernel.h" 2
1737 # 101 "include/linux/kernel.h"
1738 extern int console_printk[];
1739
1740
1741
1742
1743
1744
1745 struct completion;
1746 struct pt_regs;
1747 struct user;
1748 # 144 "include/linux/kernel.h"
1749 extern struct atomic_notifier_head panic_notifier_list;
1750 extern long (*panic_blink)(long time);
1751 void panic(const char * fmt, ...)
1752 __attribute__ ((noreturn, format (printf, 1, 2))) __attribute__((__cold__));
1753 extern void oops_enter(void);
1754 extern void oops_exit(void);
1755 extern int oops_may_print(void);
1756 void do_exit(long error_code)
1757 __attribute__((noreturn));
1758 void complete_and_exit(struct completion *, long)
1759 __attribute__((noreturn));
1760 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
1761 extern long simple_strtol(const char *,char **,unsigned int);
1762 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
1763 extern long long simple_strtoll(const char *,char **,unsigned int);
1764 extern int strict_strtoul(const char *, unsigned int, unsigned long *);
1765 extern int strict_strtol(const char *, unsigned int, long *);
1766 extern int strict_strtoull(const char *, unsigned int, unsigned long long *);
1767 extern int strict_strtoll(const char *, unsigned int, long long *);
1768 extern int sprintf(char * buf, const char * fmt, ...)
1769 __attribute__ ((format (printf, 2, 3)));
1770 extern int vsprintf(char *buf, const char *, va_list)
1771 __attribute__ ((format (printf, 2, 0)));
1772 extern int snprintf(char * buf, size_t size, const char * fmt, ...)
1773 __attribute__ ((format (printf, 3, 4)));
1774 extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1775 __attribute__ ((format (printf, 3, 0)));
1776 extern int scnprintf(char * buf, size_t size, const char * fmt, ...)
1777 __attribute__ ((format (printf, 3, 4)));
1778 extern int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
1779 __attribute__ ((format (printf, 3, 0)));
1780 extern char *kasprintf(gfp_t gfp, const char *fmt, ...)
1781 __attribute__ ((format (printf, 2, 3)));
1782 extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
1783
1784 extern int sscanf(const char *, const char *, ...)
1785 __attribute__ ((format (scanf, 2, 3)));
1786 extern int vsscanf(const char *, const char *, va_list)
1787 __attribute__ ((format (scanf, 2, 0)));
1788
1789 extern int get_option(char **str, int *pint);
1790 extern char *get_options(const char *str, int nints, int *ints);
1791 extern unsigned long long memparse(const char *ptr, char **retptr);
1792
1793 extern int core_kernel_text(unsigned long addr);
1794 extern int __kernel_text_address(unsigned long addr);
1795 extern int kernel_text_address(unsigned long addr);
1796 struct pid;
1797 extern struct pid *session_of_pgrp(struct pid *pgrp);
1798 # 219 "include/linux/kernel.h"
1799 int vprintk(const char *fmt, va_list args)
1800 __attribute__ ((format (printf, 1, 0)));
1801 int printk(const char * fmt, ...)
1802 __attribute__ ((format (printf, 1, 2))) __attribute__((__cold__));
1803
1804 extern struct ratelimit_state printk_ratelimit_state;
1805 extern int printk_ratelimit(void);
1806 extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
1807 unsigned int interval_msec);
1808 # 241 "include/linux/kernel.h"
1809 extern int printk_needs_cpu(int cpu);
1810 extern void printk_tick(void);
1811
1812 extern void __attribute__((format(printf, 1, 2)))
1813 early_printk(const char *fmt, ...);
1814
1815 unsigned long int_sqrt(unsigned long);
1816
1817 static inline __attribute__((always_inline)) void console_silent(void)
1818 {
1819 (console_printk[0]) = 0;
1820 }
1821
1822 static inline __attribute__((always_inline)) void console_verbose(void)
1823 {
1824 if ((console_printk[0]))
1825 (console_printk[0]) = 15;
1826 }
1827
1828 extern void bust_spinlocks(int yes);
1829 extern void wake_up_klogd(void);
1830 extern int oops_in_progress;
1831 extern int panic_timeout;
1832 extern int panic_on_oops;
1833 extern int panic_on_unrecovered_nmi;
1834 extern const char *print_tainted(void);
1835 extern void add_taint(unsigned flag);
1836 extern int test_taint(unsigned flag);
1837 extern unsigned long get_taint(void);
1838 extern int root_mountflags;
1839
1840
1841 extern enum system_states {
1842 SYSTEM_BOOTING,
1843 SYSTEM_RUNNING,
1844 SYSTEM_HALT,
1845 SYSTEM_POWER_OFF,
1846 SYSTEM_RESTART,
1847 SYSTEM_SUSPEND_DISK,
1848 } system_state;
1849 # 294 "include/linux/kernel.h"
1850 extern void dump_stack(void) __attribute__((__cold__));
1851
1852 enum {
1853 DUMP_PREFIX_NONE,
1854 DUMP_PREFIX_ADDRESS,
1855 DUMP_PREFIX_OFFSET
1856 };
1857 extern void hex_dump_to_buffer(const void *buf, size_t len,
1858 int rowsize, int groupsize,
1859 char *linebuf, size_t linebuflen, bool ascii);
1860 extern void print_hex_dump(const char *level, const char *prefix_str,
1861 int prefix_type, int rowsize, int groupsize,
1862 const void *buf, size_t len, bool ascii);
1863 extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
1864 const void *buf, size_t len);
1865
1866 extern const char hex_asc[];
1867
1868
1869
1870 static inline __attribute__((always_inline)) char *pack_hex_byte(char *buf, u8 byte)
1871 {
1872 *buf++ = hex_asc[((byte) & 0xf0) >> 4];
1873 *buf++ = hex_asc[((byte) & 0x0f)];
1874 return buf;
1875 }
1876 # 485 "include/linux/kernel.h"
1877 struct sysinfo;
1878 extern int do_sysinfo(struct sysinfo *info);
1879
1880
1881
1882
1883 struct sysinfo {
1884 long uptime;
1885 unsigned long loads[3];
1886 unsigned long totalram;
1887 unsigned long freeram;
1888 unsigned long sharedram;
1889 unsigned long bufferram;
1890 unsigned long totalswap;
1891 unsigned long freeswap;
1892 unsigned short procs;
1893 unsigned short pad;
1894 unsigned long totalhigh;
1895 unsigned long freehigh;
1896 unsigned int mem_unit;
1897 char _f[20-2*sizeof(long)-sizeof(int)];
1898 };
1899 # 10 "include/linux/kallsyms.h" 2
1900 # 41 "include/linux/kallsyms.h"
1901 static inline __attribute__((always_inline)) unsigned long kallsyms_lookup_name(const char *name)
1902 {
1903 return 0;
1904 }
1905
1906 static inline __attribute__((always_inline)) int kallsyms_lookup_size_offset(unsigned long addr,
1907 unsigned long *symbolsize,
1908 unsigned long *offset)
1909 {
1910 return 0;
1911 }
1912
1913 static inline __attribute__((always_inline)) const char *kallsyms_lookup(unsigned long addr,
1914 unsigned long *symbolsize,
1915 unsigned long *offset,
1916 char **modname, char *namebuf)
1917 {
1918 return ((void *)0);
1919 }
1920
1921 static inline __attribute__((always_inline)) int sprint_symbol(char *buffer, unsigned long addr)
1922 {
1923 *buffer = '\0';
1924 return 0;
1925 }
1926
1927 static inline __attribute__((always_inline)) int lookup_symbol_name(unsigned long addr, char *symname)
1928 {
1929 return -34;
1930 }
1931
1932 static inline __attribute__((always_inline)) int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
1933 {
1934 return -34;
1935 }
1936
1937
1938
1939
1940
1941
1942 static void __check_printsym_format(const char *fmt, ...)
1943 __attribute__((format(printf,1,2)));
1944 static inline __attribute__((always_inline)) void __check_printsym_format(const char *fmt, ...)
1945 {
1946 }
1947
1948 static inline __attribute__((always_inline)) void print_symbol(const char *fmt, unsigned long addr)
1949 {
1950 __check_printsym_format(fmt, "");
1951 ;
1952
1953 }
1954
1955
1956
1957
1958
1959 static inline __attribute__((always_inline)) void __attribute__((deprecated)) print_fn_descriptor_symbol(const char *fmt, void *addr)
1960 {
1961
1962
1963
1964 print_symbol(fmt, (unsigned long)addr);
1965 }
1966
1967 static inline __attribute__((always_inline)) void print_ip_sym(unsigned long ip)
1968 {
1969 printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
1970 }
1971 # 16 "kernel/trace/trace.c" 2
1972 # 1 "include/linux/seq_file.h" 1
1973
1974
1975
1976
1977 # 1 "include/linux/string.h" 1
1978 # 14 "include/linux/string.h"
1979 extern char *strndup_user(const char *, long);
1980
1981
1982
1983
1984 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/string.h" 1
1985 # 9 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/string.h"
1986 extern inline __attribute__((always_inline)) char *strcpy(char *dest, const char *src)
1987 {
1988 char *xdest = dest;
1989 char temp = 0;
1990
1991 __asm__ __volatile__ (
1992 "1:"
1993 "%2 = B [%1++] (Z);"
1994 "B [%0++] = %2;"
1995 "CC = %2;"
1996 "if cc jump 1b (bp);"
1997 : "+&a" (dest), "+&a" (src), "=&d" (temp)
1998 :
1999 : "memory", "CC");
2000
2001 return xdest;
2002 }
2003
2004
2005 extern inline __attribute__((always_inline)) char *strncpy(char *dest, const char *src, size_t n)
2006 {
2007 char *xdest = dest;
2008 char temp = 0;
2009
2010 if (n == 0)
2011 return xdest;
2012
2013 __asm__ __volatile__ (
2014 "1:"
2015 "%3 = B [%1++] (Z);"
2016 "B [%0++] = %3;"
2017 "CC = %3;"
2018 "if ! cc jump 2f;"
2019 "%2 += -1;"
2020 "CC = %2 == 0;"
2021 "if ! cc jump 1b (bp);"
2022 "jump 4f;"
2023 "2:"
2024
2025 "%3 = 0;"
2026 "3:"
2027 "%2 += -1;"
2028 "CC = %2 == 0;"
2029 "if cc jump 4f;"
2030 "B [%0++] = %3;"
2031 "jump 3b;"
2032 "4:"
2033 : "+&a" (dest), "+&a" (src), "+&da" (n), "=&d" (temp)
2034 :
2035 : "memory", "CC");
2036
2037 return xdest;
2038 }
2039
2040
2041 extern inline __attribute__((always_inline)) int strcmp(const char *cs, const char *ct)
2042 {
2043
2044
2045
2046 int __res1, __res2;
2047
2048 __asm__ __volatile__ (
2049 "1:"
2050 "%2 = B[%0++] (Z);"
2051 "%3 = B[%1++] (Z);"
2052 "CC = %2 == %3;"
2053 "if ! cc jump 2f;"
2054 "CC = %2;"
2055 "if cc jump 1b (bp);"
2056 "jump.s 3f;"
2057 "2:"
2058 "%2 = %2 - %3;"
2059 "3:"
2060 : "+&a" (cs), "+&a" (ct), "=&d" (__res1), "=&d" (__res2)
2061 :
2062 : "memory", "CC");
2063
2064 return __res1;
2065 }
2066
2067
2068 extern inline __attribute__((always_inline)) int strncmp(const char *cs, const char *ct, size_t count)
2069 {
2070
2071
2072
2073 int __res1, __res2;
2074
2075 if (!count)
2076 return 0;
2077
2078 __asm__ __volatile__ (
2079 "1:"
2080 "%3 = B[%0++] (Z);"
2081 "%4 = B[%1++] (Z);"
2082 "CC = %3 == %4;"
2083 "if ! cc jump 3f;"
2084 "CC = %3;"
2085 "if ! cc jump 4f;"
2086 "%2 += -1;"
2087 "CC = %2 == 0;"
2088 "if ! cc jump 1b;"
2089 "2:"
2090 "%3 = 0;"
2091 "jump.s 4f;"
2092 "3:"
2093 "%3 = %3 - %4;"
2094 "4:"
2095 : "+&a" (cs), "+&a" (ct), "+&da" (count), "=&d" (__res1), "=&d" (__res2)
2096 :
2097 : "memory", "CC");
2098
2099 return __res1;
2100 }
2101
2102
2103 extern void *memset(void *s, int c, size_t count);
2104
2105 extern void *memcpy(void *d, const void *s, size_t count);
2106
2107 extern int memcmp(const void *, const void *, __kernel_size_t);
2108
2109 extern void *memchr(const void *s, int c, size_t n);
2110
2111 extern void *memmove(void *dest, const void *src, size_t count);
2112 # 20 "include/linux/string.h" 2
2113 # 28 "include/linux/string.h"
2114 size_t strlcpy(char *, const char *, size_t);
2115
2116
2117 extern char * strcat(char *, const char *);
2118
2119
2120 extern char * strncat(char *, const char *, __kernel_size_t);
2121
2122
2123 extern size_t strlcat(char *, const char *, __kernel_size_t);
2124 # 46 "include/linux/string.h"
2125 extern int strnicmp(const char *, const char *, __kernel_size_t);
2126
2127
2128 extern int strcasecmp(const char *s1, const char *s2);
2129
2130
2131 extern int strncasecmp(const char *s1, const char *s2, size_t n);
2132
2133
2134 extern char * strchr(const char *,int);
2135
2136
2137 extern char * strnchr(const char *, size_t, int);
2138
2139
2140 extern char * strrchr(const char *,int);
2141
2142 extern char * strstrip(char *);
2143
2144 extern char * strstr(const char *,const char *);
2145
2146
2147 extern __kernel_size_t strlen(const char *);
2148
2149
2150 extern __kernel_size_t strnlen(const char *,__kernel_size_t);
2151
2152
2153 extern char * strpbrk(const char *,const char *);
2154
2155
2156 extern char * strsep(char **,const char *);
2157
2158
2159 extern __kernel_size_t strspn(const char *,const char *);
2160
2161
2162 extern __kernel_size_t strcspn(const char *,const char *);
2163 # 96 "include/linux/string.h"
2164 extern void * memscan(void *,int,__kernel_size_t);
2165 # 105 "include/linux/string.h"
2166 extern char *kstrdup(const char *s, gfp_t gfp);
2167 extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
2168 extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
2169
2170 extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
2171 extern void argv_free(char **argv);
2172
2173 extern bool sysfs_streq(const char *s1, const char *s2);
2174
2175 extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
2176 const void *from, size_t available);
2177 # 6 "include/linux/seq_file.h" 2
2178 # 1 "include/linux/mutex.h" 1
2179 # 13 "include/linux/mutex.h"
2180 # 1 "include/linux/list.h" 1
2181
2182
2183
2184
2185 # 1 "include/linux/poison.h" 1
2186 # 6 "include/linux/list.h" 2
2187 # 1 "include/linux/prefetch.h" 1
2188 # 15 "include/linux/prefetch.h"
2189 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cache.h" 1
2190 # 16 "include/linux/prefetch.h" 2
2191 # 53 "include/linux/prefetch.h"
2192 static inline __attribute__((always_inline)) void prefetch_range(void *addr, size_t len)
2193 {
2194
2195
2196
2197
2198
2199
2200
2201 }
2202 # 7 "include/linux/list.h" 2
2203 # 19 "include/linux/list.h"
2204 struct list_head {
2205 struct list_head *next, *prev;
2206 };
2207
2208
2209
2210
2211
2212
2213 static inline __attribute__((always_inline)) void INIT_LIST_HEAD(struct list_head *list)
2214 {
2215 list->next = list;
2216 list->prev = list;
2217 }
2218 # 51 "include/linux/list.h"
2219 extern void __list_add(struct list_head *new,
2220 struct list_head *prev,
2221 struct list_head *next);
2222 # 64 "include/linux/list.h"
2223 static inline __attribute__((always_inline)) void list_add(struct list_head *new, struct list_head *head)
2224 {
2225 __list_add(new, head, head->next);
2226 }
2227 # 78 "include/linux/list.h"
2228 static inline __attribute__((always_inline)) void list_add_tail(struct list_head *new, struct list_head *head)
2229 {
2230 __list_add(new, head->prev, head);
2231 }
2232 # 90 "include/linux/list.h"
2233 static inline __attribute__((always_inline)) void __list_del(struct list_head * prev, struct list_head * next)
2234 {
2235 next->prev = prev;
2236 prev->next = next;
2237 }
2238 # 110 "include/linux/list.h"
2239 extern void list_del(struct list_head *entry);
2240 # 120 "include/linux/list.h"
2241 static inline __attribute__((always_inline)) void list_replace(struct list_head *old,
2242 struct list_head *new)
2243 {
2244 new->next = old->next;
2245 new->next->prev = new;
2246 new->prev = old->prev;
2247 new->prev->next = new;
2248 }
2249
2250 static inline __attribute__((always_inline)) void list_replace_init(struct list_head *old,
2251 struct list_head *new)
2252 {
2253 list_replace(old, new);
2254 INIT_LIST_HEAD(old);
2255 }
2256
2257
2258
2259
2260
2261 static inline __attribute__((always_inline)) void list_del_init(struct list_head *entry)
2262 {
2263 __list_del(entry->prev, entry->next);
2264 INIT_LIST_HEAD(entry);
2265 }
2266
2267
2268
2269
2270
2271
2272 static inline __attribute__((always_inline)) void list_move(struct list_head *list, struct list_head *head)
2273 {
2274 __list_del(list->prev, list->next);
2275 list_add(list, head);
2276 }
2277
2278
2279
2280
2281
2282
2283 static inline __attribute__((always_inline)) void list_move_tail(struct list_head *list,
2284 struct list_head *head)
2285 {
2286 __list_del(list->prev, list->next);
2287 list_add_tail(list, head);
2288 }
2289
2290
2291
2292
2293
2294
2295 static inline __attribute__((always_inline)) int list_is_last(const struct list_head *list,
2296 const struct list_head *head)
2297 {
2298 return list->next == head;
2299 }
2300
2301
2302
2303
2304
2305 static inline __attribute__((always_inline)) int list_empty(const struct list_head *head)
2306 {
2307 return head->next == head;
2308 }
2309 # 202 "include/linux/list.h"
2310 static inline __attribute__((always_inline)) int list_empty_careful(const struct list_head *head)
2311 {
2312 struct list_head *next = head->next;
2313 return (next == head) && (next == head->prev);
2314 }
2315
2316
2317
2318
2319
2320 static inline __attribute__((always_inline)) int list_is_singular(const struct list_head *head)
2321 {
2322 return !list_empty(head) && (head->next == head->prev);
2323 }
2324
2325 static inline __attribute__((always_inline)) void __list_cut_position(struct list_head *list,
2326 struct list_head *head, struct list_head *entry)
2327 {
2328 struct list_head *new_first = entry->next;
2329 list->next = head->next;
2330 list->next->prev = list;
2331 list->prev = entry;
2332 entry->next = list;
2333 head->next = new_first;
2334 new_first->prev = head;
2335 }
2336 # 243 "include/linux/list.h"
2337 static inline __attribute__((always_inline)) void list_cut_position(struct list_head *list,
2338 struct list_head *head, struct list_head *entry)
2339 {
2340 if (list_empty(head))
2341 return;
2342 if (list_is_singular(head) &&
2343 (head->next != entry && head != entry))
2344 return;
2345 if (entry == head)
2346 INIT_LIST_HEAD(list);
2347 else
2348 __list_cut_position(list, head, entry);
2349 }
2350
2351 static inline __attribute__((always_inline)) void __list_splice(const struct list_head *list,
2352 struct list_head *prev,
2353 struct list_head *next)
2354 {
2355 struct list_head *first = list->next;
2356 struct list_head *last = list->prev;
2357
2358 first->prev = prev;
2359 prev->next = first;
2360
2361 last->next = next;
2362 next->prev = last;
2363 }
2364
2365
2366
2367
2368
2369
2370 static inline __attribute__((always_inline)) void list_splice(const struct list_head *list,
2371 struct list_head *head)
2372 {
2373 if (!list_empty(list))
2374 __list_splice(list, head, head->next);
2375 }
2376
2377
2378
2379
2380
2381
2382 static inline __attribute__((always_inline)) void list_splice_tail(struct list_head *list,
2383 struct list_head *head)
2384 {
2385 if (!list_empty(list))
2386 __list_splice(list, head->prev, head);
2387 }
2388 # 302 "include/linux/list.h"
2389 static inline __attribute__((always_inline)) void list_splice_init(struct list_head *list,
2390 struct list_head *head)
2391 {
2392 if (!list_empty(list)) {
2393 __list_splice(list, head, head->next);
2394 INIT_LIST_HEAD(list);
2395 }
2396 }
2397 # 319 "include/linux/list.h"
2398 static inline __attribute__((always_inline)) void list_splice_tail_init(struct list_head *list,
2399 struct list_head *head)
2400 {
2401 if (!list_empty(list)) {
2402 __list_splice(list, head->prev, head);
2403 INIT_LIST_HEAD(list);
2404 }
2405 }
2406 # 540 "include/linux/list.h"
2407 struct hlist_head {
2408 struct hlist_node *first;
2409 };
2410
2411 struct hlist_node {
2412 struct hlist_node *next, **pprev;
2413 };
2414
2415
2416
2417
2418 static inline __attribute__((always_inline)) void INIT_HLIST_NODE(struct hlist_node *h)
2419 {
2420 h->next = ((void *)0);
2421 h->pprev = ((void *)0);
2422 }
2423
2424 static inline __attribute__((always_inline)) int hlist_unhashed(const struct hlist_node *h)
2425 {
2426 return !h->pprev;
2427 }
2428
2429 static inline __attribute__((always_inline)) int hlist_empty(const struct hlist_head *h)
2430 {
2431 return !h->first;
2432 }
2433
2434 static inline __attribute__((always_inline)) void __hlist_del(struct hlist_node *n)
2435 {
2436 struct hlist_node *next = n->next;
2437 struct hlist_node **pprev = n->pprev;
2438 *pprev = next;
2439 if (next)
2440 next->pprev = pprev;
2441 }
2442
2443 static inline __attribute__((always_inline)) void hlist_del(struct hlist_node *n)
2444 {
2445 __hlist_del(n);
2446 n->next = ((void *) 0x00100100);
2447 n->pprev = ((void *) 0x00200200);
2448 }
2449
2450 static inline __attribute__((always_inline)) void hlist_del_init(struct hlist_node *n)
2451 {
2452 if (!hlist_unhashed(n)) {
2453 __hlist_del(n);
2454 INIT_HLIST_NODE(n);
2455 }
2456 }
2457
2458 static inline __attribute__((always_inline)) void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
2459 {
2460 struct hlist_node *first = h->first;
2461 n->next = first;
2462 if (first)
2463 first->pprev = &n->next;
2464 h->first = n;
2465 n->pprev = &h->first;
2466 }
2467
2468
2469 static inline __attribute__((always_inline)) void hlist_add_before(struct hlist_node *n,
2470 struct hlist_node *next)
2471 {
2472 n->pprev = next->pprev;
2473 n->next = next;
2474 next->pprev = &n->next;
2475 *(n->pprev) = n;
2476 }
2477
2478 static inline __attribute__((always_inline)) void hlist_add_after(struct hlist_node *n,
2479 struct hlist_node *next)
2480 {
2481 next->next = n->next;
2482 n->next = next;
2483 next->pprev = &n->next;
2484
2485 if(next->next)
2486 next->next->pprev = &next->next;
2487 }
2488
2489
2490
2491
2492
2493 static inline __attribute__((always_inline)) void hlist_move_list(struct hlist_head *old,
2494 struct hlist_head *new)
2495 {
2496 new->first = old->first;
2497 if (new->first)
2498 new->first->pprev = &new->first;
2499 old->first = ((void *)0);
2500 }
2501 # 14 "include/linux/mutex.h" 2
2502 # 1 "include/linux/spinlock_types.h" 1
2503 # 15 "include/linux/spinlock_types.h"
2504 # 1 "include/linux/spinlock_types_up.h" 1
2505 # 17 "include/linux/spinlock_types_up.h"
2506 typedef struct {
2507 volatile unsigned int slock;
2508 } raw_spinlock_t;
2509 # 31 "include/linux/spinlock_types_up.h"
2510 typedef struct {
2511
2512 } raw_rwlock_t;
2513 # 16 "include/linux/spinlock_types.h" 2
2514
2515
2516 # 1 "include/linux/lockdep.h" 1
2517 # 12 "include/linux/lockdep.h"
2518 struct task_struct;
2519 struct lockdep_map;
2520 # 321 "include/linux/lockdep.h"
2521 static inline __attribute__((always_inline)) void lockdep_off(void)
2522 {
2523 }
2524
2525 static inline __attribute__((always_inline)) void lockdep_on(void)
2526 {
2527 }
2528 # 350 "include/linux/lockdep.h"
2529 struct lock_class_key { };
2530 # 383 "include/linux/lockdep.h"
2531 static inline __attribute__((always_inline)) void early_init_irq_lock_class(void)
2532 {
2533 }
2534
2535
2536
2537
2538
2539
2540
2541 static inline __attribute__((always_inline)) void early_boot_irqs_off(void)
2542 {
2543 }
2544 static inline __attribute__((always_inline)) void early_boot_irqs_on(void)
2545 {
2546 }
2547 static inline __attribute__((always_inline)) void print_irqtrace_events(struct task_struct *curr)
2548 {
2549 }
2550 # 19 "include/linux/spinlock_types.h" 2
2551
2552 typedef struct {
2553 raw_spinlock_t raw_lock;
2554
2555
2556
2557
2558 unsigned int magic, owner_cpu;
2559 void *owner;
2560
2561
2562
2563
2564 } spinlock_t;
2565
2566
2567
2568 typedef struct {
2569 raw_rwlock_t raw_lock;
2570
2571
2572
2573
2574 unsigned int magic, owner_cpu;
2575 void *owner;
2576
2577
2578
2579
2580 } rwlock_t;
2581 # 15 "include/linux/mutex.h" 2
2582
2583
2584
2585 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h" 1
2586 # 16 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h"
2587 typedef struct { volatile int counter; } atomic_t;
2588 # 92 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h"
2589 static inline __attribute__((always_inline)) void atomic_add(int i, atomic_t *v)
2590 {
2591 long flags;
2592
2593 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
2594 v->counter += i;
2595 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
2596 }
2597
2598 static inline __attribute__((always_inline)) void atomic_sub(int i, atomic_t *v)
2599 {
2600 long flags;
2601
2602 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
2603 v->counter -= i;
2604 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
2605
2606 }
2607
2608 static inline __attribute__((always_inline)) int atomic_add_return(int i, atomic_t *v)
2609 {
2610 int __temp = 0;
2611 long flags;
2612
2613 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
2614 v->counter += i;
2615 __temp = v->counter;
2616 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
2617
2618
2619 return __temp;
2620 }
2621
2622 static inline __attribute__((always_inline)) int atomic_sub_return(int i, atomic_t *v)
2623 {
2624 int __temp = 0;
2625 long flags;
2626
2627 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
2628 v->counter -= i;
2629 __temp = v->counter;
2630 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
2631
2632 return __temp;
2633 }
2634
2635 static inline __attribute__((always_inline)) void atomic_inc(volatile atomic_t *v)
2636 {
2637 long flags;
2638
2639 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
2640 v->counter++;
2641 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
2642 }
2643
2644 static inline __attribute__((always_inline)) void atomic_dec(volatile atomic_t *v)
2645 {
2646 long flags;
2647
2648 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
2649 v->counter--;
2650 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
2651 }
2652
2653 static inline __attribute__((always_inline)) void atomic_clear_mask(unsigned int mask, atomic_t *v)
2654 {
2655 long flags;
2656
2657 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
2658 v->counter &= ~mask;
2659 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
2660 }
2661
2662 static inline __attribute__((always_inline)) void atomic_set_mask(unsigned int mask, atomic_t *v)
2663 {
2664 long flags;
2665
2666 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
2667 v->counter |= mask;
2668 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
2669 }
2670 # 212 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h"
2671 # 1 "include/asm-generic/atomic.h" 1
2672 # 141 "include/asm-generic/atomic.h"
2673 typedef atomic_t atomic_long_t;
2674
2675
2676 static inline __attribute__((always_inline)) long atomic_long_read(atomic_long_t *l)
2677 {
2678 atomic_t *v = (atomic_t *)l;
2679
2680 return (long)((v)->counter);
2681 }
2682
2683 static inline __attribute__((always_inline)) void atomic_long_set(atomic_long_t *l, long i)
2684 {
2685 atomic_t *v = (atomic_t *)l;
2686
2687 (((v)->counter) = i);
2688 }
2689
2690 static inline __attribute__((always_inline)) void atomic_long_inc(atomic_long_t *l)
2691 {
2692 atomic_t *v = (atomic_t *)l;
2693
2694 atomic_inc(v);
2695 }
2696
2697 static inline __attribute__((always_inline)) void atomic_long_dec(atomic_long_t *l)
2698 {
2699 atomic_t *v = (atomic_t *)l;
2700
2701 atomic_dec(v);
2702 }
2703
2704 static inline __attribute__((always_inline)) void atomic_long_add(long i, atomic_long_t *l)
2705 {
2706 atomic_t *v = (atomic_t *)l;
2707
2708 atomic_add(i, v);
2709 }
2710
2711 static inline __attribute__((always_inline)) void atomic_long_sub(long i, atomic_long_t *l)
2712 {
2713 atomic_t *v = (atomic_t *)l;
2714
2715 atomic_sub(i, v);
2716 }
2717
2718 static inline __attribute__((always_inline)) int atomic_long_sub_and_test(long i, atomic_long_t *l)
2719 {
2720 atomic_t *v = (atomic_t *)l;
2721
2722 return (atomic_sub_return((i), (v)) == 0);
2723 }
2724
2725 static inline __attribute__((always_inline)) int atomic_long_dec_and_test(atomic_long_t *l)
2726 {
2727 atomic_t *v = (atomic_t *)l;
2728
2729 return (atomic_sub_return(1, (v)) == 0);
2730 }
2731
2732 static inline __attribute__((always_inline)) int atomic_long_inc_and_test(atomic_long_t *l)
2733 {
2734 atomic_t *v = (atomic_t *)l;
2735
2736 return (atomic_add_return(1,(v)) == 0);
2737 }
2738
2739 static inline __attribute__((always_inline)) int atomic_long_add_negative(long i, atomic_long_t *l)
2740 {
2741 atomic_t *v = (atomic_t *)l;
2742
2743 return (atomic_add_return((i), (v)) < 0);
2744 }
2745
2746 static inline __attribute__((always_inline)) long atomic_long_add_return(long i, atomic_long_t *l)
2747 {
2748 atomic_t *v = (atomic_t *)l;
2749
2750 return (long)atomic_add_return(i, v);
2751 }
2752
2753 static inline __attribute__((always_inline)) long atomic_long_sub_return(long i, atomic_long_t *l)
2754 {
2755 atomic_t *v = (atomic_t *)l;
2756
2757 return (long)atomic_sub_return(i, v);
2758 }
2759
2760 static inline __attribute__((always_inline)) long atomic_long_inc_return(atomic_long_t *l)
2761 {
2762 atomic_t *v = (atomic_t *)l;
2763
2764 return (long)atomic_add_return(1,(v));
2765 }
2766
2767 static inline __attribute__((always_inline)) long atomic_long_dec_return(atomic_long_t *l)
2768 {
2769 atomic_t *v = (atomic_t *)l;
2770
2771 return (long)atomic_sub_return(1,(v));
2772 }
2773
2774 static inline __attribute__((always_inline)) long atomic_long_add_unless(atomic_long_t *l, long a, long u)
2775 {
2776 atomic_t *v = (atomic_t *)l;
2777
2778 return (long)({ int c, old; c = ((v)->counter); while (c != (u) && (old = ((int)((__typeof__(*((&(((v))->counter)))))__cmpxchg_local_generic(((&(((v))->counter))), (unsigned long)(((c))), (unsigned long)(((c + (a)))), sizeof(*((&(((v))->counter)))))))) != c) c = old; c != (u); });
2779 }
2780 # 213 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h" 2
2781 # 19 "include/linux/mutex.h" 2
2782 # 48 "include/linux/mutex.h"
2783 struct mutex {
2784
2785 atomic_t count;
2786 spinlock_t wait_lock;
2787 struct list_head wait_list;
2788
2789 struct thread_info *owner;
2790 const char *name;
2791 void *magic;
2792
2793
2794
2795
2796 };
2797
2798
2799
2800
2801
2802 struct mutex_waiter {
2803 struct list_head list;
2804 struct task_struct *task;
2805
2806 struct mutex *lock;
2807 void *magic;
2808
2809 };
2810
2811
2812 # 1 "include/linux/mutex-debug.h" 1
2813 # 21 "include/linux/mutex-debug.h"
2814 extern void mutex_destroy(struct mutex *lock);
2815 # 78 "include/linux/mutex.h" 2
2816 # 106 "include/linux/mutex.h"
2817 extern void __mutex_init(struct mutex *lock, const char *name,
2818 struct lock_class_key *key);
2819
2820
2821
2822
2823
2824
2825
2826 static inline __attribute__((always_inline)) int mutex_is_locked(struct mutex *lock)
2827 {
2828 return ((&lock->count)->counter) != 1;
2829 }
2830 # 135 "include/linux/mutex.h"
2831 extern void mutex_lock(struct mutex *lock);
2832 extern int __attribute__((warn_unused_result)) mutex_lock_interruptible(struct mutex *lock);
2833 extern int __attribute__((warn_unused_result)) mutex_lock_killable(struct mutex *lock);
2834 # 148 "include/linux/mutex.h"
2835 extern int mutex_trylock(struct mutex *lock);
2836 extern void mutex_unlock(struct mutex *lock);
2837 # 7 "include/linux/seq_file.h" 2
2838 # 1 "include/linux/cpumask.h" 1
2839 # 141 "include/linux/cpumask.h"
2840 # 1 "include/linux/threads.h" 1
2841 # 142 "include/linux/cpumask.h" 2
2842 # 1 "include/linux/bitmap.h" 1
2843 # 87 "include/linux/bitmap.h"
2844 extern int __bitmap_empty(const unsigned long *bitmap, int bits);
2845 extern int __bitmap_full(const unsigned long *bitmap, int bits);
2846 extern int __bitmap_equal(const unsigned long *bitmap1,
2847 const unsigned long *bitmap2, int bits);
2848 extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
2849 int bits);
2850 extern void __bitmap_shift_right(unsigned long *dst,
2851 const unsigned long *src, int shift, int bits);
2852 extern void __bitmap_shift_left(unsigned long *dst,
2853 const unsigned long *src, int shift, int bits);
2854 extern void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
2855 const unsigned long *bitmap2, int bits);
2856 extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
2857 const unsigned long *bitmap2, int bits);
2858 extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
2859 const unsigned long *bitmap2, int bits);
2860 extern void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
2861 const unsigned long *bitmap2, int bits);
2862 extern int __bitmap_intersects(const unsigned long *bitmap1,
2863 const unsigned long *bitmap2, int bits);
2864 extern int __bitmap_subset(const unsigned long *bitmap1,
2865 const unsigned long *bitmap2, int bits);
2866 extern int __bitmap_weight(const unsigned long *bitmap, int bits);
2867
2868 extern int bitmap_scnprintf(char *buf, unsigned int len,
2869 const unsigned long *src, int nbits);
2870 extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
2871 unsigned long *dst, int nbits);
2872 extern int bitmap_parse_user(const char *ubuf, unsigned int ulen,
2873 unsigned long *dst, int nbits);
2874 extern int bitmap_scnlistprintf(char *buf, unsigned int len,
2875 const unsigned long *src, int nbits);
2876 extern int bitmap_parselist(const char *buf, unsigned long *maskp,
2877 int nmaskbits);
2878 extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
2879 const unsigned long *old, const unsigned long *new, int bits);
2880 extern int bitmap_bitremap(int oldbit,
2881 const unsigned long *old, const unsigned long *new, int bits);
2882 extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
2883 const unsigned long *relmap, int bits);
2884 extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
2885 int sz, int bits);
2886 extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
2887 extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
2888 extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
2889 extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
2890
2891
2892
2893
2894
2895
2896
2897 static inline __attribute__((always_inline)) void bitmap_zero(unsigned long *dst, int nbits)
2898 {
2899 if (nbits <= 32)
2900 *dst = 0UL;
2901 else {
2902 int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long);
2903 memset(dst, 0, len);
2904 }
2905 }
2906
2907 static inline __attribute__((always_inline)) void bitmap_fill(unsigned long *dst, int nbits)
2908 {
2909 size_t nlongs = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)));
2910 if (nlongs > 1) {
2911 int len = (nlongs - 1) * sizeof(unsigned long);
2912 memset(dst, 0xff, len);
2913 }
2914 dst[nlongs - 1] = ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
2915 }
2916
2917 static inline __attribute__((always_inline)) void bitmap_copy(unsigned long *dst, const unsigned long *src,
2918 int nbits)
2919 {
2920 if (nbits <= 32)
2921 *dst = *src;
2922 else {
2923 int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long);
2924 memcpy(dst, src, len);
2925 }
2926 }
2927
2928 static inline __attribute__((always_inline)) void bitmap_and(unsigned long *dst, const unsigned long *src1,
2929 const unsigned long *src2, int nbits)
2930 {
2931 if (nbits <= 32)
2932 *dst = *src1 & *src2;
2933 else
2934 __bitmap_and(dst, src1, src2, nbits);
2935 }
2936
2937 static inline __attribute__((always_inline)) void bitmap_or(unsigned long *dst, const unsigned long *src1,
2938 const unsigned long *src2, int nbits)
2939 {
2940 if (nbits <= 32)
2941 *dst = *src1 | *src2;
2942 else
2943 __bitmap_or(dst, src1, src2, nbits);
2944 }
2945
2946 static inline __attribute__((always_inline)) void bitmap_xor(unsigned long *dst, const unsigned long *src1,
2947 const unsigned long *src2, int nbits)
2948 {
2949 if (nbits <= 32)
2950 *dst = *src1 ^ *src2;
2951 else
2952 __bitmap_xor(dst, src1, src2, nbits);
2953 }
2954
2955 static inline __attribute__((always_inline)) void bitmap_andnot(unsigned long *dst, const unsigned long *src1,
2956 const unsigned long *src2, int nbits)
2957 {
2958 if (nbits <= 32)
2959 *dst = *src1 & ~(*src2);
2960 else
2961 __bitmap_andnot(dst, src1, src2, nbits);
2962 }
2963
2964 static inline __attribute__((always_inline)) void bitmap_complement(unsigned long *dst, const unsigned long *src,
2965 int nbits)
2966 {
2967 if (nbits <= 32)
2968 *dst = ~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
2969 else
2970 __bitmap_complement(dst, src, nbits);
2971 }
2972
2973 static inline __attribute__((always_inline)) int bitmap_equal(const unsigned long *src1,
2974 const unsigned long *src2, int nbits)
2975 {
2976 if (nbits <= 32)
2977 return ! ((*src1 ^ *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
2978 else
2979 return __bitmap_equal(src1, src2, nbits);
2980 }
2981
2982 static inline __attribute__((always_inline)) int bitmap_intersects(const unsigned long *src1,
2983 const unsigned long *src2, int nbits)
2984 {
2985 if (nbits <= 32)
2986 return ((*src1 & *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )) != 0;
2987 else
2988 return __bitmap_intersects(src1, src2, nbits);
2989 }
2990
2991 static inline __attribute__((always_inline)) int bitmap_subset(const unsigned long *src1,
2992 const unsigned long *src2, int nbits)
2993 {
2994 if (nbits <= 32)
2995 return ! ((*src1 & ~(*src2)) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
2996 else
2997 return __bitmap_subset(src1, src2, nbits);
2998 }
2999
3000 static inline __attribute__((always_inline)) int bitmap_empty(const unsigned long *src, int nbits)
3001 {
3002 if (nbits <= 32)
3003 return ! (*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
3004 else
3005 return __bitmap_empty(src, nbits);
3006 }
3007
3008 static inline __attribute__((always_inline)) int bitmap_full(const unsigned long *src, int nbits)
3009 {
3010 if (nbits <= 32)
3011 return ! (~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
3012 else
3013 return __bitmap_full(src, nbits);
3014 }
3015
3016 static inline __attribute__((always_inline)) int bitmap_weight(const unsigned long *src, int nbits)
3017 {
3018 if (nbits <= 32)
3019 return hweight_long(*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
3020 return __bitmap_weight(src, nbits);
3021 }
3022
3023 static inline __attribute__((always_inline)) void bitmap_shift_right(unsigned long *dst,
3024 const unsigned long *src, int n, int nbits)
3025 {
3026 if (nbits <= 32)
3027 *dst = *src >> n;
3028 else
3029 __bitmap_shift_right(dst, src, n, nbits);
3030 }
3031
3032 static inline __attribute__((always_inline)) void bitmap_shift_left(unsigned long *dst,
3033 const unsigned long *src, int n, int nbits)
3034 {
3035 if (nbits <= 32)
3036 *dst = (*src << n) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
3037 else
3038 __bitmap_shift_left(dst, src, n, nbits);
3039 }
3040
3041 static inline __attribute__((always_inline)) int bitmap_parse(const char *buf, unsigned int buflen,
3042 unsigned long *maskp, int nmaskbits)
3043 {
3044 return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
3045 }
3046 # 143 "include/linux/cpumask.h" 2
3047
3048 typedef struct cpumask { unsigned long bits[(((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } cpumask_t;
3049 extern cpumask_t _unused_cpumask_arg_;
3050
3051
3052 static inline __attribute__((always_inline)) void __cpu_set(int cpu, volatile cpumask_t *dstp)
3053 {
3054 set_bit(cpu, dstp->bits);
3055 }
3056
3057
3058 static inline __attribute__((always_inline)) void __cpu_clear(int cpu, volatile cpumask_t *dstp)
3059 {
3060 clear_bit(cpu, dstp->bits);
3061 }
3062
3063
3064 static inline __attribute__((always_inline)) void __cpus_setall(cpumask_t *dstp, int nbits)
3065 {
3066 bitmap_fill(dstp->bits, nbits);
3067 }
3068
3069
3070 static inline __attribute__((always_inline)) void __cpus_clear(cpumask_t *dstp, int nbits)
3071 {
3072 bitmap_zero(dstp->bits, nbits);
3073 }
3074
3075
3076
3077
3078
3079 static inline __attribute__((always_inline)) int __cpu_test_and_set(int cpu, cpumask_t *addr)
3080 {
3081 return test_and_set_bit(cpu, addr->bits);
3082 }
3083
3084
3085 static inline __attribute__((always_inline)) void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
3086 const cpumask_t *src2p, int nbits)
3087 {
3088 bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
3089 }
3090
3091
3092 static inline __attribute__((always_inline)) void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
3093 const cpumask_t *src2p, int nbits)
3094 {
3095 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
3096 }
3097
3098
3099 static inline __attribute__((always_inline)) void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
3100 const cpumask_t *src2p, int nbits)
3101 {
3102 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
3103 }
3104
3105
3106
3107 static inline __attribute__((always_inline)) void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
3108 const cpumask_t *src2p, int nbits)
3109 {
3110 bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
3111 }
3112
3113
3114 static inline __attribute__((always_inline)) void __cpus_complement(cpumask_t *dstp,
3115 const cpumask_t *srcp, int nbits)
3116 {
3117 bitmap_complement(dstp->bits, srcp->bits, nbits);
3118 }
3119
3120
3121 static inline __attribute__((always_inline)) int __cpus_equal(const cpumask_t *src1p,
3122 const cpumask_t *src2p, int nbits)
3123 {
3124 return bitmap_equal(src1p->bits, src2p->bits, nbits);
3125 }
3126
3127
3128 static inline __attribute__((always_inline)) int __cpus_intersects(const cpumask_t *src1p,
3129 const cpumask_t *src2p, int nbits)
3130 {
3131 return bitmap_intersects(src1p->bits, src2p->bits, nbits);
3132 }
3133
3134
3135 static inline __attribute__((always_inline)) int __cpus_subset(const cpumask_t *src1p,
3136 const cpumask_t *src2p, int nbits)
3137 {
3138 return bitmap_subset(src1p->bits, src2p->bits, nbits);
3139 }
3140
3141
3142 static inline __attribute__((always_inline)) int __cpus_empty(const cpumask_t *srcp, int nbits)
3143 {
3144 return bitmap_empty(srcp->bits, nbits);
3145 }
3146
3147
3148 static inline __attribute__((always_inline)) int __cpus_full(const cpumask_t *srcp, int nbits)
3149 {
3150 return bitmap_full(srcp->bits, nbits);
3151 }
3152
3153
3154 static inline __attribute__((always_inline)) int __cpus_weight(const cpumask_t *srcp, int nbits)
3155 {
3156 return bitmap_weight(srcp->bits, nbits);
3157 }
3158
3159
3160
3161 static inline __attribute__((always_inline)) void __cpus_shift_right(cpumask_t *dstp,
3162 const cpumask_t *srcp, int n, int nbits)
3163 {
3164 bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
3165 }
3166
3167
3168
3169 static inline __attribute__((always_inline)) void __cpus_shift_left(cpumask_t *dstp,
3170 const cpumask_t *srcp, int n, int nbits)
3171 {
3172 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
3173 }
3174 # 278 "include/linux/cpumask.h"
3175 extern const unsigned long
3176 cpu_bit_bitmap[32 +1][(((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
3177
3178 static inline __attribute__((always_inline)) const cpumask_t *get_cpu_mask(unsigned int cpu)
3179 {
3180 const unsigned long *p = cpu_bit_bitmap[1 + cpu % 32];
3181 p -= cpu / 32;
3182 return (const cpumask_t *)p;
3183 }
3184 # 344 "include/linux/cpumask.h"
3185 static inline __attribute__((always_inline)) int __cpumask_scnprintf(char *buf, int len,
3186 const cpumask_t *srcp, int nbits)
3187 {
3188 return bitmap_scnprintf(buf, len, srcp->bits, nbits);
3189 }
3190
3191
3192
3193 static inline __attribute__((always_inline)) int __cpumask_parse_user(const char *buf, int len,
3194 cpumask_t *dstp, int nbits)
3195 {
3196 return bitmap_parse_user(buf, len, dstp->bits, nbits);
3197 }
3198
3199
3200
3201 static inline __attribute__((always_inline)) int __cpulist_scnprintf(char *buf, int len,
3202 const cpumask_t *srcp, int nbits)
3203 {
3204 return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
3205 }
3206
3207
3208 static inline __attribute__((always_inline)) int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
3209 {
3210 return bitmap_parselist(buf, dstp->bits, nbits);
3211 }
3212
3213
3214
3215 static inline __attribute__((always_inline)) int __cpu_remap(int oldbit,
3216 const cpumask_t *oldp, const cpumask_t *newp, int nbits)
3217 {
3218 return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
3219 }
3220
3221
3222
3223 static inline __attribute__((always_inline)) void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
3224 const cpumask_t *oldp, const cpumask_t *newp, int nbits)
3225 {
3226 bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
3227 }
3228
3229
3230
3231 static inline __attribute__((always_inline)) void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
3232 const cpumask_t *relmapp, int nbits)
3233 {
3234 bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
3235 }
3236
3237
3238
3239 static inline __attribute__((always_inline)) void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
3240 int sz, int nbits)
3241 {
3242 bitmap_fold(dstp->bits, origp->bits, sz, nbits);
3243 }
3244 # 504 "include/linux/cpumask.h"
3245 extern cpumask_t cpu_possible_map;
3246 extern cpumask_t cpu_online_map;
3247 extern cpumask_t cpu_present_map;
3248 extern cpumask_t cpu_active_map;
3249 # 558 "include/linux/cpumask.h"
3250 static inline __attribute__((always_inline)) unsigned int cpumask_check(unsigned int cpu)
3251 {
3252
3253
3254
3255 return cpu;
3256 }
3257
3258
3259
3260 static inline __attribute__((always_inline)) unsigned int cpumask_first(const struct cpumask *srcp)
3261 {
3262 return 0;
3263 }
3264
3265
3266 static inline __attribute__((always_inline)) unsigned int cpumask_next(int n, const struct cpumask *srcp)
3267 {
3268 return n+1;
3269 }
3270
3271 static inline __attribute__((always_inline)) unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
3272 {
3273 return n+1;
3274 }
3275
3276 static inline __attribute__((always_inline)) unsigned int cpumask_next_and(int n,
3277 const struct cpumask *srcp,
3278 const struct cpumask *andp)
3279 {
3280 return n+1;
3281 }
3282
3283
3284 static inline __attribute__((always_inline)) unsigned int cpumask_any_but(const struct cpumask *mask,
3285 unsigned int cpu)
3286 {
3287 return 1;
3288 }
3289 # 694 "include/linux/cpumask.h"
3290 static inline __attribute__((always_inline)) void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
3291 {
3292 set_bit(cpumask_check(cpu), ((dstp)->bits));
3293 }
3294
3295
3296
3297
3298
3299
3300 static inline __attribute__((always_inline)) void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
3301 {
3302 clear_bit(cpumask_check(cpu), ((dstp)->bits));
3303 }
3304 # 726 "include/linux/cpumask.h"
3305 static inline __attribute__((always_inline)) int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
3306 {
3307 return test_and_set_bit(cpumask_check(cpu), ((cpumask)->bits));
3308 }
3309
3310
3311
3312
3313
3314 static inline __attribute__((always_inline)) void cpumask_setall(struct cpumask *dstp)
3315 {
3316 bitmap_fill(((dstp)->bits), 1);
3317 }
3318
3319
3320
3321
3322
3323 static inline __attribute__((always_inline)) void cpumask_clear(struct cpumask *dstp)
3324 {
3325 bitmap_zero(((dstp)->bits), 1);
3326 }
3327
3328
3329
3330
3331
3332
3333
3334 static inline __attribute__((always_inline)) void cpumask_and(struct cpumask *dstp,
3335 const struct cpumask *src1p,
3336 const struct cpumask *src2p)
3337 {
3338 bitmap_and(((dstp)->bits), ((src1p)->bits),
3339 ((src2p)->bits), 1);
3340 }
3341
3342
3343
3344
3345
3346
3347
3348 static inline __attribute__((always_inline)) void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
3349 const struct cpumask *src2p)
3350 {
3351 bitmap_or(((dstp)->bits), ((src1p)->bits),
3352 ((src2p)->bits), 1);
3353 }
3354
3355
3356
3357
3358
3359
3360
3361 static inline __attribute__((always_inline)) void cpumask_xor(struct cpumask *dstp,
3362 const struct cpumask *src1p,
3363 const struct cpumask *src2p)
3364 {
3365 bitmap_xor(((dstp)->bits), ((src1p)->bits),
3366 ((src2p)->bits), 1);
3367 }
3368
3369
3370
3371
3372
3373
3374
3375 static inline __attribute__((always_inline)) void cpumask_andnot(struct cpumask *dstp,
3376 const struct cpumask *src1p,
3377 const struct cpumask *src2p)
3378 {
3379 bitmap_andnot(((dstp)->bits), ((src1p)->bits),
3380 ((src2p)->bits), 1);
3381 }
3382
3383
3384
3385
3386
3387
3388 static inline __attribute__((always_inline)) void cpumask_complement(struct cpumask *dstp,
3389 const struct cpumask *srcp)
3390 {
3391 bitmap_complement(((dstp)->bits), ((srcp)->bits),
3392 1);
3393 }
3394
3395
3396
3397
3398
3399
3400 static inline __attribute__((always_inline)) bool cpumask_equal(const struct cpumask *src1p,
3401 const struct cpumask *src2p)
3402 {
3403 return bitmap_equal(((src1p)->bits), ((src2p)->bits),
3404 1);
3405 }
3406
3407
3408
3409
3410
3411
3412 static inline __attribute__((always_inline)) bool cpumask_intersects(const struct cpumask *src1p,
3413 const struct cpumask *src2p)
3414 {
3415 return bitmap_intersects(((src1p)->bits), ((src2p)->bits),
3416 1);
3417 }
3418
3419
3420
3421
3422
3423
3424 static inline __attribute__((always_inline)) int cpumask_subset(const struct cpumask *src1p,
3425 const struct cpumask *src2p)
3426 {
3427 return bitmap_subset(((src1p)->bits), ((src2p)->bits),
3428 1);
3429 }
3430
3431
3432
3433
3434
3435 static inline __attribute__((always_inline)) bool cpumask_empty(const struct cpumask *srcp)
3436 {
3437 return bitmap_empty(((srcp)->bits), 1);
3438 }
3439
3440
3441
3442
3443
3444 static inline __attribute__((always_inline)) bool cpumask_full(const struct cpumask *srcp)
3445 {
3446 return bitmap_full(((srcp)->bits), 1);
3447 }
3448
3449
3450
3451
3452
3453 static inline __attribute__((always_inline)) unsigned int cpumask_weight(const struct cpumask *srcp)
3454 {
3455 return bitmap_weight(((srcp)->bits), 1);
3456 }
3457
3458
3459
3460
3461
3462
3463
3464 static inline __attribute__((always_inline)) void cpumask_shift_right(struct cpumask *dstp,
3465 const struct cpumask *srcp, int n)
3466 {
3467 bitmap_shift_right(((dstp)->bits), ((srcp)->bits), n,
3468 1);
3469 }
3470
3471
3472
3473
3474
3475
3476
3477 static inline __attribute__((always_inline)) void cpumask_shift_left(struct cpumask *dstp,
3478 const struct cpumask *srcp, int n)
3479 {
3480 bitmap_shift_left(((dstp)->bits), ((srcp)->bits), n,
3481 1);
3482 }
3483
3484
3485
3486
3487
3488
3489 static inline __attribute__((always_inline)) void cpumask_copy(struct cpumask *dstp,
3490 const struct cpumask *srcp)
3491 {
3492 bitmap_copy(((dstp)->bits), ((srcp)->bits), 1);
3493 }
3494 # 962 "include/linux/cpumask.h"
3495 static inline __attribute__((always_inline)) int __check_is_bitmap(const unsigned long *bitmap)
3496 {
3497 return 1;
3498 }
3499
3500
3501
3502
3503
3504
3505 static inline __attribute__((always_inline)) size_t cpumask_size(void)
3506 {
3507
3508
3509 return (((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(long);
3510 }
3511 # 1004 "include/linux/cpumask.h"
3512 typedef struct cpumask cpumask_var_t[1];
3513
3514 static inline __attribute__((always_inline)) bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
3515 {
3516 return true;
3517 }
3518
3519 static inline __attribute__((always_inline)) void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
3520 {
3521 }
3522
3523 static inline __attribute__((always_inline)) void free_cpumask_var(cpumask_var_t mask)
3524 {
3525 }
3526
3527 static inline __attribute__((always_inline)) void free_bootmem_cpumask_var(cpumask_var_t mask)
3528 {
3529 }
3530 # 1032 "include/linux/cpumask.h"
3531 extern const unsigned long cpu_all_bits[(((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
3532
3533
3534
3535
3536
3537
3538 static inline __attribute__((always_inline)) void set_cpu_possible(unsigned int cpu, bool possible)
3539 {
3540 if (possible)
3541 cpumask_set_cpu(cpu, &cpu_possible_map);
3542 else
3543 cpumask_clear_cpu(cpu, &cpu_possible_map);
3544 }
3545
3546 static inline __attribute__((always_inline)) void set_cpu_present(unsigned int cpu, bool present)
3547 {
3548 if (present)
3549 cpumask_set_cpu(cpu, &cpu_present_map);
3550 else
3551 cpumask_clear_cpu(cpu, &cpu_present_map);
3552 }
3553
3554 static inline __attribute__((always_inline)) void set_cpu_online(unsigned int cpu, bool online)
3555 {
3556 if (online)
3557 cpumask_set_cpu(cpu, &cpu_online_map);
3558 else
3559 cpumask_clear_cpu(cpu, &cpu_online_map);
3560 }
3561
3562 static inline __attribute__((always_inline)) void set_cpu_active(unsigned int cpu, bool active)
3563 {
3564 if (active)
3565 cpumask_set_cpu(cpu, &cpu_active_map);
3566 else
3567 cpumask_clear_cpu(cpu, &cpu_active_map);
3568 }
3569
3570 static inline __attribute__((always_inline)) void init_cpu_present(const struct cpumask *src)
3571 {
3572 cpumask_copy(&cpu_present_map, src);
3573 }
3574
3575 static inline __attribute__((always_inline)) void init_cpu_possible(const struct cpumask *src)
3576 {
3577 cpumask_copy(&cpu_possible_map, src);
3578 }
3579
3580 static inline __attribute__((always_inline)) void init_cpu_online(const struct cpumask *src)
3581 {
3582 cpumask_copy(&cpu_online_map, src);
3583 }
3584 # 8 "include/linux/seq_file.h" 2
3585 # 1 "include/linux/nodemask.h" 1
3586 # 90 "include/linux/nodemask.h"
3587 # 1 "include/linux/numa.h" 1
3588 # 91 "include/linux/nodemask.h" 2
3589
3590 typedef struct { unsigned long bits[((((1 << 0)) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } nodemask_t;
3591 extern nodemask_t _unused_nodemask_arg_;
3592
3593
3594 static inline __attribute__((always_inline)) void __node_set(int node, volatile nodemask_t *dstp)
3595 {
3596 set_bit(node, dstp->bits);
3597 }
3598
3599
3600 static inline __attribute__((always_inline)) void __node_clear(int node, volatile nodemask_t *dstp)
3601 {
3602 clear_bit(node, dstp->bits);
3603 }
3604
3605
3606 static inline __attribute__((always_inline)) void __nodes_setall(nodemask_t *dstp, int nbits)
3607 {
3608 bitmap_fill(dstp->bits, nbits);
3609 }
3610
3611
3612 static inline __attribute__((always_inline)) void __nodes_clear(nodemask_t *dstp, int nbits)
3613 {
3614 bitmap_zero(dstp->bits, nbits);
3615 }
3616
3617
3618
3619
3620
3621
3622 static inline __attribute__((always_inline)) int __node_test_and_set(int node, nodemask_t *addr)
3623 {
3624 return test_and_set_bit(node, addr->bits);
3625 }
3626
3627
3628
3629 static inline __attribute__((always_inline)) void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
3630 const nodemask_t *src2p, int nbits)
3631 {
3632 bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
3633 }
3634
3635
3636
3637 static inline __attribute__((always_inline)) void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
3638 const nodemask_t *src2p, int nbits)
3639 {
3640 bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
3641 }
3642
3643
3644
3645 static inline __attribute__((always_inline)) void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
3646 const nodemask_t *src2p, int nbits)
3647 {
3648 bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
3649 }
3650
3651
3652
3653 static inline __attribute__((always_inline)) void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
3654 const nodemask_t *src2p, int nbits)
3655 {
3656 bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
3657 }
3658
3659
3660
3661 static inline __attribute__((always_inline)) void __nodes_complement(nodemask_t *dstp,
3662 const nodemask_t *srcp, int nbits)
3663 {
3664 bitmap_complement(dstp->bits, srcp->bits, nbits);
3665 }
3666
3667
3668
3669 static inline __attribute__((always_inline)) int __nodes_equal(const nodemask_t *src1p,
3670 const nodemask_t *src2p, int nbits)
3671 {
3672 return bitmap_equal(src1p->bits, src2p->bits, nbits);
3673 }
3674
3675
3676
3677 static inline __attribute__((always_inline)) int __nodes_intersects(const nodemask_t *src1p,
3678 const nodemask_t *src2p, int nbits)
3679 {
3680 return bitmap_intersects(src1p->bits, src2p->bits, nbits);
3681 }
3682
3683
3684
3685 static inline __attribute__((always_inline)) int __nodes_subset(const nodemask_t *src1p,
3686 const nodemask_t *src2p, int nbits)
3687 {
3688 return bitmap_subset(src1p->bits, src2p->bits, nbits);
3689 }
3690
3691
3692 static inline __attribute__((always_inline)) int __nodes_empty(const nodemask_t *srcp, int nbits)
3693 {
3694 return bitmap_empty(srcp->bits, nbits);
3695 }
3696
3697
3698 static inline __attribute__((always_inline)) int __nodes_full(const nodemask_t *srcp, int nbits)
3699 {
3700 return bitmap_full(srcp->bits, nbits);
3701 }
3702
3703
3704 static inline __attribute__((always_inline)) int __nodes_weight(const nodemask_t *srcp, int nbits)
3705 {
3706 return bitmap_weight(srcp->bits, nbits);
3707 }
3708
3709
3710
3711 static inline __attribute__((always_inline)) void __nodes_shift_right(nodemask_t *dstp,
3712 const nodemask_t *srcp, int n, int nbits)
3713 {
3714 bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
3715 }
3716
3717
3718
3719 static inline __attribute__((always_inline)) void __nodes_shift_left(nodemask_t *dstp,
3720 const nodemask_t *srcp, int n, int nbits)
3721 {
3722 bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
3723 }
3724
3725
3726
3727
3728
3729 static inline __attribute__((always_inline)) int __first_node(const nodemask_t *srcp)
3730 {
3731 return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_bit((srcp->bits), ((1 << 0)), 0)); __min1 < __min2 ? __min1: __min2; });
3732 }
3733
3734
3735 static inline __attribute__((always_inline)) int __next_node(int n, const nodemask_t *srcp)
3736 {
3737 return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_bit(srcp->bits, (1 << 0), n+1)); __min1 < __min2 ? __min1: __min2; });
3738 }
3739 # 255 "include/linux/nodemask.h"
3740 static inline __attribute__((always_inline)) int __first_unset_node(const nodemask_t *maskp)
3741 {
3742 return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_zero_bit((maskp->bits), ((1 << 0)), 0)); __min1 < __min2 ? __min1: __min2; });
3743
3744 }
3745 # 289 "include/linux/nodemask.h"
3746 static inline __attribute__((always_inline)) int __nodemask_scnprintf(char *buf, int len,
3747 const nodemask_t *srcp, int nbits)
3748 {
3749 return bitmap_scnprintf(buf, len, srcp->bits, nbits);
3750 }
3751
3752
3753
3754 static inline __attribute__((always_inline)) int __nodemask_parse_user(const char *buf, int len,
3755 nodemask_t *dstp, int nbits)
3756 {
3757 return bitmap_parse_user(buf, len, dstp->bits, nbits);
3758 }
3759
3760
3761
3762 static inline __attribute__((always_inline)) int __nodelist_scnprintf(char *buf, int len,
3763 const nodemask_t *srcp, int nbits)
3764 {
3765 return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
3766 }
3767
3768
3769 static inline __attribute__((always_inline)) int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
3770 {
3771 return bitmap_parselist(buf, dstp->bits, nbits);
3772 }
3773
3774
3775
3776 static inline __attribute__((always_inline)) int __node_remap(int oldbit,
3777 const nodemask_t *oldp, const nodemask_t *newp, int nbits)
3778 {
3779 return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
3780 }
3781
3782
3783
3784 static inline __attribute__((always_inline)) void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
3785 const nodemask_t *oldp, const nodemask_t *newp, int nbits)
3786 {
3787 bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
3788 }
3789
3790
3791
3792 static inline __attribute__((always_inline)) void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
3793 const nodemask_t *relmapp, int nbits)
3794 {
3795 bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
3796 }
3797
3798
3799
3800 static inline __attribute__((always_inline)) void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
3801 int sz, int nbits)
3802 {
3803 bitmap_fold(dstp->bits, origp->bits, sz, nbits);
3804 }
3805 # 363 "include/linux/nodemask.h"
3806 enum node_states {
3807 N_POSSIBLE,
3808 N_ONLINE,
3809 N_NORMAL_MEMORY,
3810
3811
3812
3813 N_HIGH_MEMORY = N_NORMAL_MEMORY,
3814
3815 N_CPU,
3816 NR_NODE_STATES
3817 };
3818
3819
3820
3821
3822
3823
3824 extern nodemask_t node_states[NR_NODE_STATES];
3825 # 413 "include/linux/nodemask.h"
3826 static inline __attribute__((always_inline)) int node_state(int node, enum node_states state)
3827 {
3828 return node == 0;
3829 }
3830
3831 static inline __attribute__((always_inline)) void node_set_state(int node, enum node_states state)
3832 {
3833 }
3834
3835 static inline __attribute__((always_inline)) void node_clear_state(int node, enum node_states state)
3836 {
3837 }
3838
3839 static inline __attribute__((always_inline)) int num_node_state(enum node_states state)
3840 {
3841 return 1;
3842 }
3843 # 9 "include/linux/seq_file.h" 2
3844
3845 struct seq_operations;
3846 struct file;
3847 struct path;
3848 struct inode;
3849 struct dentry;
3850
3851 struct seq_file {
3852 char *buf;
3853 size_t size;
3854 size_t from;
3855 size_t count;
3856 loff_t index;
3857 u64 version;
3858 struct mutex lock;
3859 const struct seq_operations *op;
3860 void *private;
3861 };
3862
3863 struct seq_operations {
3864 void * (*start) (struct seq_file *m, loff_t *pos);
3865 void (*stop) (struct seq_file *m, void *v);
3866 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
3867 int (*show) (struct seq_file *m, void *v);
3868 };
3869
3870
3871
3872 int seq_open(struct file *, const struct seq_operations *);
3873 ssize_t seq_read(struct file *, char *, size_t, loff_t *);
3874 loff_t seq_lseek(struct file *, loff_t, int);
3875 int seq_release(struct inode *, struct file *);
3876 int seq_escape(struct seq_file *, const char *, const char *);
3877 int seq_putc(struct seq_file *m, char c);
3878 int seq_puts(struct seq_file *m, const char *s);
3879
3880 int seq_printf(struct seq_file *, const char *, ...)
3881 __attribute__ ((format (printf,2,3)));
3882
3883 int seq_path(struct seq_file *, struct path *, char *);
3884 int seq_dentry(struct seq_file *, struct dentry *, char *);
3885 int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
3886 char *esc);
3887 int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits);
3888 static inline __attribute__((always_inline)) int seq_cpumask(struct seq_file *m, cpumask_t *mask)
3889 {
3890 return seq_bitmap(m, mask->bits, 1);
3891 }
3892
3893 static inline __attribute__((always_inline)) int seq_nodemask(struct seq_file *m, nodemask_t *mask)
3894 {
3895 return seq_bitmap(m, mask->bits, (1 << 0));
3896 }
3897
3898 int seq_bitmap_list(struct seq_file *m, unsigned long *bits,
3899 unsigned int nr_bits);
3900
3901 static inline __attribute__((always_inline)) int seq_cpumask_list(struct seq_file *m, cpumask_t *mask)
3902 {
3903 return seq_bitmap_list(m, mask->bits, 1);
3904 }
3905
3906 static inline __attribute__((always_inline)) int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
3907 {
3908 return seq_bitmap_list(m, mask->bits, (1 << 0));
3909 }
3910
3911 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
3912 int single_release(struct inode *, struct file *);
3913 void *__seq_open_private(struct file *, const struct seq_operations *, int);
3914 int seq_open_private(struct file *, const struct seq_operations *, int);
3915 int seq_release_private(struct inode *, struct file *);
3916
3917
3918
3919
3920
3921
3922
3923 extern struct list_head *seq_list_start(struct list_head *head,
3924 loff_t pos);
3925 extern struct list_head *seq_list_start_head(struct list_head *head,
3926 loff_t pos);
3927 extern struct list_head *seq_list_next(void *v, struct list_head *head,
3928 loff_t *ppos);
3929 # 17 "kernel/trace/trace.c" 2
3930 # 1 "include/linux/notifier.h" 1
3931 # 14 "include/linux/notifier.h"
3932 # 1 "include/linux/rwsem.h" 1
3933 # 17 "include/linux/rwsem.h"
3934 struct rw_semaphore;
3935
3936
3937 # 1 "include/linux/rwsem-spinlock.h" 1
3938 # 15 "include/linux/rwsem-spinlock.h"
3939 # 1 "include/linux/spinlock.h" 1
3940 # 50 "include/linux/spinlock.h"
3941 # 1 "include/linux/preempt.h" 1
3942 # 9 "include/linux/preempt.h"
3943 # 1 "include/linux/thread_info.h" 1
3944 # 12 "include/linux/thread_info.h"
3945 struct timespec;
3946 struct compat_timespec;
3947
3948
3949
3950
3951 struct restart_block {
3952 long (*fn)(struct restart_block *);
3953 union {
3954 struct {
3955 unsigned long arg0, arg1, arg2, arg3;
3956 };
3957
3958 struct {
3959 u32 *uaddr;
3960 u32 val;
3961 u32 flags;
3962 u32 bitset;
3963 u64 time;
3964 } futex;
3965
3966 struct {
3967 clockid_t index;
3968 struct timespec *rmtp;
3969
3970
3971
3972 u64 expires;
3973 } nanosleep;
3974
3975 struct {
3976 struct pollfd *ufds;
3977 int nfds;
3978 int has_timeout;
3979 unsigned long tv_sec;
3980 unsigned long tv_nsec;
3981 } poll;
3982 };
3983 };
3984
3985 extern long do_no_restart_syscall(struct restart_block *parm);
3986
3987
3988 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h" 1
3989 # 30 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h"
3990 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" 1
3991 # 14 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h"
3992 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/setup.h" 1
3993 # 15 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" 2
3994 # 30 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h"
3995 typedef struct {
3996 unsigned long pte;
3997 } pte_t;
3998 typedef struct {
3999 unsigned long pmd[16];
4000 } pmd_t;
4001 typedef struct {
4002 unsigned long pgd;
4003 } pgd_t;
4004 typedef struct {
4005 unsigned long pgprot;
4006 } pgprot_t;
4007 typedef struct page *pgtable_t;
4008 # 54 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h"
4009 extern unsigned long memory_start;
4010 extern unsigned long memory_end;
4011
4012
4013
4014 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page_offset.h" 1
4015 # 60 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" 2
4016 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/io.h" 1
4017 # 23 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/io.h"
4018 static inline __attribute__((always_inline)) unsigned char readb(const volatile void *addr)
4019 {
4020 unsigned int val;
4021 int tmp;
4022
4023 __asm__ __volatile__ ("cli %1;\n\t"
4024 "NOP; NOP; SSYNC;\n\t"
4025 "%0 = b [%2] (z);\n\t"
4026 "sti %1;\n\t"
4027 : "=d"(val), "=d"(tmp): "a"(addr)
4028 );
4029
4030 return (unsigned char) val;
4031 }
4032
4033 static inline __attribute__((always_inline)) unsigned short readw(const volatile void *addr)
4034 {
4035 unsigned int val;
4036 int tmp;
4037
4038 __asm__ __volatile__ ("cli %1;\n\t"
4039 "NOP; NOP; SSYNC;\n\t"
4040 "%0 = w [%2] (z);\n\t"
4041 "sti %1;\n\t"
4042 : "=d"(val), "=d"(tmp): "a"(addr)
4043 );
4044
4045 return (unsigned short) val;
4046 }
4047
4048 static inline __attribute__((always_inline)) unsigned int readl(const volatile void *addr)
4049 {
4050 unsigned int val;
4051 int tmp;
4052
4053 __asm__ __volatile__ ("cli %1;\n\t"
4054 "NOP; NOP; SSYNC;\n\t"
4055 "%0 = [%2];\n\t"
4056 "sti %1;\n\t"
4057 : "=d"(val), "=d"(tmp): "a"(addr)
4058 );
4059 return val;
4060 }
4061 # 120 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/io.h"
4062 extern void outsb(unsigned long port, const void *addr, unsigned long count);
4063 extern void outsw(unsigned long port, const void *addr, unsigned long count);
4064 extern void outsw_8(unsigned long port, const void *addr, unsigned long count);
4065 extern void outsl(unsigned long port, const void *addr, unsigned long count);
4066
4067 extern void insb(unsigned long port, void *addr, unsigned long count);
4068 extern void insw(unsigned long port, void *addr, unsigned long count);
4069 extern void insw_8(unsigned long port, void *addr, unsigned long count);
4070 extern void insl(unsigned long port, void *addr, unsigned long count);
4071 extern void insl_16(unsigned long port, void *addr, unsigned long count);
4072
4073 extern void dma_outsb(unsigned long port, const void *addr, unsigned short count);
4074 extern void dma_outsw(unsigned long port, const void *addr, unsigned short count);
4075 extern void dma_outsl(unsigned long port, const void *addr, unsigned short count);
4076
4077 extern void dma_insb(unsigned long port, void *addr, unsigned short count);
4078 extern void dma_insw(unsigned long port, void *addr, unsigned short count);
4079 extern void dma_insl(unsigned long port, void *addr, unsigned short count);
4080
4081 static inline __attribute__((always_inline)) void readsl(const void *addr, void *buf, int len)
4082 {
4083 insl((unsigned long)addr, buf, len);
4084 }
4085
4086 static inline __attribute__((always_inline)) void readsw(const void *addr, void *buf, int len)
4087 {
4088 insw((unsigned long)addr, buf, len);
4089 }
4090
4091 static inline __attribute__((always_inline)) void readsb(const void *addr, void *buf, int len)
4092 {
4093 insb((unsigned long)addr, buf, len);
4094 }
4095
4096 static inline __attribute__((always_inline)) void writesl(const void *addr, const void *buf, int len)
4097 {
4098 outsl((unsigned long)addr, buf, len);
4099 }
4100
4101 static inline __attribute__((always_inline)) void writesw(const void *addr, const void *buf, int len)
4102 {
4103 outsw((unsigned long)addr, buf, len);
4104 }
4105
4106 static inline __attribute__((always_inline)) void writesb(const void *addr, const void *buf, int len)
4107 {
4108 outsb((unsigned long)addr, buf, len);
4109 }
4110
4111
4112
4113
4114 static inline __attribute__((always_inline)) void *__ioremap(unsigned long physaddr, unsigned long size,
4115 int cacheflag)
4116 {
4117 return (void *)physaddr;
4118 }
4119
4120
4121
4122
4123 static inline __attribute__((always_inline)) void iounmap(void *addr)
4124 {
4125 }
4126
4127
4128
4129
4130
4131
4132 static inline __attribute__((always_inline)) void __iounmap(void *addr, unsigned long size)
4133 {
4134 }
4135
4136
4137
4138
4139
4140
4141 static inline __attribute__((always_inline)) void kernel_set_cachemode(void *addr, unsigned long size,
4142 int cmode)
4143 {
4144 }
4145
4146 static inline __attribute__((always_inline)) void *ioremap(unsigned long physaddr, unsigned long size)
4147 {
4148 return __ioremap(physaddr, size, 1);
4149 }
4150 static inline __attribute__((always_inline)) void *ioremap_nocache(unsigned long physaddr,
4151 unsigned long size)
4152 {
4153 return __ioremap(physaddr, size, 1);
4154 }
4155
4156 extern void blkfin_inv_cache_all(void);
4157 # 61 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" 2
4158 # 84 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h"
4159 # 1 "include/asm-generic/page.h" 1
4160 # 9 "include/asm-generic/page.h"
4161 static __inline__ __attribute__((always_inline)) __attribute__((__const__)) int get_order(unsigned long size)
4162 {
4163 int order;
4164
4165 size = (size - 1) >> (12 - 1);
4166 order = -1;
4167 do {
4168 size >>= 1;
4169 order++;
4170 } while (size);
4171 return order;
4172 }
4173 # 85 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" 2
4174 # 31 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h" 2
4175 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/entry.h" 1
4176 # 32 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h" 2
4177 # 51 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h"
4178 typedef unsigned long mm_segment_t;
4179
4180
4181
4182
4183
4184
4185 struct thread_info {
4186 struct task_struct *task;
4187 struct exec_domain *exec_domain;
4188 unsigned long flags;
4189 int cpu;
4190 int preempt_count;
4191 mm_segment_t addr_limit;
4192 struct restart_block restart_block;
4193
4194 struct l1_scratch_task_info l1_task_info;
4195
4196 };
4197 # 92 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h"
4198 __attribute__((__const__))
4199 static inline __attribute__((always_inline)) struct thread_info *current_thread_info(void)
4200 {
4201 struct thread_info *ti;
4202 __asm__("%0 = sp;" : "=da"(ti) :
4203 );
4204 return (struct thread_info *)((long)ti & ~((long)8192 -1));
4205 }
4206 # 56 "include/linux/thread_info.h" 2
4207 # 64 "include/linux/thread_info.h"
4208 static inline __attribute__((always_inline)) void set_ti_thread_flag(struct thread_info *ti, int flag)
4209 {
4210 set_bit(flag, (unsigned long *)&ti->flags);
4211 }
4212
4213 static inline __attribute__((always_inline)) void clear_ti_thread_flag(struct thread_info *ti, int flag)
4214 {
4215 clear_bit(flag, (unsigned long *)&ti->flags);
4216 }
4217
4218 static inline __attribute__((always_inline)) int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
4219 {
4220 return test_and_set_bit(flag, (unsigned long *)&ti->flags);
4221 }
4222
4223 static inline __attribute__((always_inline)) int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
4224 {
4225 return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
4226 }
4227
4228 static inline __attribute__((always_inline)) int test_ti_thread_flag(struct thread_info *ti, int flag)
4229 {
4230 return test_bit(flag, (unsigned long *)&ti->flags);
4231 }
4232 # 121 "include/linux/thread_info.h"
4233 static inline __attribute__((always_inline)) void set_restore_sigmask(void)
4234 {
4235 set_ti_thread_flag(current_thread_info(), 5);
4236 set_ti_thread_flag(current_thread_info(), 1);
4237 }
4238 # 10 "include/linux/preempt.h" 2
4239 # 51 "include/linux/spinlock.h" 2
4240
4241
4242
4243
4244 # 1 "include/linux/stringify.h" 1
4245 # 56 "include/linux/spinlock.h" 2
4246 # 1 "include/linux/bottom_half.h" 1
4247
4248
4249
4250 extern void local_bh_disable(void);
4251 extern void __local_bh_enable(void);
4252 extern void _local_bh_enable(void);
4253 extern void local_bh_enable(void);
4254 extern void local_bh_enable_ip(unsigned long ip);
4255 # 57 "include/linux/spinlock.h" 2
4256 # 82 "include/linux/spinlock.h"
4257 extern int __attribute__((section(".spinlock.text"))) generic__raw_read_trylock(raw_rwlock_t *lock);
4258
4259
4260
4261
4262
4263
4264
4265 # 1 "include/linux/spinlock_up.h" 1
4266 # 23 "include/linux/spinlock_up.h"
4267 static inline __attribute__((always_inline)) void __raw_spin_lock(raw_spinlock_t *lock)
4268 {
4269 lock->slock = 0;
4270 }
4271
4272 static inline __attribute__((always_inline)) void
4273 __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
4274 {
4275 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
4276 lock->slock = 0;
4277 }
4278
4279 static inline __attribute__((always_inline)) int __raw_spin_trylock(raw_spinlock_t *lock)
4280 {
4281 char oldval = lock->slock;
4282
4283 lock->slock = 0;
4284
4285 return oldval > 0;
4286 }
4287
4288 static inline __attribute__((always_inline)) void __raw_spin_unlock(raw_spinlock_t *lock)
4289 {
4290 lock->slock = 1;
4291 }
4292 # 91 "include/linux/spinlock.h" 2
4293
4294
4295
4296 extern void __spin_lock_init(spinlock_t *lock, const char *name,
4297 struct lock_class_key *key);
4298 # 109 "include/linux/spinlock.h"
4299 extern void __rwlock_init(rwlock_t *lock, const char *name,
4300 struct lock_class_key *key);
4301 # 140 "include/linux/spinlock.h"
4302 # 1 "include/linux/spinlock_api_smp.h" 1
4303 # 18 "include/linux/spinlock_api_smp.h"
4304 int in_lock_functions(unsigned long addr);
4305
4306
4307
4308 void __attribute__((section(".spinlock.text"))) _spin_lock(spinlock_t *lock) ;
4309 void __attribute__((section(".spinlock.text"))) _spin_lock_nested(spinlock_t *lock, int subclass)
4310 ;
4311 void __attribute__((section(".spinlock.text"))) _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
4312 ;
4313 void __attribute__((section(".spinlock.text"))) _read_lock(rwlock_t *lock) ;
4314 void __attribute__((section(".spinlock.text"))) _write_lock(rwlock_t *lock) ;
4315 void __attribute__((section(".spinlock.text"))) _spin_lock_bh(spinlock_t *lock) ;
4316 void __attribute__((section(".spinlock.text"))) _read_lock_bh(rwlock_t *lock) ;
4317 void __attribute__((section(".spinlock.text"))) _write_lock_bh(rwlock_t *lock) ;
4318 void __attribute__((section(".spinlock.text"))) _spin_lock_irq(spinlock_t *lock) ;
4319 void __attribute__((section(".spinlock.text"))) _read_lock_irq(rwlock_t *lock) ;
4320 void __attribute__((section(".spinlock.text"))) _write_lock_irq(rwlock_t *lock) ;
4321 unsigned long __attribute__((section(".spinlock.text"))) _spin_lock_irqsave(spinlock_t *lock)
4322 ;
4323 unsigned long __attribute__((section(".spinlock.text"))) _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
4324 ;
4325 unsigned long __attribute__((section(".spinlock.text"))) _read_lock_irqsave(rwlock_t *lock)
4326 ;
4327 unsigned long __attribute__((section(".spinlock.text"))) _write_lock_irqsave(rwlock_t *lock)
4328 ;
4329 int __attribute__((section(".spinlock.text"))) _spin_trylock(spinlock_t *lock);
4330 int __attribute__((section(".spinlock.text"))) _read_trylock(rwlock_t *lock);
4331 int __attribute__((section(".spinlock.text"))) _write_trylock(rwlock_t *lock);
4332 int __attribute__((section(".spinlock.text"))) _spin_trylock_bh(spinlock_t *lock);
4333 void __attribute__((section(".spinlock.text"))) _spin_unlock(spinlock_t *lock) ;
4334 void __attribute__((section(".spinlock.text"))) _read_unlock(rwlock_t *lock) ;
4335 void __attribute__((section(".spinlock.text"))) _write_unlock(rwlock_t *lock) ;
4336 void __attribute__((section(".spinlock.text"))) _spin_unlock_bh(spinlock_t *lock) ;
4337 void __attribute__((section(".spinlock.text"))) _read_unlock_bh(rwlock_t *lock) ;
4338 void __attribute__((section(".spinlock.text"))) _write_unlock_bh(rwlock_t *lock) ;
4339 void __attribute__((section(".spinlock.text"))) _spin_unlock_irq(spinlock_t *lock) ;
4340 void __attribute__((section(".spinlock.text"))) _read_unlock_irq(rwlock_t *lock) ;
4341 void __attribute__((section(".spinlock.text"))) _write_unlock_irq(rwlock_t *lock) ;
4342 void __attribute__((section(".spinlock.text"))) _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
4343 ;
4344 void __attribute__((section(".spinlock.text"))) _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
4345 ;
4346 void __attribute__((section(".spinlock.text"))) _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
4347 ;
4348 # 141 "include/linux/spinlock.h" 2
4349
4350
4351
4352
4353
4354 extern void _raw_spin_lock(spinlock_t *lock);
4355
4356 extern int _raw_spin_trylock(spinlock_t *lock);
4357 extern void _raw_spin_unlock(spinlock_t *lock);
4358 extern void _raw_read_lock(rwlock_t *lock);
4359 extern int _raw_read_trylock(rwlock_t *lock);
4360 extern void _raw_read_unlock(rwlock_t *lock);
4361 extern void _raw_write_lock(rwlock_t *lock);
4362 extern int _raw_write_trylock(rwlock_t *lock);
4363 extern void _raw_write_unlock(rwlock_t *lock);
4364 # 357 "include/linux/spinlock.h"
4365 extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
4366 # 16 "include/linux/rwsem-spinlock.h" 2
4367
4368
4369
4370
4371
4372
4373 struct rwsem_waiter;
4374 # 31 "include/linux/rwsem-spinlock.h"
4375 struct rw_semaphore {
4376 __s32 activity;
4377 spinlock_t wait_lock;
4378 struct list_head wait_list;
4379
4380
4381
4382 };
4383 # 53 "include/linux/rwsem-spinlock.h"
4384 extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
4385 struct lock_class_key *key);
4386 # 63 "include/linux/rwsem-spinlock.h"
4387 extern void __down_read(struct rw_semaphore *sem);
4388 extern int __down_read_trylock(struct rw_semaphore *sem);
4389 extern void __down_write(struct rw_semaphore *sem);
4390 extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
4391 extern int __down_write_trylock(struct rw_semaphore *sem);
4392 extern void __up_read(struct rw_semaphore *sem);
4393 extern void __up_write(struct rw_semaphore *sem);
4394 extern void __downgrade_write(struct rw_semaphore *sem);
4395
4396 static inline __attribute__((always_inline)) int rwsem_is_locked(struct rw_semaphore *sem)
4397 {
4398 return (sem->activity != 0);
4399 }
4400 # 21 "include/linux/rwsem.h" 2
4401
4402
4403
4404
4405
4406
4407
4408 extern void down_read(struct rw_semaphore *sem);
4409
4410
4411
4412
4413 extern int down_read_trylock(struct rw_semaphore *sem);
4414
4415
4416
4417
4418 extern void down_write(struct rw_semaphore *sem);
4419
4420
4421
4422
4423 extern int down_write_trylock(struct rw_semaphore *sem);
4424
4425
4426
4427
4428 extern void up_read(struct rw_semaphore *sem);
4429
4430
4431
4432
4433 extern void up_write(struct rw_semaphore *sem);
4434
4435
4436
4437
4438 extern void downgrade_write(struct rw_semaphore *sem);
4439 # 15 "include/linux/notifier.h" 2
4440 # 1 "include/linux/srcu.h" 1
4441 # 30 "include/linux/srcu.h"
4442 struct srcu_struct_array {
4443 int c[2];
4444 };
4445
4446 struct srcu_struct {
4447 int completed;
4448 struct srcu_struct_array *per_cpu_ref;
4449 struct mutex mutex;
4450 };
4451
4452
4453
4454
4455
4456
4457
4458 int init_srcu_struct(struct srcu_struct *sp);
4459 void cleanup_srcu_struct(struct srcu_struct *sp);
4460 int srcu_read_lock(struct srcu_struct *sp) ;
4461 void srcu_read_unlock(struct srcu_struct *sp, int idx) ;
4462 void synchronize_srcu(struct srcu_struct *sp);
4463 long srcu_batches_completed(struct srcu_struct *sp);
4464 # 16 "include/linux/notifier.h" 2
4465 # 50 "include/linux/notifier.h"
4466 struct notifier_block {
4467 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
4468 struct notifier_block *next;
4469 int priority;
4470 };
4471
4472 struct atomic_notifier_head {
4473 spinlock_t lock;
4474 struct notifier_block *head;
4475 };
4476
4477 struct blocking_notifier_head {
4478 struct rw_semaphore rwsem;
4479 struct notifier_block *head;
4480 };
4481
4482 struct raw_notifier_head {
4483 struct notifier_block *head;
4484 };
4485
4486 struct srcu_notifier_head {
4487 struct mutex mutex;
4488 struct srcu_struct srcu;
4489 struct notifier_block *head;
4490 };
4491 # 89 "include/linux/notifier.h"
4492 extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
4493 # 115 "include/linux/notifier.h"
4494 extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
4495 struct notifier_block *nb);
4496 extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
4497 struct notifier_block *nb);
4498 extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
4499 struct notifier_block *nb);
4500 extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
4501 struct notifier_block *nb);
4502
4503 extern int blocking_notifier_chain_cond_register(
4504 struct blocking_notifier_head *nh,
4505 struct notifier_block *nb);
4506
4507 extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
4508 struct notifier_block *nb);
4509 extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
4510 struct notifier_block *nb);
4511 extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
4512 struct notifier_block *nb);
4513 extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
4514 struct notifier_block *nb);
4515
4516 extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
4517 unsigned long val, void *v);
4518 extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
4519 unsigned long val, void *v, int nr_to_call, int *nr_calls);
4520 extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
4521 unsigned long val, void *v);
4522 extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
4523 unsigned long val, void *v, int nr_to_call, int *nr_calls);
4524 extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
4525 unsigned long val, void *v);
4526 extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
4527 unsigned long val, void *v, int nr_to_call, int *nr_calls);
4528 extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
4529 unsigned long val, void *v);
4530 extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
4531 unsigned long val, void *v, int nr_to_call, int *nr_calls);
4532 # 165 "include/linux/notifier.h"
4533 static inline __attribute__((always_inline)) int notifier_from_errno(int err)
4534 {
4535 return 0x8000 | (0x0001 - err);
4536 }
4537
4538
4539 static inline __attribute__((always_inline)) int notifier_to_errno(int ret)
4540 {
4541 ret &= ~0x8000;
4542 return ret > 0x0001 ? 0x0001 - ret : 0;
4543 }
4544 # 258 "include/linux/notifier.h"
4545 extern struct blocking_notifier_head reboot_notifier_list;
4546 # 18 "kernel/trace/trace.c" 2
4547 # 1 "include/linux/debugfs.h" 1
4548 # 18 "include/linux/debugfs.h"
4549 # 1 "include/linux/fs.h" 1
4550 # 9 "include/linux/fs.h"
4551 # 1 "include/linux/limits.h" 1
4552 # 10 "include/linux/fs.h" 2
4553 # 1 "include/linux/ioctl.h" 1
4554
4555
4556
4557 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ioctl.h" 1
4558 # 1 "include/asm-generic/ioctl.h" 1
4559 # 73 "include/asm-generic/ioctl.h"
4560 extern unsigned int __invalid_size_argument_for_IOC;
4561 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ioctl.h" 2
4562 # 5 "include/linux/ioctl.h" 2
4563 # 11 "include/linux/fs.h" 2
4564 # 24 "include/linux/fs.h"
4565 extern int sysctl_nr_open;
4566 # 36 "include/linux/fs.h"
4567 struct files_stat_struct {
4568 int nr_files;
4569 int nr_free_files;
4570 int max_files;
4571 };
4572 extern struct files_stat_struct files_stat;
4573 extern int get_max_files(void);
4574
4575 struct inodes_stat_t {
4576 int nr_inodes;
4577 int nr_unused;
4578 int dummy[5];
4579 };
4580 extern struct inodes_stat_t inodes_stat;
4581
4582 extern int leases_enable, lease_break_time;
4583
4584
4585 extern int dir_notify_enable;
4586 # 288 "include/linux/fs.h"
4587 # 1 "include/linux/wait.h" 1
4588 # 26 "include/linux/wait.h"
4589 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/current.h" 1
4590 # 13 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/current.h"
4591 struct task_struct;
4592
4593 static inline __attribute__((always_inline)) struct task_struct *get_current(void) __attribute__ ((__const__));
4594 static inline __attribute__((always_inline)) struct task_struct *get_current(void)
4595 {
4596 return (current_thread_info()->task);
4597 }
4598 # 27 "include/linux/wait.h" 2
4599
4600 typedef struct __wait_queue wait_queue_t;
4601 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
4602 int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
4603
4604 struct __wait_queue {
4605 unsigned int flags;
4606
4607 void *private;
4608 wait_queue_func_t func;
4609 struct list_head task_list;
4610 };
4611
4612 struct wait_bit_key {
4613 void *flags;
4614 int bit_nr;
4615 };
4616
4617 struct wait_bit_queue {
4618 struct wait_bit_key key;
4619 wait_queue_t wait;
4620 };
4621
4622 struct __wait_queue_head {
4623 spinlock_t lock;
4624 struct list_head task_list;
4625 };
4626 typedef struct __wait_queue_head wait_queue_head_t;
4627
4628 struct task_struct;
4629 # 80 "include/linux/wait.h"
4630 extern void init_waitqueue_head(wait_queue_head_t *q);
4631 # 91 "include/linux/wait.h"
4632 static inline __attribute__((always_inline)) void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
4633 {
4634 q->flags = 0;
4635 q->private = p;
4636 q->func = default_wake_function;
4637 }
4638
4639 static inline __attribute__((always_inline)) void init_waitqueue_func_entry(wait_queue_t *q,
4640 wait_queue_func_t func)
4641 {
4642 q->flags = 0;
4643 q->private = ((void *)0);
4644 q->func = func;
4645 }
4646
4647 static inline __attribute__((always_inline)) int waitqueue_active(wait_queue_head_t *q)
4648 {
4649 return !list_empty(&q->task_list);
4650 }
4651
4652 extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
4653 extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
4654 extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
4655
4656 static inline __attribute__((always_inline)) void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
4657 {
4658 list_add(&new->task_list, &head->task_list);
4659 }
4660
4661
4662
4663
4664 static inline __attribute__((always_inline)) void __add_wait_queue_tail(wait_queue_head_t *head,
4665 wait_queue_t *new)
4666 {
4667 list_add_tail(&new->task_list, &head->task_list);
4668 }
4669
4670 static inline __attribute__((always_inline)) void __remove_wait_queue(wait_queue_head_t *head,
4671 wait_queue_t *old)
4672 {
4673 list_del(&old->task_list);
4674 }
4675
4676 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
4677 extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
4678 extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
4679 void __wake_up_bit(wait_queue_head_t *, void *, int);
4680 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
4681 int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
4682 void wake_up_bit(void *, int);
4683 int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
4684 int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
4685 wait_queue_head_t *bit_waitqueue(void *, int);
4686 # 400 "include/linux/wait.h"
4687 static inline __attribute__((always_inline)) void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
4688 wait_queue_t * wait)
4689 {
4690 wait->flags |= 0x01;
4691 __add_wait_queue_tail(q, wait);
4692 }
4693
4694
4695
4696
4697 static inline __attribute__((always_inline)) void remove_wait_queue_locked(wait_queue_head_t *q,
4698 wait_queue_t * wait)
4699 {
4700 __remove_wait_queue(q, wait);
4701 }
4702
4703
4704
4705
4706
4707
4708 extern void sleep_on(wait_queue_head_t *q);
4709 extern long sleep_on_timeout(wait_queue_head_t *q,
4710 signed long timeout);
4711 extern void interruptible_sleep_on(wait_queue_head_t *q);
4712 extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
4713 signed long timeout);
4714
4715
4716
4717
4718 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
4719 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
4720 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
4721 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
4722 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
4723 # 476 "include/linux/wait.h"
4724 static inline __attribute__((always_inline)) int wait_on_bit(void *word, int bit,
4725 int (*action)(void *), unsigned mode)
4726 {
4727 if (!test_bit(bit, word))
4728 return 0;
4729 return out_of_line_wait_on_bit(word, bit, action, mode);
4730 }
4731 # 500 "include/linux/wait.h"
4732 static inline __attribute__((always_inline)) int wait_on_bit_lock(void *word, int bit,
4733 int (*action)(void *), unsigned mode)
4734 {
4735 if (!test_and_set_bit(bit, word))
4736 return 0;
4737 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
4738 }
4739 # 289 "include/linux/fs.h" 2
4740
4741 # 1 "include/linux/kdev_t.h" 1
4742 # 21 "include/linux/kdev_t.h"
4743 static inline __attribute__((always_inline)) int old_valid_dev(dev_t dev)
4744 {
4745 return ((unsigned int) ((dev) >> 20)) < 256 && ((unsigned int) ((dev) & ((1U << 20) - 1))) < 256;
4746 }
4747
4748 static inline __attribute__((always_inline)) u16 old_encode_dev(dev_t dev)
4749 {
4750 return (((unsigned int) ((dev) >> 20)) << 8) | ((unsigned int) ((dev) & ((1U << 20) - 1)));
4751 }
4752
4753 static inline __attribute__((always_inline)) dev_t old_decode_dev(u16 val)
4754 {
4755 return ((((val >> 8) & 255) << 20) | (val & 255));
4756 }
4757
4758 static inline __attribute__((always_inline)) int new_valid_dev(dev_t dev)
4759 {
4760 return 1;
4761 }
4762
4763 static inline __attribute__((always_inline)) u32 new_encode_dev(dev_t dev)
4764 {
4765 unsigned major = ((unsigned int) ((dev) >> 20));
4766 unsigned minor = ((unsigned int) ((dev) & ((1U << 20) - 1)));
4767 return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
4768 }
4769
4770 static inline __attribute__((always_inline)) dev_t new_decode_dev(u32 dev)
4771 {
4772 unsigned major = (dev & 0xfff00) >> 8;
4773 unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
4774 return (((major) << 20) | (minor));
4775 }
4776
4777 static inline __attribute__((always_inline)) int huge_valid_dev(dev_t dev)
4778 {
4779 return 1;
4780 }
4781
4782 static inline __attribute__((always_inline)) u64 huge_encode_dev(dev_t dev)
4783 {
4784 return new_encode_dev(dev);
4785 }
4786
4787 static inline __attribute__((always_inline)) dev_t huge_decode_dev(u64 dev)
4788 {
4789 return new_decode_dev(dev);
4790 }
4791
4792 static inline __attribute__((always_inline)) int sysv_valid_dev(dev_t dev)
4793 {
4794 return ((unsigned int) ((dev) >> 20)) < (1<<14) && ((unsigned int) ((dev) & ((1U << 20) - 1))) < (1<<18);
4795 }
4796
4797 static inline __attribute__((always_inline)) u32 sysv_encode_dev(dev_t dev)
4798 {
4799 return ((unsigned int) ((dev) & ((1U << 20) - 1))) | (((unsigned int) ((dev) >> 20)) << 18);
4800 }
4801
4802 static inline __attribute__((always_inline)) unsigned sysv_major(u32 dev)
4803 {
4804 return (dev >> 18) & 0x3fff;
4805 }
4806
4807 static inline __attribute__((always_inline)) unsigned sysv_minor(u32 dev)
4808 {
4809 return dev & 0x3ffff;
4810 }
4811 # 291 "include/linux/fs.h" 2
4812 # 1 "include/linux/dcache.h" 1
4813
4814
4815
4816
4817
4818 # 1 "include/linux/rculist.h" 1
4819 # 10 "include/linux/rculist.h"
4820 # 1 "include/linux/rcupdate.h" 1
4821 # 36 "include/linux/rcupdate.h"
4822 # 1 "include/linux/cache.h" 1
4823 # 37 "include/linux/rcupdate.h" 2
4824
4825
4826 # 1 "include/linux/percpu.h" 1
4827
4828
4829
4830
4831 # 1 "include/linux/slab.h" 1
4832 # 12 "include/linux/slab.h"
4833 # 1 "include/linux/gfp.h" 1
4834
4835
4836
4837 # 1 "include/linux/mmzone.h" 1
4838 # 14 "include/linux/mmzone.h"
4839 # 1 "include/linux/init.h" 1
4840 # 139 "include/linux/init.h"
4841 typedef int (*initcall_t)(void);
4842 typedef void (*exitcall_t)(void);
4843
4844 extern initcall_t __con_initcall_start[], __con_initcall_end[];
4845 extern initcall_t __security_initcall_start[], __security_initcall_end[];
4846
4847
4848 extern int do_one_initcall(initcall_t fn);
4849 extern char __attribute__ ((__section__(".init.data"))) boot_command_line[];
4850 extern char *saved_command_line;
4851 extern unsigned int reset_devices;
4852
4853
4854 void setup_arch(char **);
4855 void prepare_namespace(void);
4856
4857 extern void (*late_time_init)(void);
4858 # 221 "include/linux/init.h"
4859 struct obs_kernel_param {
4860 const char *str;
4861 int (*setup_func)(char *);
4862 int early;
4863 };
4864 # 249 "include/linux/init.h"
4865 void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) parse_early_param(void);
4866 # 15 "include/linux/mmzone.h" 2
4867 # 1 "include/linux/seqlock.h" 1
4868 # 32 "include/linux/seqlock.h"
4869 typedef struct {
4870 unsigned sequence;
4871 spinlock_t lock;
4872 } seqlock_t;
4873 # 60 "include/linux/seqlock.h"
4874 static inline __attribute__((always_inline)) void write_seqlock(seqlock_t *sl)
4875 {
4876 _spin_lock(&sl->lock);
4877 ++sl->sequence;
4878 __asm__ __volatile__("": : :"memory");
4879 }
4880
4881 static inline __attribute__((always_inline)) void write_sequnlock(seqlock_t *sl)
4882 {
4883 __asm__ __volatile__("": : :"memory");
4884 sl->sequence++;
4885 _spin_unlock(&sl->lock);
4886 }
4887
4888 static inline __attribute__((always_inline)) int write_tryseqlock(seqlock_t *sl)
4889 {
4890 int ret = (_spin_trylock(&sl->lock));
4891
4892 if (ret) {
4893 ++sl->sequence;
4894 __asm__ __volatile__("": : :"memory");
4895 }
4896 return ret;
4897 }
4898
4899
4900 static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned read_seqbegin(const seqlock_t *sl)
4901 {
4902 unsigned ret;
4903
4904 repeat:
4905 ret = sl->sequence;
4906 __asm__ __volatile__("": : :"memory");
4907 if (__builtin_expect(!!(ret & 1), 0)) {
4908 __asm__ __volatile__("": : :"memory");
4909 goto repeat;
4910 }
4911
4912 return ret;
4913 }
4914
4915
4916
4917
4918
4919
4920 static inline __attribute__((always_inline)) __attribute__((always_inline)) int read_seqretry(const seqlock_t *sl, unsigned start)
4921 {
4922 __asm__ __volatile__("": : :"memory");
4923
4924 return (sl->sequence != start);
4925 }
4926 # 121 "include/linux/seqlock.h"
4927 typedef struct seqcount {
4928 unsigned sequence;
4929 } seqcount_t;
4930
4931
4932
4933
4934
4935 static inline __attribute__((always_inline)) unsigned read_seqcount_begin(const seqcount_t *s)
4936 {
4937 unsigned ret;
4938
4939 repeat:
4940 ret = s->sequence;
4941 __asm__ __volatile__("": : :"memory");
4942 if (__builtin_expect(!!(ret & 1), 0)) {
4943 __asm__ __volatile__("": : :"memory");
4944 goto repeat;
4945 }
4946 return ret;
4947 }
4948
4949
4950
4951
4952 static inline __attribute__((always_inline)) int read_seqcount_retry(const seqcount_t *s, unsigned start)
4953 {
4954 __asm__ __volatile__("": : :"memory");
4955
4956 return s->sequence != start;
4957 }
4958
4959
4960
4961
4962
4963
4964 static inline __attribute__((always_inline)) void write_seqcount_begin(seqcount_t *s)
4965 {
4966 s->sequence++;
4967 __asm__ __volatile__("": : :"memory");
4968 }
4969
4970 static inline __attribute__((always_inline)) void write_seqcount_end(seqcount_t *s)
4971 {
4972 __asm__ __volatile__("": : :"memory");
4973 s->sequence++;
4974 }
4975 # 16 "include/linux/mmzone.h" 2
4976
4977 # 1 "include/linux/pageblock-flags.h" 1
4978 # 29 "include/linux/pageblock-flags.h"
4979 enum pageblock_bits {
4980 PB_migrate,
4981 PB_migrate_end = PB_migrate + 3 - 1,
4982
4983 NR_PAGEBLOCK_BITS
4984 };
4985 # 60 "include/linux/pageblock-flags.h"
4986 struct page;
4987
4988
4989 unsigned long get_pageblock_flags_group(struct page *page,
4990 int start_bitidx, int end_bitidx);
4991 void set_pageblock_flags_group(struct page *page, unsigned long flags,
4992 int start_bitidx, int end_bitidx);
4993 # 18 "include/linux/mmzone.h" 2
4994 # 1 "include/linux/bounds.h" 1
4995 # 19 "include/linux/mmzone.h" 2
4996 # 49 "include/linux/mmzone.h"
4997 extern int page_group_by_mobility_disabled;
4998
4999 static inline __attribute__((always_inline)) int get_pageblock_migratetype(struct page *page)
5000 {
5001 if (__builtin_expect(!!(page_group_by_mobility_disabled), 0))
5002 return 0;
5003
5004 return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
5005 }
5006
5007 struct free_area {
5008 struct list_head free_list[5];
5009 unsigned long nr_free;
5010 };
5011
5012 struct pglist_data;
5013 # 81 "include/linux/mmzone.h"
5014 enum zone_stat_item {
5015
5016 NR_FREE_PAGES,
5017 NR_LRU_BASE,
5018 NR_INACTIVE_ANON = NR_LRU_BASE,
5019 NR_ACTIVE_ANON,
5020 NR_INACTIVE_FILE,
5021 NR_ACTIVE_FILE,
5022
5023
5024
5025
5026 NR_UNEVICTABLE = NR_ACTIVE_FILE,
5027 NR_MLOCK = NR_ACTIVE_FILE,
5028
5029 NR_ANON_PAGES,
5030 NR_FILE_MAPPED,
5031
5032 NR_FILE_PAGES,
5033 NR_FILE_DIRTY,
5034 NR_WRITEBACK,
5035 NR_SLAB_RECLAIMABLE,
5036 NR_SLAB_UNRECLAIMABLE,
5037 NR_PAGETABLE,
5038 NR_UNSTABLE_NFS,
5039 NR_BOUNCE,
5040 NR_VMSCAN_WRITE,
5041
5042 NR_WRITEBACK_TEMP,
5043 # 118 "include/linux/mmzone.h"
5044 NR_VM_ZONE_STAT_ITEMS };
5045 # 133 "include/linux/mmzone.h"
5046 enum lru_list {
5047 LRU_INACTIVE_ANON = 0,
5048 LRU_ACTIVE_ANON = 0 + 1,
5049 LRU_INACTIVE_FILE = 0 + 2,
5050 LRU_ACTIVE_FILE = 0 + 2 + 1,
5051
5052
5053
5054 LRU_UNEVICTABLE = LRU_ACTIVE_FILE,
5055
5056 NR_LRU_LISTS
5057 };
5058
5059
5060
5061
5062
5063 static inline __attribute__((always_inline)) int is_file_lru(enum lru_list l)
5064 {
5065 return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
5066 }
5067
5068 static inline __attribute__((always_inline)) int is_active_lru(enum lru_list l)
5069 {
5070 return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
5071 }
5072
5073 static inline __attribute__((always_inline)) int is_unevictable_lru(enum lru_list l)
5074 {
5075
5076
5077
5078 return 0;
5079
5080 }
5081
5082 struct per_cpu_pages {
5083 int count;
5084 int high;
5085 int batch;
5086 struct list_head list;
5087 };
5088
5089 struct per_cpu_pageset {
5090 struct per_cpu_pages pcp;
5091
5092
5093
5094
5095
5096
5097
5098 } ;
5099 # 195 "include/linux/mmzone.h"
5100 enum zone_type {
5101 # 215 "include/linux/mmzone.h"
5102 ZONE_DMA,
5103 # 230 "include/linux/mmzone.h"
5104 ZONE_NORMAL,
5105 # 242 "include/linux/mmzone.h"
5106 ZONE_MOVABLE,
5107 __MAX_NR_ZONES
5108 };
5109 # 266 "include/linux/mmzone.h"
5110 struct zone {
5111
5112 unsigned long pages_min, pages_low, pages_high;
5113 # 277 "include/linux/mmzone.h"
5114 unsigned long lowmem_reserve[3];
5115 # 288 "include/linux/mmzone.h"
5116 struct per_cpu_pageset pageset[1];
5117
5118
5119
5120
5121 spinlock_t lock;
5122
5123
5124
5125
5126 struct free_area free_area[14];
5127
5128
5129
5130
5131
5132
5133 unsigned long *pageblock_flags;
5134
5135
5136
5137
5138
5139
5140 spinlock_t lru_lock;
5141 struct {
5142 struct list_head list;
5143 unsigned long nr_scan;
5144 } lru[NR_LRU_LISTS];
5145 # 326 "include/linux/mmzone.h"
5146 unsigned long recent_rotated[2];
5147 unsigned long recent_scanned[2];
5148
5149 unsigned long pages_scanned;
5150 unsigned long flags;
5151
5152
5153 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
5154 # 348 "include/linux/mmzone.h"
5155 int prev_priority;
5156
5157
5158
5159
5160
5161 unsigned int inactive_ratio;
5162
5163
5164
5165 # 384 "include/linux/mmzone.h"
5166 wait_queue_head_t * wait_table;
5167 unsigned long wait_table_hash_nr_entries;
5168 unsigned long wait_table_bits;
5169
5170
5171
5172
5173 struct pglist_data *zone_pgdat;
5174
5175 unsigned long zone_start_pfn;
5176 # 405 "include/linux/mmzone.h"
5177 unsigned long spanned_pages;
5178 unsigned long present_pages;
5179
5180
5181
5182
5183 const char *name;
5184 } ;
5185
5186 typedef enum {
5187 ZONE_ALL_UNRECLAIMABLE,
5188 ZONE_RECLAIM_LOCKED,
5189 ZONE_OOM_LOCKED,
5190 } zone_flags_t;
5191
5192 static inline __attribute__((always_inline)) void zone_set_flag(struct zone *zone, zone_flags_t flag)
5193 {
5194 set_bit(flag, &zone->flags);
5195 }
5196
5197 static inline __attribute__((always_inline)) int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
5198 {
5199 return test_and_set_bit(flag, &zone->flags);
5200 }
5201
5202 static inline __attribute__((always_inline)) void zone_clear_flag(struct zone *zone, zone_flags_t flag)
5203 {
5204 clear_bit(flag, &zone->flags);
5205 }
5206
5207 static inline __attribute__((always_inline)) int zone_is_all_unreclaimable(const struct zone *zone)
5208 {
5209 return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
5210 }
5211
5212 static inline __attribute__((always_inline)) int zone_is_reclaim_locked(const struct zone *zone)
5213 {
5214 return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
5215 }
5216
5217 static inline __attribute__((always_inline)) int zone_is_oom_locked(const struct zone *zone)
5218 {
5219 return test_bit(ZONE_OOM_LOCKED, &zone->flags);
5220 }
5221 # 538 "include/linux/mmzone.h"
5222 struct zonelist_cache;
5223
5224
5225
5226
5227
5228
5229 struct zoneref {
5230 struct zone *zone;
5231 int zone_idx;
5232 };
5233 # 567 "include/linux/mmzone.h"
5234 struct zonelist {
5235 struct zonelist_cache *zlcache_ptr;
5236 struct zoneref _zonerefs[((1 << 0) * 3) + 1];
5237
5238
5239
5240 };
5241 # 585 "include/linux/mmzone.h"
5242 extern struct page *mem_map;
5243 # 599 "include/linux/mmzone.h"
5244 struct bootmem_data;
5245 typedef struct pglist_data {
5246 struct zone node_zones[3];
5247 struct zonelist node_zonelists[1];
5248 int nr_zones;
5249
5250 struct page *node_mem_map;
5251
5252
5253
5254
5255 struct bootmem_data *bdata;
5256 # 621 "include/linux/mmzone.h"
5257 unsigned long node_start_pfn;
5258 unsigned long node_present_pages;
5259 unsigned long node_spanned_pages;
5260
5261 int node_id;
5262 wait_queue_head_t kswapd_wait;
5263 struct task_struct *kswapd;
5264 int kswapd_max_order;
5265 } pg_data_t;
5266 # 640 "include/linux/mmzone.h"
5267 # 1 "include/linux/memory_hotplug.h" 1
5268
5269
5270
5271 # 1 "include/linux/mmzone.h" 1
5272 # 5 "include/linux/memory_hotplug.h" 2
5273
5274
5275
5276 struct page;
5277 struct zone;
5278 struct pglist_data;
5279 struct mem_section;
5280 # 165 "include/linux/memory_hotplug.h"
5281 static inline __attribute__((always_inline)) void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
5282 static inline __attribute__((always_inline)) void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
5283 static inline __attribute__((always_inline)) void pgdat_resize_init(struct pglist_data *pgdat) {}
5284
5285 static inline __attribute__((always_inline)) unsigned zone_span_seqbegin(struct zone *zone)
5286 {
5287 return 0;
5288 }
5289 static inline __attribute__((always_inline)) int zone_span_seqretry(struct zone *zone, unsigned iv)
5290 {
5291 return 0;
5292 }
5293 static inline __attribute__((always_inline)) void zone_span_writelock(struct zone *zone) {}
5294 static inline __attribute__((always_inline)) void zone_span_writeunlock(struct zone *zone) {}
5295 static inline __attribute__((always_inline)) void zone_seqlock_init(struct zone *zone) {}
5296
5297 static inline __attribute__((always_inline)) int mhp_notimplemented(const char *func)
5298 {
5299 printk("<4>" "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
5300 dump_stack();
5301 return -38;
5302 }
5303
5304 static inline __attribute__((always_inline)) void register_page_bootmem_info_node(struct pglist_data *pgdat)
5305 {
5306 }
5307
5308
5309
5310
5311
5312
5313
5314 extern int walk_memory_resource(unsigned long start_pfn,
5315 unsigned long nr_pages, void *arg,
5316 int (*func)(unsigned long, unsigned long, void *));
5317
5318
5319
5320
5321
5322
5323 static inline __attribute__((always_inline)) int is_mem_section_removable(unsigned long pfn,
5324 unsigned long nr_pages)
5325 {
5326 return 0;
5327 }
5328
5329
5330 extern int add_memory(int nid, u64 start, u64 size);
5331 extern int arch_add_memory(int nid, u64 start, u64 size);
5332 extern int remove_memory(u64 start, u64 size);
5333 extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
5334 int nr_pages);
5335 extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
5336 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
5337 unsigned long pnum);
5338 # 641 "include/linux/mmzone.h" 2
5339
5340 void get_zone_counts(unsigned long *active, unsigned long *inactive,
5341 unsigned long *free);
5342 void build_all_zonelists(void);
5343 void wakeup_kswapd(struct zone *zone, int order);
5344 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
5345 int classzone_idx, int alloc_flags);
5346 enum memmap_context {
5347 MEMMAP_EARLY,
5348 MEMMAP_HOTPLUG,
5349 };
5350 extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
5351 unsigned long size,
5352 enum memmap_context context);
5353
5354
5355
5356
5357 static inline __attribute__((always_inline)) void memory_present(int nid, unsigned long start, unsigned long end) {}
5358 # 671 "include/linux/mmzone.h"
5359 static inline __attribute__((always_inline)) int populated_zone(struct zone *zone)
5360 {
5361 return (!!zone->present_pages);
5362 }
5363
5364 extern int movable_zone;
5365
5366 static inline __attribute__((always_inline)) int zone_movable_is_highmem(void)
5367 {
5368
5369
5370
5371 return 0;
5372
5373 }
5374
5375 static inline __attribute__((always_inline)) int is_highmem_idx(enum zone_type idx)
5376 {
5377
5378
5379
5380
5381 return 0;
5382
5383 }
5384
5385 static inline __attribute__((always_inline)) int is_normal_idx(enum zone_type idx)
5386 {
5387 return (idx == ZONE_NORMAL);
5388 }
5389
5390
5391
5392
5393
5394
5395
5396 static inline __attribute__((always_inline)) int is_highmem(struct zone *zone)
5397 {
5398
5399
5400
5401
5402
5403
5404 return 0;
5405
5406 }
5407
5408 static inline __attribute__((always_inline)) int is_normal(struct zone *zone)
5409 {
5410 return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
5411 }
5412
5413 static inline __attribute__((always_inline)) int is_dma32(struct zone *zone)
5414 {
5415
5416
5417
5418 return 0;
5419
5420 }
5421
5422 static inline __attribute__((always_inline)) int is_dma(struct zone *zone)
5423 {
5424
5425 return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
5426
5427
5428
5429 }
5430
5431
5432 struct ctl_table;
5433 struct file;
5434 int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
5435 void *, size_t *, loff_t *);
5436 extern int sysctl_lowmem_reserve_ratio[3 -1];
5437 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
5438 void *, size_t *, loff_t *);
5439 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
5440 void *, size_t *, loff_t *);
5441 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
5442 struct file *, void *, size_t *, loff_t *);
5443 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
5444 struct file *, void *, size_t *, loff_t *);
5445
5446 extern int numa_zonelist_order_handler(struct ctl_table *, int,
5447 struct file *, void *, size_t *, loff_t *);
5448 extern char numa_zonelist_order[];
5449
5450
5451 # 1 "include/linux/topology.h" 1
5452 # 33 "include/linux/topology.h"
5453 # 1 "include/linux/smp.h" 1
5454 # 14 "include/linux/smp.h"
5455 extern void cpu_idle(void);
5456
5457 struct call_single_data {
5458 struct list_head list;
5459 void (*func) (void *info);
5460 void *info;
5461 u16 flags;
5462 u16 priv;
5463 };
5464 # 123 "include/linux/smp.h"
5465 static inline __attribute__((always_inline)) int up_smp_call_function(void (*func)(void *), void *info)
5466 {
5467 return 0;
5468 }
5469 # 136 "include/linux/smp.h"
5470 static inline __attribute__((always_inline)) void smp_send_reschedule(int cpu) { }
5471 # 151 "include/linux/smp.h"
5472 static inline __attribute__((always_inline)) void init_call_single_data(void)
5473 {
5474 }
5475 # 182 "include/linux/smp.h"
5476 void smp_setup_processor_id(void);
5477 # 34 "include/linux/topology.h" 2
5478 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/topology.h" 1
5479
5480
5481
5482 # 1 "include/asm-generic/topology.h" 1
5483 # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/topology.h" 2
5484 # 35 "include/linux/topology.h" 2
5485 # 52 "include/linux/topology.h"
5486 void arch_update_cpu_topology(void);
5487 # 764 "include/linux/mmzone.h" 2
5488
5489
5490
5491
5492
5493
5494
5495 extern struct pglist_data contig_page_data;
5496 # 781 "include/linux/mmzone.h"
5497 extern struct pglist_data *first_online_pgdat(void);
5498 extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
5499 extern struct zone *next_zone(struct zone *zone);
5500 # 805 "include/linux/mmzone.h"
5501 static inline __attribute__((always_inline)) struct zone *zonelist_zone(struct zoneref *zoneref)
5502 {
5503 return zoneref->zone;
5504 }
5505
5506 static inline __attribute__((always_inline)) int zonelist_zone_idx(struct zoneref *zoneref)
5507 {
5508 return zoneref->zone_idx;
5509 }
5510
5511 static inline __attribute__((always_inline)) int zonelist_node_idx(struct zoneref *zoneref)
5512 {
5513
5514
5515
5516
5517 return 0;
5518
5519 }
5520 # 838 "include/linux/mmzone.h"
5521 struct zoneref *next_zones_zonelist(struct zoneref *z,
5522 enum zone_type highest_zoneidx,
5523 nodemask_t *nodes,
5524 struct zone **zone);
5525 # 855 "include/linux/mmzone.h"
5526 static inline __attribute__((always_inline)) struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
5527 enum zone_type highest_zoneidx,
5528 nodemask_t *nodes,
5529 struct zone **zone)
5530 {
5531 return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
5532 zone);
5533 }
5534 # 898 "include/linux/mmzone.h"
5535 static inline __attribute__((always_inline)) unsigned long early_pfn_to_nid(unsigned long pfn)
5536 {
5537 return 0;
5538 }
5539 # 1079 "include/linux/mmzone.h"
5540 void memory_present(int nid, unsigned long start, unsigned long end);
5541 unsigned long __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) node_memmap_size_bytes(int, unsigned long, unsigned long);
5542 # 5 "include/linux/gfp.h" 2
5543
5544
5545
5546 struct vm_area_struct;
5547 # 108 "include/linux/gfp.h"
5548 static inline __attribute__((always_inline)) int allocflags_to_migratetype(gfp_t gfp_flags)
5549 {
5550 ({ int __ret_warn_on = !!((gfp_flags & ((( gfp_t)0x80000u)|(( gfp_t)0x100000u))) == ((( gfp_t)0x80000u)|(( gfp_t)0x100000u))); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("include/linux/gfp.h", 110); __builtin_expect(!!(__ret_warn_on), 0); });
5551
5552 if (__builtin_expect(!!(page_group_by_mobility_disabled), 0))
5553 return 0;
5554
5555
5556 return (((gfp_flags & (( gfp_t)0x100000u)) != 0) << 1) |
5557 ((gfp_flags & (( gfp_t)0x80000u)) != 0);
5558 }
5559
5560 static inline __attribute__((always_inline)) enum zone_type gfp_zone(gfp_t flags)
5561 {
5562
5563 if (flags & (( gfp_t)0x01u))
5564 return ZONE_DMA;
5565
5566
5567
5568
5569
5570 if ((flags & ((( gfp_t)0x02u) | (( gfp_t)0x100000u))) ==
5571 ((( gfp_t)0x02u) | (( gfp_t)0x100000u)))
5572 return ZONE_MOVABLE;
5573
5574
5575
5576
5577 return ZONE_NORMAL;
5578 }
5579 # 147 "include/linux/gfp.h"
5580 static inline __attribute__((always_inline)) int gfp_zonelist(gfp_t flags)
5581 {
5582 if (0 && __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 0))
5583 return 1;
5584
5585 return 0;
5586 }
5587 # 164 "include/linux/gfp.h"
5588 static inline __attribute__((always_inline)) struct zonelist *node_zonelist(int nid, gfp_t flags)
5589 {
5590 return (&contig_page_data)->node_zonelists + gfp_zonelist(flags);
5591 }
5592
5593
5594 static inline __attribute__((always_inline)) void arch_free_page(struct page *page, int order) { }
5595
5596
5597 static inline __attribute__((always_inline)) void arch_alloc_page(struct page *page, int order) { }
5598
5599
5600 struct page *
5601 __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
5602 struct zonelist *zonelist, nodemask_t *nodemask);
5603
5604 static inline __attribute__((always_inline)) struct page *
5605 __alloc_pages(gfp_t gfp_mask, unsigned int order,
5606 struct zonelist *zonelist)
5607 {
5608 return __alloc_pages_internal(gfp_mask, order, zonelist, ((void *)0));
5609 }
5610
5611 static inline __attribute__((always_inline)) struct page *
5612 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
5613 struct zonelist *zonelist, nodemask_t *nodemask)
5614 {
5615 return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
5616 }
5617
5618
5619 static inline __attribute__((always_inline)) struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
5620 unsigned int order)
5621 {
5622 if (__builtin_expect(!!(order >= 14), 0))
5623 return ((void *)0);
5624
5625
5626 if (nid < 0)
5627 nid = (((void)(0),0));
5628
5629 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
5630 }
5631 # 228 "include/linux/gfp.h"
5632 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
5633 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
5634
5635 void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
5636 void free_pages_exact(void *virt, size_t size);
5637
5638
5639
5640
5641
5642
5643
5644 extern void __free_pages(struct page *page, unsigned int order);
5645 extern void free_pages(unsigned long addr, unsigned int order);
5646 extern void free_hot_page(struct page *page);
5647 extern void free_cold_page(struct page *page);
5648
5649
5650
5651
5652 void page_alloc_init(void);
5653 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
5654 void drain_all_pages(void);
5655 void drain_local_pages(void *dummy);
5656 # 13 "include/linux/slab.h" 2
5657 # 84 "include/linux/slab.h"
5658 void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init(void);
5659 int slab_is_available(void);
5660
5661 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
5662 unsigned long,
5663 void (*)(void *));
5664 void kmem_cache_destroy(struct kmem_cache *);
5665 int kmem_cache_shrink(struct kmem_cache *);
5666 void kmem_cache_free(struct kmem_cache *, void *);
5667 unsigned int kmem_cache_size(struct kmem_cache *);
5668 const char *kmem_cache_name(struct kmem_cache *);
5669 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
5670 # 127 "include/linux/slab.h"
5671 void * __attribute__((warn_unused_result)) __krealloc(const void *, size_t, gfp_t);
5672 void * __attribute__((warn_unused_result)) krealloc(const void *, size_t, gfp_t);
5673 void kfree(const void *);
5674 size_t ksize(const void *);
5675 # 152 "include/linux/slab.h"
5676 # 1 "include/linux/slub_def.h" 1
5677 # 11 "include/linux/slub_def.h"
5678 # 1 "include/linux/workqueue.h" 1
5679
5680
5681
5682
5683
5684
5685
5686 # 1 "include/linux/timer.h" 1
5687
5688
5689
5690
5691 # 1 "include/linux/ktime.h" 1
5692 # 24 "include/linux/ktime.h"
5693 # 1 "include/linux/time.h" 1
5694 # 9 "include/linux/time.h"
5695 # 1 "include/linux/math64.h" 1
5696
5697
5698
5699
5700 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/div64.h" 1
5701 # 6 "include/linux/math64.h" 2
5702 # 41 "include/linux/math64.h"
5703 static inline __attribute__((always_inline)) u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
5704 {
5705 *remainder = ({ uint32_t __base = (divisor); uint32_t __rem; (void)(((typeof((dividend)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((dividend) >> 32) == 0), 1)) { __rem = (uint32_t)(dividend) % __base; (dividend) = (uint32_t)(dividend) / __base; } else __rem = __div64_32(&(dividend), __base); __rem; });
5706 return dividend;
5707 }
5708
5709
5710
5711 extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
5712
5713
5714
5715 extern u64 div64_u64(u64 dividend, u64 divisor);
5716 # 66 "include/linux/math64.h"
5717 static inline __attribute__((always_inline)) u64 div_u64(u64 dividend, u32 divisor)
5718 {
5719 u32 remainder;
5720 return div_u64_rem(dividend, divisor, &remainder);
5721 }
5722
5723
5724
5725
5726
5727
5728 static inline __attribute__((always_inline)) s64 div_s64(s64 dividend, s32 divisor)
5729 {
5730 s32 remainder;
5731 return div_s64_rem(dividend, divisor, &remainder);
5732 }
5733
5734
5735 u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
5736
5737 static inline __attribute__((always_inline)) __attribute__((always_inline)) u32
5738 __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
5739 {
5740 u32 ret = 0;
5741
5742 while (dividend >= divisor) {
5743
5744
5745 asm("" : "+rm"(dividend));
5746
5747 dividend -= divisor;
5748 ret++;
5749 }
5750
5751 *remainder = dividend;
5752
5753 return ret;
5754 }
5755 # 10 "include/linux/time.h" 2
5756
5757
5758
5759
5760 struct timespec {
5761 time_t tv_sec;
5762 long tv_nsec;
5763 };
5764
5765
5766 struct timeval {
5767 time_t tv_sec;
5768 suseconds_t tv_usec;
5769 };
5770
5771 struct timezone {
5772 int tz_minuteswest;
5773 int tz_dsttime;
5774 };
5775
5776
5777
5778 extern struct timezone sys_tz;
5779 # 45 "include/linux/time.h"
5780 static inline __attribute__((always_inline)) int timespec_equal(const struct timespec *a,
5781 const struct timespec *b)
5782 {
5783 return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
5784 }
5785
5786
5787
5788
5789
5790
5791 static inline __attribute__((always_inline)) int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
5792 {
5793 if (lhs->tv_sec < rhs->tv_sec)
5794 return -1;
5795 if (lhs->tv_sec > rhs->tv_sec)
5796 return 1;
5797 return lhs->tv_nsec - rhs->tv_nsec;
5798 }
5799
5800 static inline __attribute__((always_inline)) int timeval_compare(const struct timeval *lhs, const struct timeval *rhs)
5801 {
5802 if (lhs->tv_sec < rhs->tv_sec)
5803 return -1;
5804 if (lhs->tv_sec > rhs->tv_sec)
5805 return 1;
5806 return lhs->tv_usec - rhs->tv_usec;
5807 }
5808
5809 extern unsigned long mktime(const unsigned int year, const unsigned int mon,
5810 const unsigned int day, const unsigned int hour,
5811 const unsigned int min, const unsigned int sec);
5812
5813 extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);
5814 extern struct timespec timespec_add_safe(const struct timespec lhs,
5815 const struct timespec rhs);
5816
5817
5818
5819
5820 static inline __attribute__((always_inline)) struct timespec timespec_sub(struct timespec lhs,
5821 struct timespec rhs)
5822 {
5823 struct timespec ts_delta;
5824 set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
5825 lhs.tv_nsec - rhs.tv_nsec);
5826 return ts_delta;
5827 }
5828
5829
5830
5831
5832
5833
5834
5835 extern struct timespec xtime;
5836 extern struct timespec wall_to_monotonic;
5837 extern seqlock_t xtime_lock;
5838
5839 extern unsigned long read_persistent_clock(void);
5840 extern int update_persistent_clock(struct timespec now);
5841 extern int no_sync_cmos_clock ;
5842 void timekeeping_init(void);
5843
5844 unsigned long get_seconds(void);
5845 struct timespec current_kernel_time(void);
5846
5847
5848
5849
5850 extern void do_gettimeofday(struct timeval *tv);
5851 extern int do_settimeofday(struct timespec *tv);
5852 extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
5853
5854 extern long do_utimes(int dfd, char *filename, struct timespec *times, int flags);
5855 struct itimerval;
5856 extern int do_setitimer(int which, struct itimerval *value,
5857 struct itimerval *ovalue);
5858 extern unsigned int alarm_setitimer(unsigned int seconds);
5859 extern int do_getitimer(int which, struct itimerval *value);
5860 extern void getnstimeofday(struct timespec *tv);
5861 extern void getrawmonotonic(struct timespec *ts);
5862 extern void getboottime(struct timespec *ts);
5863 extern void monotonic_to_bootbased(struct timespec *ts);
5864
5865 extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
5866 extern int timekeeping_valid_for_hres(void);
5867 extern void update_wall_time(void);
5868 extern void update_xtime_cache(u64 nsec);
5869
5870 struct tms;
5871 extern void do_sys_times(struct tms *);
5872 # 145 "include/linux/time.h"
5873 static inline __attribute__((always_inline)) s64 timespec_to_ns(const struct timespec *ts)
5874 {
5875 return ((s64) ts->tv_sec * 1000000000L) + ts->tv_nsec;
5876 }
5877 # 157 "include/linux/time.h"
5878 static inline __attribute__((always_inline)) s64 timeval_to_ns(const struct timeval *tv)
5879 {
5880 return ((s64) tv->tv_sec * 1000000000L) +
5881 tv->tv_usec * 1000L;
5882 }
5883
5884
5885
5886
5887
5888
5889
5890 extern struct timespec ns_to_timespec(const s64 nsec);
5891
5892
5893
5894
5895
5896
5897
5898 extern struct timeval ns_to_timeval(const s64 nsec);
5899 # 187 "include/linux/time.h"
5900 static inline __attribute__((always_inline)) __attribute__((always_inline)) void timespec_add_ns(struct timespec *a, u64 ns)
5901 {
5902 a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, 1000000000L, &ns);
5903 a->tv_nsec = ns;
5904 }
5905 # 210 "include/linux/time.h"
5906 struct itimerspec {
5907 struct timespec it_interval;
5908 struct timespec it_value;
5909 };
5910
5911 struct itimerval {
5912 struct timeval it_interval;
5913 struct timeval it_value;
5914 };
5915 # 25 "include/linux/ktime.h" 2
5916 # 1 "include/linux/jiffies.h" 1
5917
5918
5919
5920
5921
5922
5923
5924 # 1 "include/linux/timex.h" 1
5925 # 100 "include/linux/timex.h"
5926 struct timex {
5927 unsigned int modes;
5928 long offset;
5929 long freq;
5930 long maxerror;
5931 long esterror;
5932 int status;
5933 long constant;
5934 long precision;
5935 long tolerance;
5936
5937
5938 struct timeval time;
5939 long tick;
5940
5941 long ppsfreq;
5942 long jitter;
5943 int shift;
5944 long stabil;
5945 long jitcnt;
5946 long calcnt;
5947 long errcnt;
5948 long stbcnt;
5949
5950 int tai;
5951
5952 int :32; int :32; int :32; int :32;
5953 int :32; int :32; int :32; int :32;
5954 int :32; int :32; int :32;
5955 };
5956 # 202 "include/linux/timex.h"
5957 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/timex.h" 1
5958 # 14 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/timex.h"
5959 typedef unsigned long long cycles_t;
5960
5961 static inline __attribute__((always_inline)) cycles_t get_cycles(void)
5962 {
5963 unsigned long tmp, tmp2;
5964 __asm__ __volatile__("%0 = cycles; %1 = cycles2;" : "=d"(tmp), "=d"(tmp2));
5965 return tmp | ((cycles_t)tmp2 << 32);
5966 }
5967 # 203 "include/linux/timex.h" 2
5968
5969
5970
5971
5972
5973
5974 extern unsigned long tick_usec;
5975 extern unsigned long tick_nsec;
5976 extern int tickadj;
5977
5978
5979
5980
5981 extern int time_status;
5982 extern long time_maxerror;
5983 extern long time_esterror;
5984
5985 extern long time_adjust;
5986
5987 extern void ntp_init(void);
5988 extern void ntp_clear(void);
5989
5990
5991
5992
5993
5994 static inline __attribute__((always_inline)) int ntp_synced(void)
5995 {
5996 return !(time_status & 0x0040);
5997 }
5998 # 251 "include/linux/timex.h"
5999 extern u64 tick_length;
6000
6001 extern void second_overflow(void);
6002 extern void update_ntp_one_tick(void);
6003 extern int do_adjtimex(struct timex *);
6004
6005
6006
6007
6008 int read_current_timer(unsigned long *timer_val);
6009 # 9 "include/linux/jiffies.h" 2
6010 # 81 "include/linux/jiffies.h"
6011 extern u64 __attribute__((section(".data"))) jiffies_64;
6012 extern unsigned long volatile __attribute__((section(".data"))) jiffies;
6013
6014
6015 u64 get_jiffies_64(void);
6016 # 173 "include/linux/jiffies.h"
6017 extern unsigned long preset_lpj;
6018 # 286 "include/linux/jiffies.h"
6019 extern unsigned int jiffies_to_msecs(const unsigned long j);
6020 extern unsigned int jiffies_to_usecs(const unsigned long j);
6021 extern unsigned long msecs_to_jiffies(const unsigned int m);
6022 extern unsigned long usecs_to_jiffies(const unsigned int u);
6023 extern unsigned long timespec_to_jiffies(const struct timespec *value);
6024 extern void jiffies_to_timespec(const unsigned long jiffies,
6025 struct timespec *value);
6026 extern unsigned long timeval_to_jiffies(const struct timeval *value);
6027 extern void jiffies_to_timeval(const unsigned long jiffies,
6028 struct timeval *value);
6029 extern clock_t jiffies_to_clock_t(long x);
6030 extern unsigned long clock_t_to_jiffies(unsigned long x);
6031 extern u64 jiffies_64_to_clock_t(u64 x);
6032 extern u64 nsec_to_clock_t(u64 x);
6033 # 26 "include/linux/ktime.h" 2
6034 # 46 "include/linux/ktime.h"
6035 union ktime {
6036 s64 tv64;
6037
6038 struct {
6039
6040
6041
6042 s32 nsec, sec;
6043
6044 } tv;
6045
6046 };
6047
6048 typedef union ktime ktime_t;
6049 # 151 "include/linux/ktime.h"
6050 static inline __attribute__((always_inline)) ktime_t ktime_set(const long secs, const unsigned long nsecs)
6051 {
6052 return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } };
6053 }
6054 # 163 "include/linux/ktime.h"
6055 static inline __attribute__((always_inline)) ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
6056 {
6057 ktime_t res;
6058
6059 res.tv64 = lhs.tv64 - rhs.tv64;
6060 if (res.tv.nsec < 0)
6061 res.tv.nsec += 1000000000L;
6062
6063 return res;
6064 }
6065 # 181 "include/linux/ktime.h"
6066 static inline __attribute__((always_inline)) ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
6067 {
6068 ktime_t res;
6069
6070 res.tv64 = add1.tv64 + add2.tv64;
6071 # 194 "include/linux/ktime.h"
6072 if (res.tv.nsec >= 1000000000L)
6073 res.tv64 += (u32)-1000000000L;
6074
6075 return res;
6076 }
6077 # 207 "include/linux/ktime.h"
6078 extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
6079 # 216 "include/linux/ktime.h"
6080 extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec);
6081
6082
6083
6084
6085
6086
6087
6088 static inline __attribute__((always_inline)) ktime_t timespec_to_ktime(const struct timespec ts)
6089 {
6090 return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec,
6091 .nsec = (s32)ts.tv_nsec } };
6092 }
6093
6094
6095
6096
6097
6098
6099
6100 static inline __attribute__((always_inline)) ktime_t timeval_to_ktime(const struct timeval tv)
6101 {
6102 return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec,
6103 .nsec = (s32)tv.tv_usec * 1000 } };
6104 }
6105
6106
6107
6108
6109
6110
6111
6112 static inline __attribute__((always_inline)) struct timespec ktime_to_timespec(const ktime_t kt)
6113 {
6114 return (struct timespec) { .tv_sec = (time_t) kt.tv.sec,
6115 .tv_nsec = (long) kt.tv.nsec };
6116 }
6117
6118
6119
6120
6121
6122
6123
6124 static inline __attribute__((always_inline)) struct timeval ktime_to_timeval(const ktime_t kt)
6125 {
6126 return (struct timeval) {
6127 .tv_sec = (time_t) kt.tv.sec,
6128 .tv_usec = (suseconds_t) (kt.tv.nsec / 1000L) };
6129 }
6130
6131
6132
6133
6134
6135
6136
6137 static inline __attribute__((always_inline)) s64 ktime_to_ns(const ktime_t kt)
6138 {
6139 return (s64) kt.tv.sec * 1000000000L + kt.tv.nsec;
6140 }
6141 # 287 "include/linux/ktime.h"
6142 static inline __attribute__((always_inline)) int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
6143 {
6144 return cmp1.tv64 == cmp2.tv64;
6145 }
6146
6147 static inline __attribute__((always_inline)) s64 ktime_to_us(const ktime_t kt)
6148 {
6149 struct timeval tv = ktime_to_timeval(kt);
6150 return (s64) tv.tv_sec * 1000000L + tv.tv_usec;
6151 }
6152
6153 static inline __attribute__((always_inline)) s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
6154 {
6155 return ktime_to_us(ktime_sub(later, earlier));
6156 }
6157
6158 static inline __attribute__((always_inline)) ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
6159 {
6160 return ktime_add_ns(kt, usec * 1000);
6161 }
6162
6163 static inline __attribute__((always_inline)) ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
6164 {
6165 return ktime_sub_ns(kt, usec * 1000);
6166 }
6167
6168 extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
6169 # 325 "include/linux/ktime.h"
6170 extern void ktime_get_ts(struct timespec *ts);
6171
6172
6173
6174
6175 static inline __attribute__((always_inline)) ktime_t ns_to_ktime(u64 ns)
6176 {
6177 static const ktime_t ktime_zero = { .tv64 = 0 };
6178 return ktime_add_ns(ktime_zero, ns);
6179 }
6180 # 6 "include/linux/timer.h" 2
6181
6182 # 1 "include/linux/debugobjects.h" 1
6183
6184
6185
6186
6187
6188
6189 enum debug_obj_state {
6190 ODEBUG_STATE_NONE,
6191 ODEBUG_STATE_INIT,
6192 ODEBUG_STATE_INACTIVE,
6193 ODEBUG_STATE_ACTIVE,
6194 ODEBUG_STATE_DESTROYED,
6195 ODEBUG_STATE_NOTAVAILABLE,
6196 ODEBUG_STATE_MAX,
6197 };
6198
6199 struct debug_obj_descr;
6200 # 26 "include/linux/debugobjects.h"
6201 struct debug_obj {
6202 struct hlist_node node;
6203 enum debug_obj_state state;
6204 void *object;
6205 struct debug_obj_descr *descr;
6206 };
6207 # 45 "include/linux/debugobjects.h"
6208 struct debug_obj_descr {
6209 const char *name;
6210
6211 int (*fixup_init) (void *addr, enum debug_obj_state state);
6212 int (*fixup_activate) (void *addr, enum debug_obj_state state);
6213 int (*fixup_destroy) (void *addr, enum debug_obj_state state);
6214 int (*fixup_free) (void *addr, enum debug_obj_state state);
6215 };
6216
6217
6218 extern void debug_object_init (void *addr, struct debug_obj_descr *descr);
6219 extern void
6220 debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
6221 extern void debug_object_activate (void *addr, struct debug_obj_descr *descr);
6222 extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
6223 extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
6224 extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
6225
6226 extern void debug_objects_early_init(void);
6227 extern void debug_objects_mem_init(void);
6228 # 84 "include/linux/debugobjects.h"
6229 extern void debug_check_no_obj_freed(const void *address, unsigned long size);
6230 # 8 "include/linux/timer.h" 2
6231
6232 struct tvec_base;
6233
6234 struct timer_list {
6235 struct list_head entry;
6236 unsigned long expires;
6237
6238 void (*function)(unsigned long);
6239 unsigned long data;
6240
6241 struct tvec_base *base;
6242
6243 void *start_site;
6244 char start_comm[16];
6245 int start_pid;
6246
6247 };
6248
6249 extern struct tvec_base boot_tvec_bases;
6250 # 40 "include/linux/timer.h"
6251 void init_timer(struct timer_list *timer);
6252 void init_timer_deferrable(struct timer_list *timer);
6253
6254
6255
6256
6257
6258 static inline __attribute__((always_inline)) void destroy_timer_on_stack(struct timer_list *timer) { }
6259 static inline __attribute__((always_inline)) void init_timer_on_stack(struct timer_list *timer)
6260 {
6261 init_timer(timer);
6262 }
6263
6264
6265 static inline __attribute__((always_inline)) void setup_timer(struct timer_list * timer,
6266 void (*function)(unsigned long),
6267 unsigned long data)
6268 {
6269 timer->function = function;
6270 timer->data = data;
6271 init_timer(timer);
6272 }
6273
6274 static inline __attribute__((always_inline)) void setup_timer_on_stack(struct timer_list *timer,
6275 void (*function)(unsigned long),
6276 unsigned long data)
6277 {
6278 timer->function = function;
6279 timer->data = data;
6280 init_timer_on_stack(timer);
6281 }
6282 # 82 "include/linux/timer.h"
6283 static inline __attribute__((always_inline)) int timer_pending(const struct timer_list * timer)
6284 {
6285 return timer->entry.next != ((void *)0);
6286 }
6287
6288 extern void add_timer_on(struct timer_list *timer, int cpu);
6289 extern int del_timer(struct timer_list * timer);
6290 extern int __mod_timer(struct timer_list *timer, unsigned long expires);
6291 extern int mod_timer(struct timer_list *timer, unsigned long expires);
6292 # 102 "include/linux/timer.h"
6293 extern unsigned long next_timer_interrupt(void);
6294
6295
6296
6297
6298
6299 extern unsigned long get_next_timer_interrupt(unsigned long now);
6300 # 117 "include/linux/timer.h"
6301 extern void init_timer_stats(void);
6302
6303 extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
6304 void *timerf, char *comm,
6305 unsigned int timer_flag);
6306
6307 extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
6308 void *addr);
6309
6310 static inline __attribute__((always_inline)) void timer_stats_timer_set_start_info(struct timer_list *timer)
6311 {
6312 __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
6313 }
6314
6315 static inline __attribute__((always_inline)) void timer_stats_timer_clear_start_info(struct timer_list *timer)
6316 {
6317 timer->start_site = ((void *)0);
6318 }
6319 # 163 "include/linux/timer.h"
6320 static inline __attribute__((always_inline)) void add_timer(struct timer_list *timer)
6321 {
6322 do { if (__builtin_expect(!!(timer_pending(timer)), 0)) do { dump_bfin_trace_buffer(); printk("<0>" "BUG: failure at %s:%d/%s()!\n", "include/linux/timer.h", 165, __func__); panic("BUG!"); } while (0); } while(0);
6323 __mod_timer(timer, timer->expires);
6324 }
6325 # 179 "include/linux/timer.h"
6326 extern void init_timers(void);
6327 extern void run_local_timers(void);
6328 struct hrtimer;
6329 extern enum hrtimer_restart it_real_fn(struct hrtimer *);
6330
6331 unsigned long __round_jiffies(unsigned long j, int cpu);
6332 unsigned long __round_jiffies_relative(unsigned long j, int cpu);
6333 unsigned long round_jiffies(unsigned long j);
6334 unsigned long round_jiffies_relative(unsigned long j);
6335
6336 unsigned long __round_jiffies_up(unsigned long j, int cpu);
6337 unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
6338 unsigned long round_jiffies_up(unsigned long j);
6339 unsigned long round_jiffies_up_relative(unsigned long j);
6340 # 9 "include/linux/workqueue.h" 2
6341
6342
6343
6344
6345
6346 struct workqueue_struct;
6347
6348 struct work_struct;
6349 typedef void (*work_func_t)(struct work_struct *work);
6350
6351
6352
6353
6354
6355
6356
6357 struct work_struct {
6358 atomic_long_t data;
6359
6360
6361
6362 struct list_head entry;
6363 work_func_t func;
6364
6365
6366
6367 };
6368
6369
6370
6371 struct delayed_work {
6372 struct work_struct work;
6373 struct timer_list timer;
6374 };
6375
6376 struct execute_work {
6377 struct work_struct work;
6378 };
6379 # 150 "include/linux/workqueue.h"
6380 extern struct workqueue_struct *
6381 __create_workqueue_key(const char *name, int singlethread,
6382 int freezeable, int rt, struct lock_class_key *key,
6383 const char *lock_name);
6384 # 181 "include/linux/workqueue.h"
6385 extern void destroy_workqueue(struct workqueue_struct *wq);
6386
6387 extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
6388 extern int queue_work_on(int cpu, struct workqueue_struct *wq,
6389 struct work_struct *work);
6390 extern int queue_delayed_work(struct workqueue_struct *wq,
6391 struct delayed_work *work, unsigned long delay);
6392 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
6393 struct delayed_work *work, unsigned long delay);
6394
6395 extern void flush_workqueue(struct workqueue_struct *wq);
6396 extern void flush_scheduled_work(void);
6397
6398 extern int schedule_work(struct work_struct *work);
6399 extern int schedule_work_on(int cpu, struct work_struct *work);
6400 extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
6401 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
6402 unsigned long delay);
6403 extern int schedule_on_each_cpu(work_func_t func);
6404 extern int current_is_keventd(void);
6405 extern int keventd_up(void);
6406
6407 extern void init_workqueues(void);
6408 int execute_in_process_context(work_func_t fn, struct execute_work *);
6409
6410 extern int flush_work(struct work_struct *work);
6411
6412 extern int cancel_work_sync(struct work_struct *work);
6413
6414
6415
6416
6417
6418
6419
6420 static inline __attribute__((always_inline)) int cancel_delayed_work(struct delayed_work *work)
6421 {
6422 int ret;
6423
6424 ret = del_timer(&work->timer);
6425 if (ret)
6426 clear_bit(0, ((unsigned long *)(&(&work->work)->data)));
6427 return ret;
6428 }
6429
6430 extern int cancel_delayed_work_sync(struct delayed_work *work);
6431
6432
6433 static inline __attribute__((always_inline))
6434 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
6435 struct delayed_work *work)
6436 {
6437 cancel_delayed_work_sync(work);
6438 }
6439
6440
6441 static inline __attribute__((always_inline))
6442 void cancel_rearming_delayed_work(struct delayed_work *work)
6443 {
6444 cancel_delayed_work_sync(work);
6445 }
6446
6447
6448 static inline __attribute__((always_inline)) long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
6449 {
6450 return fn(arg);
6451 }
6452 # 12 "include/linux/slub_def.h" 2
6453 # 1 "include/linux/kobject.h" 1
6454 # 21 "include/linux/kobject.h"
6455 # 1 "include/linux/sysfs.h" 1
6456 # 20 "include/linux/sysfs.h"
6457 struct kobject;
6458 struct module;
6459
6460
6461
6462
6463
6464
6465 struct attribute {
6466 const char *name;
6467 struct module *owner;
6468 mode_t mode;
6469 };
6470
6471 struct attribute_group {
6472 const char *name;
6473 mode_t (*is_visible)(struct kobject *,
6474 struct attribute *, int);
6475 struct attribute **attrs;
6476 };
6477 # 63 "include/linux/sysfs.h"
6478 struct vm_area_struct;
6479
6480 struct bin_attribute {
6481 struct attribute attr;
6482 size_t size;
6483 void *private;
6484 ssize_t (*read)(struct kobject *, struct bin_attribute *,
6485 char *, loff_t, size_t);
6486 ssize_t (*write)(struct kobject *, struct bin_attribute *,
6487 char *, loff_t, size_t);
6488 int (*mmap)(struct kobject *, struct bin_attribute *attr,
6489 struct vm_area_struct *vma);
6490 };
6491
6492 struct sysfs_ops {
6493 ssize_t (*show)(struct kobject *, struct attribute *,char *);
6494 ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
6495 };
6496
6497 struct sysfs_dirent;
6498 # 134 "include/linux/sysfs.h"
6499 static inline __attribute__((always_inline)) int sysfs_schedule_callback(struct kobject *kobj,
6500 void (*func)(void *), void *data, struct module *owner)
6501 {
6502 return -38;
6503 }
6504
6505 static inline __attribute__((always_inline)) int sysfs_create_dir(struct kobject *kobj)
6506 {
6507 return 0;
6508 }
6509
6510 static inline __attribute__((always_inline)) void sysfs_remove_dir(struct kobject *kobj)
6511 {
6512 }
6513
6514 static inline __attribute__((always_inline)) int sysfs_rename_dir(struct kobject *kobj, const char *new_name)
6515 {
6516 return 0;
6517 }
6518
6519 static inline __attribute__((always_inline)) int sysfs_move_dir(struct kobject *kobj,
6520 struct kobject *new_parent_kobj)
6521 {
6522 return 0;
6523 }
6524
6525 static inline __attribute__((always_inline)) int sysfs_create_file(struct kobject *kobj,
6526 const struct attribute *attr)
6527 {
6528 return 0;
6529 }
6530
6531 static inline __attribute__((always_inline)) int sysfs_chmod_file(struct kobject *kobj,
6532 struct attribute *attr, mode_t mode)
6533 {
6534 return 0;
6535 }
6536
6537 static inline __attribute__((always_inline)) void sysfs_remove_file(struct kobject *kobj,
6538 const struct attribute *attr)
6539 {
6540 }
6541
6542 static inline __attribute__((always_inline)) int sysfs_create_bin_file(struct kobject *kobj,
6543 struct bin_attribute *attr)
6544 {
6545 return 0;
6546 }
6547
6548 static inline __attribute__((always_inline)) void sysfs_remove_bin_file(struct kobject *kobj,
6549 struct bin_attribute *attr)
6550 {
6551 }
6552
6553 static inline __attribute__((always_inline)) int sysfs_create_link(struct kobject *kobj,
6554 struct kobject *target, const char *name)
6555 {
6556 return 0;
6557 }
6558
6559 static inline __attribute__((always_inline)) int sysfs_create_link_nowarn(struct kobject *kobj,
6560 struct kobject *target,
6561 const char *name)
6562 {
6563 return 0;
6564 }
6565
6566 static inline __attribute__((always_inline)) void sysfs_remove_link(struct kobject *kobj, const char *name)
6567 {
6568 }
6569
6570 static inline __attribute__((always_inline)) int sysfs_create_group(struct kobject *kobj,
6571 const struct attribute_group *grp)
6572 {
6573 return 0;
6574 }
6575
6576 static inline __attribute__((always_inline)) int sysfs_update_group(struct kobject *kobj,
6577 const struct attribute_group *grp)
6578 {
6579 return 0;
6580 }
6581
6582 static inline __attribute__((always_inline)) void sysfs_remove_group(struct kobject *kobj,
6583 const struct attribute_group *grp)
6584 {
6585 }
6586
6587 static inline __attribute__((always_inline)) int sysfs_add_file_to_group(struct kobject *kobj,
6588 const struct attribute *attr, const char *group)
6589 {
6590 return 0;
6591 }
6592
6593 static inline __attribute__((always_inline)) void sysfs_remove_file_from_group(struct kobject *kobj,
6594 const struct attribute *attr, const char *group)
6595 {
6596 }
6597
6598 static inline __attribute__((always_inline)) void sysfs_notify(struct kobject *kobj, const char *dir,
6599 const char *attr)
6600 {
6601 }
6602 static inline __attribute__((always_inline)) void sysfs_notify_dirent(struct sysfs_dirent *sd)
6603 {
6604 }
6605 static inline __attribute__((always_inline))
6606 struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
6607 const unsigned char *name)
6608 {
6609 return ((void *)0);
6610 }
6611 static inline __attribute__((always_inline)) struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd)
6612 {
6613 return ((void *)0);
6614 }
6615 static inline __attribute__((always_inline)) void sysfs_put(struct sysfs_dirent *sd)
6616 {
6617 }
6618
6619 static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) sysfs_init(void)
6620 {
6621 return 0;
6622 }
6623
6624 static inline __attribute__((always_inline)) void sysfs_printk_last_file(void)
6625 {
6626 }
6627 # 22 "include/linux/kobject.h" 2
6628
6629
6630 # 1 "include/linux/kref.h" 1
6631 # 21 "include/linux/kref.h"
6632 struct kref {
6633 atomic_t refcount;
6634 };
6635
6636 void kref_set(struct kref *kref, int num);
6637 void kref_init(struct kref *kref);
6638 void kref_get(struct kref *kref);
6639 int kref_put(struct kref *kref, void (*release) (struct kref *kref));
6640 # 25 "include/linux/kobject.h" 2
6641 # 34 "include/linux/kobject.h"
6642 extern char uevent_helper[];
6643
6644
6645 extern u64 uevent_seqnum;
6646 # 49 "include/linux/kobject.h"
6647 enum kobject_action {
6648 KOBJ_ADD,
6649 KOBJ_REMOVE,
6650 KOBJ_CHANGE,
6651 KOBJ_MOVE,
6652 KOBJ_ONLINE,
6653 KOBJ_OFFLINE,
6654 KOBJ_MAX
6655 };
6656
6657 struct kobject {
6658 const char *name;
6659 struct list_head entry;
6660 struct kobject *parent;
6661 struct kset *kset;
6662 struct kobj_type *ktype;
6663 struct sysfs_dirent *sd;
6664 struct kref kref;
6665 unsigned int state_initialized:1;
6666 unsigned int state_in_sysfs:1;
6667 unsigned int state_add_uevent_sent:1;
6668 unsigned int state_remove_uevent_sent:1;
6669 };
6670
6671 extern int kobject_set_name(struct kobject *kobj, const char *name, ...)
6672 __attribute__((format(printf, 2, 3)));
6673
6674 static inline __attribute__((always_inline)) const char *kobject_name(const struct kobject *kobj)
6675 {
6676 return kobj->name;
6677 }
6678
6679 extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
6680 extern int __attribute__((warn_unused_result)) kobject_add(struct kobject *kobj,
6681 struct kobject *parent,
6682 const char *fmt, ...);
6683 extern int __attribute__((warn_unused_result)) kobject_init_and_add(struct kobject *kobj,
6684 struct kobj_type *ktype,
6685 struct kobject *parent,
6686 const char *fmt, ...);
6687
6688 extern void kobject_del(struct kobject *kobj);
6689
6690 extern struct kobject * __attribute__((warn_unused_result)) kobject_create(void);
6691 extern struct kobject * __attribute__((warn_unused_result)) kobject_create_and_add(const char *name,
6692 struct kobject *parent);
6693
6694 extern int __attribute__((warn_unused_result)) kobject_rename(struct kobject *, const char *new_name);
6695 extern int __attribute__((warn_unused_result)) kobject_move(struct kobject *, struct kobject *);
6696
6697 extern struct kobject *kobject_get(struct kobject *kobj);
6698 extern void kobject_put(struct kobject *kobj);
6699
6700 extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
6701
6702 struct kobj_type {
6703 void (*release)(struct kobject *kobj);
6704 struct sysfs_ops *sysfs_ops;
6705 struct attribute **default_attrs;
6706 };
6707
6708 struct kobj_uevent_env {
6709 char *envp[32];
6710 int envp_idx;
6711 char buf[2048];
6712 int buflen;
6713 };
6714
6715 struct kset_uevent_ops {
6716 int (*filter)(struct kset *kset, struct kobject *kobj);
6717 const char *(*name)(struct kset *kset, struct kobject *kobj);
6718 int (*uevent)(struct kset *kset, struct kobject *kobj,
6719 struct kobj_uevent_env *env);
6720 };
6721
6722 struct kobj_attribute {
6723 struct attribute attr;
6724 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
6725 char *buf);
6726 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
6727 const char *buf, size_t count);
6728 };
6729
6730 extern struct sysfs_ops kobj_sysfs_ops;
6731 # 151 "include/linux/kobject.h"
6732 struct kset {
6733 struct list_head list;
6734 spinlock_t list_lock;
6735 struct kobject kobj;
6736 struct kset_uevent_ops *uevent_ops;
6737 };
6738
6739 extern void kset_init(struct kset *kset);
6740 extern int __attribute__((warn_unused_result)) kset_register(struct kset *kset);
6741 extern void kset_unregister(struct kset *kset);
6742 extern struct kset * __attribute__((warn_unused_result)) kset_create_and_add(const char *name,
6743 struct kset_uevent_ops *u,
6744 struct kobject *parent_kobj);
6745
6746 static inline __attribute__((always_inline)) struct kset *to_kset(struct kobject *kobj)
6747 {
6748 return kobj ? ({ const typeof( ((struct kset *)0)->kobj ) *__mptr = (kobj); (struct kset *)( (char *)__mptr - __builtin_offsetof(struct kset,kobj) );}) : ((void *)0);
6749 }
6750
6751 static inline __attribute__((always_inline)) struct kset *kset_get(struct kset *k)
6752 {
6753 return k ? to_kset(kobject_get(&k->kobj)) : ((void *)0);
6754 }
6755
6756 static inline __attribute__((always_inline)) void kset_put(struct kset *k)
6757 {
6758 kobject_put(&k->kobj);
6759 }
6760
6761 static inline __attribute__((always_inline)) struct kobj_type *get_ktype(struct kobject *kobj)
6762 {
6763 return kobj->ktype;
6764 }
6765
6766 extern struct kobject *kset_find_obj(struct kset *, const char *);
6767
6768
6769 extern struct kobject *kernel_kobj;
6770
6771 extern struct kobject *mm_kobj;
6772
6773 extern struct kobject *hypervisor_kobj;
6774
6775 extern struct kobject *power_kobj;
6776
6777 extern struct kobject *firmware_kobj;
6778 # 209 "include/linux/kobject.h"
6779 static inline __attribute__((always_inline)) int kobject_uevent(struct kobject *kobj,
6780 enum kobject_action action)
6781 { return 0; }
6782 static inline __attribute__((always_inline)) int kobject_uevent_env(struct kobject *kobj,
6783 enum kobject_action action,
6784 char *envp[])
6785 { return 0; }
6786
6787 static inline __attribute__((always_inline)) int add_uevent_var(struct kobj_uevent_env *env,
6788 const char *format, ...)
6789 { return 0; }
6790
6791 static inline __attribute__((always_inline)) int kobject_action_type(const char *buf, size_t count,
6792 enum kobject_action *type)
6793 { return -22; }
6794 # 13 "include/linux/slub_def.h" 2
6795
6796 enum stat_item {
6797 ALLOC_FASTPATH,
6798 ALLOC_SLOWPATH,
6799 FREE_FASTPATH,
6800 FREE_SLOWPATH,
6801 FREE_FROZEN,
6802 FREE_ADD_PARTIAL,
6803 FREE_REMOVE_PARTIAL,
6804 ALLOC_FROM_PARTIAL,
6805 ALLOC_SLAB,
6806 ALLOC_REFILL,
6807 FREE_SLAB,
6808 CPUSLAB_FLUSH,
6809 DEACTIVATE_FULL,
6810 DEACTIVATE_EMPTY,
6811 DEACTIVATE_TO_HEAD,
6812 DEACTIVATE_TO_TAIL,
6813 DEACTIVATE_REMOTE_FREES,
6814 ORDER_FALLBACK,
6815 NR_SLUB_STAT_ITEMS };
6816
6817 struct kmem_cache_cpu {
6818 void **freelist;
6819 struct page *page;
6820 int node;
6821 unsigned int offset;
6822 unsigned int objsize;
6823
6824
6825
6826 };
6827
6828 struct kmem_cache_node {
6829 spinlock_t list_lock;
6830 unsigned long nr_partial;
6831 unsigned long min_partial;
6832 struct list_head partial;
6833
6834
6835
6836
6837
6838 };
6839
6840
6841
6842
6843
6844
6845 struct kmem_cache_order_objects {
6846 unsigned long x;
6847 };
6848
6849
6850
6851
6852 struct kmem_cache {
6853
6854 unsigned long flags;
6855 int size;
6856 int objsize;
6857 int offset;
6858 struct kmem_cache_order_objects oo;
6859
6860
6861
6862
6863
6864 struct kmem_cache_node local_node;
6865
6866
6867 struct kmem_cache_order_objects max;
6868 struct kmem_cache_order_objects min;
6869 gfp_t allocflags;
6870 int refcount;
6871 void (*ctor)(void *);
6872 int inuse;
6873 int align;
6874 const char *name;
6875 struct list_head list;
6876 # 108 "include/linux/slub_def.h"
6877 struct kmem_cache_cpu cpu_slab;
6878
6879 };
6880 # 127 "include/linux/slub_def.h"
6881 extern struct kmem_cache kmalloc_caches[12 + 1];
6882
6883
6884
6885
6886
6887 static inline __attribute__((always_inline)) __attribute__((always_inline)) int kmalloc_index(size_t size)
6888 {
6889 if (!size)
6890 return 0;
6891
6892 if (size <= 8)
6893 return ( __builtin_constant_p(8) ? ( (8) < 1 ? ____ilog2_NaN() : (8) & (1ULL << 63) ? 63 : (8) & (1ULL << 62) ? 62 : (8) & (1ULL << 61) ? 61 : (8) & (1ULL << 60) ? 60 : (8) & (1ULL << 59) ? 59 : (8) & (1ULL << 58) ? 58 : (8) & (1ULL << 57) ? 57 : (8) & (1ULL << 56) ? 56 : (8) & (1ULL << 55) ? 55 : (8) & (1ULL << 54) ? 54 : (8) & (1ULL << 53) ? 53 : (8) & (1ULL << 52) ? 52 : (8) & (1ULL << 51) ? 51 : (8) & (1ULL << 50) ? 50 : (8) & (1ULL << 49) ? 49 : (8) & (1ULL << 48) ? 48 : (8) & (1ULL << 47) ? 47 : (8) & (1ULL << 46) ? 46 : (8) & (1ULL << 45) ? 45 : (8) & (1ULL << 44) ? 44 : (8) & (1ULL << 43) ? 43 : (8) & (1ULL << 42) ? 42 : (8) & (1ULL << 41) ? 41 : (8) & (1ULL << 40) ? 40 : (8) & (1ULL << 39) ? 39 : (8) & (1ULL << 38) ? 38 : (8) & (1ULL << 37) ? 37 : (8) & (1ULL << 36) ? 36 : (8) & (1ULL << 35) ? 35 : (8) & (1ULL << 34) ? 34 : (8) & (1ULL << 33) ? 33 : (8) & (1ULL << 32) ? 32 : (8) & (1ULL << 31) ? 31 : (8) & (1ULL << 30) ? 30 : (8) & (1ULL << 29) ? 29 : (8) & (1ULL << 28) ? 28 : (8) & (1ULL << 27) ? 27 : (8) & (1ULL << 26) ? 26 : (8) & (1ULL << 25) ? 25 : (8) & (1ULL << 24) ? 24 : (8) & (1ULL << 23) ? 23 : (8) & (1ULL << 22) ? 22 : (8) & (1ULL << 21) ? 21 : (8) & (1ULL << 20) ? 20 : (8) & (1ULL << 19) ? 19 : (8) & (1ULL << 18) ? 18 : (8) & (1ULL << 17) ? 17 : (8) & (1ULL << 16) ? 16 : (8) & (1ULL << 15) ? 15 : (8) & (1ULL << 14) ? 14 : (8) & (1ULL << 13) ? 13 : (8) & (1ULL << 12) ? 12 : (8) & (1ULL << 11) ? 11 : (8) & (1ULL << 10) ? 10 : (8) & (1ULL << 9) ? 9 : (8) & (1ULL << 8) ? 8 : (8) & (1ULL << 7) ? 7 : (8) & (1ULL << 6) ? 6 : (8) & (1ULL << 5) ? 5 : (8) & (1ULL << 4) ? 4 : (8) & (1ULL << 3) ? 3 : (8) & (1ULL << 2) ? 2 : (8) & (1ULL << 1) ? 1 : (8) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(8) <= 4) ? __ilog2_u32(8) : __ilog2_u64(8) );
6894
6895
6896 if (size > 64 && size <= 96)
6897 return 1;
6898 if (size > 128 && size <= 192)
6899 return 2;
6900
6901 if (size <= 8) return 3;
6902 if (size <= 16) return 4;
6903 if (size <= 32) return 5;
6904 if (size <= 64) return 6;
6905 if (size <= 128) return 7;
6906 if (size <= 256) return 8;
6907 if (size <= 512) return 9;
6908 if (size <= 1024) return 10;
6909 if (size <= 2 * 1024) return 11;
6910 if (size <= 4 * 1024) return 12;
6911
6912
6913
6914
6915 if (size <= 8 * 1024) return 13;
6916 if (size <= 16 * 1024) return 14;
6917 if (size <= 32 * 1024) return 15;
6918 if (size <= 64 * 1024) return 16;
6919 if (size <= 128 * 1024) return 17;
6920 if (size <= 256 * 1024) return 18;
6921 if (size <= 512 * 1024) return 19;
6922 if (size <= 1024 * 1024) return 20;
6923 if (size <= 2 * 1024 * 1024) return 21;
6924 return -1;
6925 # 179 "include/linux/slub_def.h"
6926 }
6927
6928
6929
6930
6931
6932
6933
6934 static inline __attribute__((always_inline)) __attribute__((always_inline)) struct kmem_cache *kmalloc_slab(size_t size)
6935 {
6936 int index = kmalloc_index(size);
6937
6938 if (index == 0)
6939 return ((void *)0);
6940
6941 return &kmalloc_caches[index];
6942 }
6943 # 204 "include/linux/slub_def.h"
6944 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
6945 void *__kmalloc(size_t size, gfp_t flags);
6946
6947 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc_large(size_t size, gfp_t flags)
6948 {
6949 return (void *)__get_free_pages(flags | (( gfp_t)0x4000u), get_order(size));
6950 }
6951
6952 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc(size_t size, gfp_t flags)
6953 {
6954 if (__builtin_constant_p(size)) {
6955 if (size > (1UL << 12))
6956 return kmalloc_large(size, flags);
6957
6958 if (!(flags & (( gfp_t)0x01u))) {
6959 struct kmem_cache *s = kmalloc_slab(size);
6960
6961 if (!s)
6962 return ((void *)16);
6963
6964 return kmem_cache_alloc(s, flags);
6965 }
6966 }
6967 return __kmalloc(size, flags);
6968 }
6969 # 153 "include/linux/slab.h" 2
6970 # 210 "include/linux/slab.h"
6971 static inline __attribute__((always_inline)) void *kcalloc(size_t n, size_t size, gfp_t flags)
6972 {
6973 if (size != 0 && n > (~0UL) / size)
6974 return ((void *)0);
6975 return __kmalloc(n * size, flags | (( gfp_t)0x8000u));
6976 }
6977 # 228 "include/linux/slab.h"
6978 static inline __attribute__((always_inline)) void *kmalloc_node(size_t size, gfp_t flags, int node)
6979 {
6980 return kmalloc(size, flags);
6981 }
6982
6983 static inline __attribute__((always_inline)) void *__kmalloc_node(size_t size, gfp_t flags, int node)
6984 {
6985 return __kmalloc(size, flags);
6986 }
6987
6988 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
6989
6990 static inline __attribute__((always_inline)) void *kmem_cache_alloc_node(struct kmem_cache *cachep,
6991 gfp_t flags, int node)
6992 {
6993 return kmem_cache_alloc(cachep, flags);
6994 }
6995 # 256 "include/linux/slab.h"
6996 extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
6997 # 293 "include/linux/slab.h"
6998 static inline __attribute__((always_inline)) void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
6999 {
7000 return kmem_cache_alloc(k, flags | (( gfp_t)0x8000u));
7001 }
7002
7003
7004
7005
7006
7007
7008 static inline __attribute__((always_inline)) void *kzalloc(size_t size, gfp_t flags)
7009 {
7010 return kmalloc(size, flags | (( gfp_t)0x8000u));
7011 }
7012
7013
7014
7015
7016
7017
7018
7019 static inline __attribute__((always_inline)) void *kzalloc_node(size_t size, gfp_t flags, int node)
7020 {
7021 return kmalloc_node(size, flags | (( gfp_t)0x8000u), node);
7022 }
7023 # 6 "include/linux/percpu.h" 2
7024
7025
7026
7027 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/percpu.h" 1
7028
7029
7030
7031 # 1 "include/asm-generic/percpu.h" 1
7032 # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/percpu.h" 2
7033 # 10 "include/linux/percpu.h" 2
7034 # 91 "include/linux/percpu.h"
7035 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
7036 {
7037 return kzalloc(size, gfp);
7038 }
7039
7040 static inline __attribute__((always_inline)) void percpu_free(void *__pdata)
7041 {
7042 kfree(__pdata);
7043 }
7044 # 40 "include/linux/rcupdate.h" 2
7045
7046
7047
7048 # 1 "include/linux/completion.h" 1
7049 # 25 "include/linux/completion.h"
7050 struct completion {
7051 unsigned int done;
7052 wait_queue_head_t wait;
7053 };
7054 # 73 "include/linux/completion.h"
7055 static inline __attribute__((always_inline)) void init_completion(struct completion *x)
7056 {
7057 x->done = 0;
7058 init_waitqueue_head(&x->wait);
7059 }
7060
7061 extern void wait_for_completion(struct completion *);
7062 extern int wait_for_completion_interruptible(struct completion *x);
7063 extern int wait_for_completion_killable(struct completion *x);
7064 extern unsigned long wait_for_completion_timeout(struct completion *x,
7065 unsigned long timeout);
7066 extern unsigned long wait_for_completion_interruptible_timeout(
7067 struct completion *x, unsigned long timeout);
7068 extern bool try_wait_for_completion(struct completion *x);
7069 extern bool completion_done(struct completion *x);
7070
7071 extern void complete(struct completion *);
7072 extern void complete_all(struct completion *);
7073 # 44 "include/linux/rcupdate.h" 2
7074
7075
7076
7077
7078
7079
7080 struct rcu_head {
7081 struct rcu_head *next;
7082 void (*func)(struct rcu_head *head);
7083 };
7084
7085
7086 # 1 "include/linux/rcuclassic.h" 1
7087 # 49 "include/linux/rcuclassic.h"
7088 struct rcu_ctrlblk {
7089 long cur;
7090 long completed;
7091 long pending;
7092
7093 unsigned long gp_start;
7094 unsigned long jiffies_stall;
7095
7096
7097
7098 int signaled;
7099
7100 spinlock_t lock ;
7101 cpumask_t cpumask;
7102
7103 } ;
7104
7105
7106 static inline __attribute__((always_inline)) int rcu_batch_before(long a, long b)
7107 {
7108 return (a - b) < 0;
7109 }
7110
7111
7112 static inline __attribute__((always_inline)) int rcu_batch_after(long a, long b)
7113 {
7114 return (a - b) > 0;
7115 }
7116
7117
7118 struct rcu_data {
7119
7120 long quiescbatch;
7121 int passed_quiesc;
7122 int qs_pending;
7123 # 100 "include/linux/rcuclassic.h"
7124 long batch;
7125 struct rcu_head *nxtlist;
7126 struct rcu_head **nxttail[3];
7127 long qlen;
7128 struct rcu_head *donelist;
7129 struct rcu_head **donetail;
7130 long blimit;
7131 int cpu;
7132 struct rcu_head barrier;
7133 };
7134
7135 extern __typeof__(struct rcu_data) per_cpu__rcu_data;
7136 extern __typeof__(struct rcu_data) per_cpu__rcu_bh_data;
7137
7138
7139
7140
7141
7142
7143
7144 static inline __attribute__((always_inline)) void rcu_qsctr_inc(int cpu)
7145 {
7146 struct rcu_data *rdp = &(*((void)(cpu), &per_cpu__rcu_data));
7147 rdp->passed_quiesc = 1;
7148 }
7149 static inline __attribute__((always_inline)) void rcu_bh_qsctr_inc(int cpu)
7150 {
7151 struct rcu_data *rdp = &(*((void)(cpu), &per_cpu__rcu_bh_data));
7152 rdp->passed_quiesc = 1;
7153 }
7154
7155 extern int rcu_pending(int cpu);
7156 extern int rcu_needs_cpu(int cpu);
7157 # 173 "include/linux/rcuclassic.h"
7158 extern void __rcu_init(void);
7159
7160 extern void rcu_check_callbacks(int cpu, int user);
7161 extern void rcu_restart_cpu(int cpu);
7162
7163 extern long rcu_batches_completed(void);
7164 extern long rcu_batches_completed_bh(void);
7165 # 57 "include/linux/rcupdate.h" 2
7166 # 194 "include/linux/rcupdate.h"
7167 struct rcu_synchronize {
7168 struct rcu_head head;
7169 struct completion completion;
7170 };
7171
7172 extern void wakeme_after_rcu(struct rcu_head *head);
7173 # 242 "include/linux/rcupdate.h"
7174 extern void call_rcu(struct rcu_head *head,
7175 void (*func)(struct rcu_head *head));
7176 # 263 "include/linux/rcupdate.h"
7177 extern void call_rcu_bh(struct rcu_head *head,
7178 void (*func)(struct rcu_head *head));
7179
7180
7181 extern void synchronize_rcu(void);
7182 extern void rcu_barrier(void);
7183 extern void rcu_barrier_bh(void);
7184 extern void rcu_barrier_sched(void);
7185
7186
7187 extern void rcu_init(void);
7188 extern int rcu_needs_cpu(int cpu);
7189 # 11 "include/linux/rculist.h" 2
7190
7191
7192
7193
7194
7195
7196
7197 static inline __attribute__((always_inline)) void __list_add_rcu(struct list_head *new,
7198 struct list_head *prev, struct list_head *next)
7199 {
7200 new->next = next;
7201 new->prev = prev;
7202 ({ if (!__builtin_constant_p(new) || ((new) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (prev->next) = (new); });
7203 next->prev = new;
7204 }
7205 # 43 "include/linux/rculist.h"
7206 static inline __attribute__((always_inline)) void list_add_rcu(struct list_head *new, struct list_head *head)
7207 {
7208 __list_add_rcu(new, head, head->next);
7209 }
7210 # 64 "include/linux/rculist.h"
7211 static inline __attribute__((always_inline)) void list_add_tail_rcu(struct list_head *new,
7212 struct list_head *head)
7213 {
7214 __list_add_rcu(new, head->prev, head);
7215 }
7216 # 94 "include/linux/rculist.h"
7217 static inline __attribute__((always_inline)) void list_del_rcu(struct list_head *entry)
7218 {
7219 __list_del(entry->prev, entry->next);
7220 entry->prev = ((void *) 0x00200200);
7221 }
7222 # 120 "include/linux/rculist.h"
7223 static inline __attribute__((always_inline)) void hlist_del_init_rcu(struct hlist_node *n)
7224 {
7225 if (!hlist_unhashed(n)) {
7226 __hlist_del(n);
7227 n->pprev = ((void *)0);
7228 }
7229 }
7230 # 136 "include/linux/rculist.h"
7231 static inline __attribute__((always_inline)) void list_replace_rcu(struct list_head *old,
7232 struct list_head *new)
7233 {
7234 new->next = old->next;
7235 new->prev = old->prev;
7236 ({ if (!__builtin_constant_p(new) || ((new) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (new->prev->next) = (new); });
7237 new->next->prev = new;
7238 old->prev = ((void *) 0x00200200);
7239 }
7240 # 163 "include/linux/rculist.h"
7241 static inline __attribute__((always_inline)) void list_splice_init_rcu(struct list_head *list,
7242 struct list_head *head,
7243 void (*sync)(void))
7244 {
7245 struct list_head *first = list->next;
7246 struct list_head *last = list->prev;
7247 struct list_head *at = head->next;
7248
7249 if (list_empty(head))
7250 return;
7251
7252
7253
7254 INIT_LIST_HEAD(list);
7255 # 185 "include/linux/rculist.h"
7256 sync();
7257 # 195 "include/linux/rculist.h"
7258 last->next = at;
7259 ({ if (!__builtin_constant_p(first) || ((first) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (head->next) = (first); });
7260 first->prev = head;
7261 at->prev = last;
7262 }
7263 # 257 "include/linux/rculist.h"
7264 static inline __attribute__((always_inline)) void hlist_del_rcu(struct hlist_node *n)
7265 {
7266 __hlist_del(n);
7267 n->pprev = ((void *) 0x00200200);
7268 }
7269 # 270 "include/linux/rculist.h"
7270 static inline __attribute__((always_inline)) void hlist_replace_rcu(struct hlist_node *old,
7271 struct hlist_node *new)
7272 {
7273 struct hlist_node *next = old->next;
7274
7275 new->next = next;
7276 new->pprev = old->pprev;
7277 ({ if (!__builtin_constant_p(new) || ((new) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (*new->pprev) = (new); });
7278 if (next)
7279 new->next->pprev = &new->next;
7280 old->pprev = ((void *) 0x00200200);
7281 }
7282 # 302 "include/linux/rculist.h"
7283 static inline __attribute__((always_inline)) void hlist_add_head_rcu(struct hlist_node *n,
7284 struct hlist_head *h)
7285 {
7286 struct hlist_node *first = h->first;
7287
7288 n->next = first;
7289 n->pprev = &h->first;
7290 ({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (h->first) = (n); });
7291 if (first)
7292 first->pprev = &n->next;
7293 }
7294 # 332 "include/linux/rculist.h"
7295 static inline __attribute__((always_inline)) void hlist_add_before_rcu(struct hlist_node *n,
7296 struct hlist_node *next)
7297 {
7298 n->pprev = next->pprev;
7299 n->next = next;
7300 ({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (*(n->pprev)) = (n); });
7301 next->pprev = &n->next;
7302 }
7303 # 359 "include/linux/rculist.h"
7304 static inline __attribute__((always_inline)) void hlist_add_after_rcu(struct hlist_node *prev,
7305 struct hlist_node *n)
7306 {
7307 n->next = prev->next;
7308 n->pprev = &prev->next;
7309 ({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (prev->next) = (n); });
7310 if (n->next)
7311 n->next->pprev = &n->next;
7312 }
7313 # 7 "include/linux/dcache.h" 2
7314
7315
7316
7317
7318 struct nameidata;
7319 struct path;
7320 struct vfsmount;
7321 # 33 "include/linux/dcache.h"
7322 struct qstr {
7323 unsigned int hash;
7324 unsigned int len;
7325 const unsigned char *name;
7326 };
7327
7328 struct dentry_stat_t {
7329 int nr_dentry;
7330 int nr_unused;
7331 int age_limit;
7332 int want_pages;
7333 int dummy[2];
7334 };
7335 extern struct dentry_stat_t dentry_stat;
7336
7337
7338
7339
7340
7341
7342 static inline __attribute__((always_inline)) unsigned long
7343 partial_name_hash(unsigned long c, unsigned long prevhash)
7344 {
7345 return (prevhash + (c << 4) + (c >> 4)) * 11;
7346 }
7347
7348
7349
7350
7351
7352 static inline __attribute__((always_inline)) unsigned long end_name_hash(unsigned long hash)
7353 {
7354 return (unsigned int) hash;
7355 }
7356
7357
7358 static inline __attribute__((always_inline)) unsigned int
7359 full_name_hash(const unsigned char *name, unsigned int len)
7360 {
7361 unsigned long hash = 0;
7362 while (len--)
7363 hash = partial_name_hash(*name++, hash);
7364 return end_name_hash(hash);
7365 }
7366
7367 struct dcookie_struct;
7368
7369
7370
7371 struct dentry {
7372 atomic_t d_count;
7373 unsigned int d_flags;
7374 spinlock_t d_lock;
7375 struct inode *d_inode;
7376
7377
7378
7379
7380
7381 struct hlist_node d_hash;
7382 struct dentry *d_parent;
7383 struct qstr d_name;
7384
7385 struct list_head d_lru;
7386
7387
7388
7389 union {
7390 struct list_head d_child;
7391 struct rcu_head d_rcu;
7392 } d_u;
7393 struct list_head d_subdirs;
7394 struct list_head d_alias;
7395 unsigned long d_time;
7396 struct dentry_operations *d_op;
7397 struct super_block *d_sb;
7398 void *d_fsdata;
7399
7400 struct dcookie_struct *d_cookie;
7401
7402 int d_mounted;
7403 unsigned char d_iname[36];
7404 };
7405
7406
7407
7408
7409
7410
7411
7412 enum dentry_d_lock_class
7413 {
7414 DENTRY_D_LOCK_NORMAL,
7415 DENTRY_D_LOCK_NESTED
7416 };
7417
7418 struct dentry_operations {
7419 int (*d_revalidate)(struct dentry *, struct nameidata *);
7420 int (*d_hash) (struct dentry *, struct qstr *);
7421 int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
7422 int (*d_delete)(struct dentry *);
7423 void (*d_release)(struct dentry *);
7424 void (*d_iput)(struct dentry *, struct inode *);
7425 char *(*d_dname)(struct dentry *, char *, int);
7426 };
7427 # 180 "include/linux/dcache.h"
7428 extern spinlock_t dcache_lock;
7429 extern seqlock_t rename_lock;
7430 # 199 "include/linux/dcache.h"
7431 static inline __attribute__((always_inline)) void __d_drop(struct dentry *dentry)
7432 {
7433 if (!(dentry->d_flags & 0x0010)) {
7434 dentry->d_flags |= 0x0010;
7435 hlist_del_rcu(&dentry->d_hash);
7436 }
7437 }
7438
7439 static inline __attribute__((always_inline)) void d_drop(struct dentry *dentry)
7440 {
7441 _spin_lock(&dcache_lock);
7442 _spin_lock(&dentry->d_lock);
7443 __d_drop(dentry);
7444 _spin_unlock(&dentry->d_lock);
7445 _spin_unlock(&dcache_lock);
7446 }
7447
7448 static inline __attribute__((always_inline)) int dname_external(struct dentry *dentry)
7449 {
7450 return dentry->d_name.name != dentry->d_iname;
7451 }
7452
7453
7454
7455
7456 extern void d_instantiate(struct dentry *, struct inode *);
7457 extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
7458 extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
7459 extern void d_delete(struct dentry *);
7460
7461
7462 extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
7463 extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
7464 extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
7465 extern struct dentry * d_obtain_alias(struct inode *);
7466 extern void shrink_dcache_sb(struct super_block *);
7467 extern void shrink_dcache_parent(struct dentry *);
7468 extern void shrink_dcache_for_umount(struct super_block *);
7469 extern int d_invalidate(struct dentry *);
7470
7471
7472 extern struct dentry * d_alloc_root(struct inode *);
7473
7474
7475 extern void d_genocide(struct dentry *);
7476
7477 extern struct dentry *d_find_alias(struct inode *);
7478 extern void d_prune_aliases(struct inode *);
7479
7480
7481 extern int have_submounts(struct dentry *);
7482
7483
7484
7485
7486 extern void d_rehash(struct dentry *);
7487 # 265 "include/linux/dcache.h"
7488 static inline __attribute__((always_inline)) void d_add(struct dentry *entry, struct inode *inode)
7489 {
7490 d_instantiate(entry, inode);
7491 d_rehash(entry);
7492 }
7493 # 279 "include/linux/dcache.h"
7494 static inline __attribute__((always_inline)) struct dentry *d_add_unique(struct dentry *entry, struct inode *inode)
7495 {
7496 struct dentry *res;
7497
7498 res = d_instantiate_unique(entry, inode);
7499 d_rehash(res != ((void *)0) ? res : entry);
7500 return res;
7501 }
7502
7503
7504 extern void d_move(struct dentry *, struct dentry *);
7505 extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
7506
7507
7508 extern struct dentry * d_lookup(struct dentry *, struct qstr *);
7509 extern struct dentry * __d_lookup(struct dentry *, struct qstr *);
7510 extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *);
7511
7512
7513 extern int d_validate(struct dentry *, struct dentry *);
7514
7515
7516
7517
7518 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
7519
7520 extern char *__d_path(const struct path *path, struct path *root, char *, int);
7521 extern char *d_path(const struct path *, char *, int);
7522 extern char *dentry_path(struct dentry *, char *, int);
7523 # 324 "include/linux/dcache.h"
7524 static inline __attribute__((always_inline)) struct dentry *dget(struct dentry *dentry)
7525 {
7526 if (dentry) {
7527 do { if (__builtin_expect(!!(!((&dentry->d_count)->counter)), 0)) do { dump_bfin_trace_buffer(); printk("<0>" "BUG: failure at %s:%d/%s()!\n", "include/linux/dcache.h", 327, __func__); panic("BUG!"); } while (0); } while(0);
7528 atomic_inc(&dentry->d_count);
7529 }
7530 return dentry;
7531 }
7532
7533 extern struct dentry * dget_locked(struct dentry *);
7534 # 342 "include/linux/dcache.h"
7535 static inline __attribute__((always_inline)) int d_unhashed(struct dentry *dentry)
7536 {
7537 return (dentry->d_flags & 0x0010);
7538 }
7539
7540 static inline __attribute__((always_inline)) struct dentry *dget_parent(struct dentry *dentry)
7541 {
7542 struct dentry *ret;
7543
7544 _spin_lock(&dentry->d_lock);
7545 ret = dget(dentry->d_parent);
7546 _spin_unlock(&dentry->d_lock);
7547 return ret;
7548 }
7549
7550 extern void dput(struct dentry *);
7551
7552 static inline __attribute__((always_inline)) int d_mountpoint(struct dentry *dentry)
7553 {
7554 return dentry->d_mounted;
7555 }
7556
7557 extern struct vfsmount *lookup_mnt(struct vfsmount *, struct dentry *);
7558 extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
7559
7560 extern int sysctl_vfs_cache_pressure;
7561 # 292 "include/linux/fs.h" 2
7562 # 1 "include/linux/path.h" 1
7563
7564
7565
7566 struct dentry;
7567 struct vfsmount;
7568
7569 struct path {
7570 struct vfsmount *mnt;
7571 struct dentry *dentry;
7572 };
7573
7574 extern void path_get(struct path *);
7575 extern void path_put(struct path *);
7576 # 293 "include/linux/fs.h" 2
7577 # 1 "include/linux/stat.h" 1
7578
7579
7580
7581
7582
7583 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/stat.h" 1
7584
7585
7586
7587 struct stat {
7588 unsigned short st_dev;
7589 unsigned short __pad1;
7590 unsigned long st_ino;
7591 unsigned short st_mode;
7592 unsigned short st_nlink;
7593 unsigned short st_uid;
7594 unsigned short st_gid;
7595 unsigned short st_rdev;
7596 unsigned short __pad2;
7597 unsigned long st_size;
7598 unsigned long st_blksize;
7599 unsigned long st_blocks;
7600 unsigned long st_atime;
7601 unsigned long __unused1;
7602 unsigned long st_mtime;
7603 unsigned long __unused2;
7604 unsigned long st_ctime;
7605 unsigned long __unused3;
7606 unsigned long __unused4;
7607 unsigned long __unused5;
7608 };
7609
7610
7611
7612
7613 struct stat64 {
7614 unsigned long long st_dev;
7615 unsigned char __pad1[4];
7616
7617
7618 unsigned long __st_ino;
7619
7620 unsigned int st_mode;
7621 unsigned int st_nlink;
7622
7623 unsigned long st_uid;
7624 unsigned long st_gid;
7625
7626 unsigned long long st_rdev;
7627 unsigned char __pad2[4];
7628
7629 long long st_size;
7630 unsigned long st_blksize;
7631
7632 long long st_blocks;
7633
7634 unsigned long st_atime;
7635 unsigned long st_atime_nsec;
7636
7637 unsigned long st_mtime;
7638 unsigned long st_mtime_nsec;
7639
7640 unsigned long st_ctime;
7641 unsigned long st_ctime_nsec;
7642
7643 unsigned long long st_ino;
7644 };
7645 # 7 "include/linux/stat.h" 2
7646 # 62 "include/linux/stat.h"
7647 struct kstat {
7648 u64 ino;
7649 dev_t dev;
7650 umode_t mode;
7651 unsigned int nlink;
7652 uid_t uid;
7653 gid_t gid;
7654 dev_t rdev;
7655 loff_t size;
7656 struct timespec atime;
7657 struct timespec mtime;
7658 struct timespec ctime;
7659 unsigned long blksize;
7660 unsigned long long blocks;
7661 };
7662 # 294 "include/linux/fs.h" 2
7663
7664
7665
7666 # 1 "include/linux/radix-tree.h" 1
7667 # 41 "include/linux/radix-tree.h"
7668 static inline __attribute__((always_inline)) void *radix_tree_ptr_to_indirect(void *ptr)
7669 {
7670 return (void *)((unsigned long)ptr | 1);
7671 }
7672
7673 static inline __attribute__((always_inline)) void *radix_tree_indirect_to_ptr(void *ptr)
7674 {
7675 return (void *)((unsigned long)ptr & ~1);
7676 }
7677
7678 static inline __attribute__((always_inline)) int radix_tree_is_indirect_ptr(void *ptr)
7679 {
7680 return (int)((unsigned long)ptr & 1);
7681 }
7682
7683
7684
7685
7686
7687
7688 struct radix_tree_root {
7689 unsigned int height;
7690 gfp_t gfp_mask;
7691 struct radix_tree_node *rnode;
7692 };
7693 # 137 "include/linux/radix-tree.h"
7694 static inline __attribute__((always_inline)) void *radix_tree_deref_slot(void **pslot)
7695 {
7696 void *ret = *pslot;
7697 if (__builtin_expect(!!(radix_tree_is_indirect_ptr(ret)), 0))
7698 ret = ((void *)-1UL);
7699 return ret;
7700 }
7701 # 152 "include/linux/radix-tree.h"
7702 static inline __attribute__((always_inline)) void radix_tree_replace_slot(void **pslot, void *item)
7703 {
7704 do { if (__builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 0)) do { dump_bfin_trace_buffer(); printk("<0>" "BUG: failure at %s:%d/%s()!\n", "include/linux/radix-tree.h", 154, __func__); panic("BUG!"); } while (0); } while(0);
7705 ({ if (!__builtin_constant_p(item) || ((item) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (*pslot) = (item); });
7706 }
7707
7708 int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
7709 void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
7710 void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
7711 void *radix_tree_delete(struct radix_tree_root *, unsigned long);
7712 unsigned int
7713 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
7714 unsigned long first_index, unsigned int max_items);
7715 unsigned int
7716 radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
7717 unsigned long first_index, unsigned int max_items);
7718 unsigned long radix_tree_next_hole(struct radix_tree_root *root,
7719 unsigned long index, unsigned long max_scan);
7720 int radix_tree_preload(gfp_t gfp_mask);
7721 void radix_tree_init(void);
7722 void *radix_tree_tag_set(struct radix_tree_root *root,
7723 unsigned long index, unsigned int tag);
7724 void *radix_tree_tag_clear(struct radix_tree_root *root,
7725 unsigned long index, unsigned int tag);
7726 int radix_tree_tag_get(struct radix_tree_root *root,
7727 unsigned long index, unsigned int tag);
7728 unsigned int
7729 radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
7730 unsigned long first_index, unsigned int max_items,
7731 unsigned int tag);
7732 unsigned int
7733 radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
7734 unsigned long first_index, unsigned int max_items,
7735 unsigned int tag);
7736 int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
7737
7738 static inline __attribute__((always_inline)) void radix_tree_preload_end(void)
7739 {
7740 do { } while (0);
7741 }
7742 # 298 "include/linux/fs.h" 2
7743 # 1 "include/linux/prio_tree.h" 1
7744 # 14 "include/linux/prio_tree.h"
7745 struct raw_prio_tree_node {
7746 struct prio_tree_node *left;
7747 struct prio_tree_node *right;
7748 struct prio_tree_node *parent;
7749 };
7750
7751 struct prio_tree_node {
7752 struct prio_tree_node *left;
7753 struct prio_tree_node *right;
7754 struct prio_tree_node *parent;
7755 unsigned long start;
7756 unsigned long last;
7757 };
7758
7759 struct prio_tree_root {
7760 struct prio_tree_node *prio_tree_node;
7761 unsigned short index_bits;
7762 unsigned short raw;
7763
7764
7765
7766
7767 };
7768
7769 struct prio_tree_iter {
7770 struct prio_tree_node *cur;
7771 unsigned long mask;
7772 unsigned long value;
7773 int size_level;
7774
7775 struct prio_tree_root *root;
7776 unsigned long r_index;
7777 unsigned long h_index;
7778 };
7779
7780 static inline __attribute__((always_inline)) void prio_tree_iter_init(struct prio_tree_iter *iter,
7781 struct prio_tree_root *root, unsigned long r_index, unsigned long h_index)
7782 {
7783 iter->root = root;
7784 iter->r_index = r_index;
7785 iter->h_index = h_index;
7786 iter->cur = ((void *)0);
7787 }
7788 # 84 "include/linux/prio_tree.h"
7789 static inline __attribute__((always_inline)) int prio_tree_empty(const struct prio_tree_root *root)
7790 {
7791 return root->prio_tree_node == ((void *)0);
7792 }
7793
7794 static inline __attribute__((always_inline)) int prio_tree_root(const struct prio_tree_node *node)
7795 {
7796 return node->parent == node;
7797 }
7798
7799 static inline __attribute__((always_inline)) int prio_tree_left_empty(const struct prio_tree_node *node)
7800 {
7801 return node->left == node;
7802 }
7803
7804 static inline __attribute__((always_inline)) int prio_tree_right_empty(const struct prio_tree_node *node)
7805 {
7806 return node->right == node;
7807 }
7808
7809
7810 struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root,
7811 struct prio_tree_node *old, struct prio_tree_node *node);
7812 struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
7813 struct prio_tree_node *node);
7814 void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node);
7815 struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter);
7816 # 299 "include/linux/fs.h" 2
7817
7818 # 1 "include/linux/pid.h" 1
7819
7820
7821
7822
7823
7824 enum pid_type
7825 {
7826 PIDTYPE_PID,
7827 PIDTYPE_PGID,
7828 PIDTYPE_SID,
7829 PIDTYPE_MAX
7830 };
7831 # 50 "include/linux/pid.h"
7832 struct upid {
7833
7834 int nr;
7835 struct pid_namespace *ns;
7836 struct hlist_node pid_chain;
7837 };
7838
7839 struct pid
7840 {
7841 atomic_t count;
7842 unsigned int level;
7843
7844 struct hlist_head tasks[PIDTYPE_MAX];
7845 struct rcu_head rcu;
7846 struct upid numbers[1];
7847 };
7848
7849 extern struct pid init_struct_pid;
7850
7851 struct pid_link
7852 {
7853 struct hlist_node node;
7854 struct pid *pid;
7855 };
7856
7857 static inline __attribute__((always_inline)) struct pid *get_pid(struct pid *pid)
7858 {
7859 if (pid)
7860 atomic_inc(&pid->count);
7861 return pid;
7862 }
7863
7864 extern void put_pid(struct pid *pid);
7865 extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
7866 extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);
7867
7868 extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
7869
7870
7871
7872
7873
7874 extern void attach_pid(struct task_struct *task, enum pid_type type,
7875 struct pid *pid);
7876 extern void detach_pid(struct task_struct *task, enum pid_type);
7877 extern void change_pid(struct task_struct *task, enum pid_type,
7878 struct pid *pid);
7879 extern void transfer_pid(struct task_struct *old, struct task_struct *new,
7880 enum pid_type);
7881
7882 struct pid_namespace;
7883 extern struct pid_namespace init_pid_ns;
7884 # 112 "include/linux/pid.h"
7885 extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns);
7886 extern struct pid *find_vpid(int nr);
7887
7888
7889
7890
7891 extern struct pid *find_get_pid(int nr);
7892 extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
7893 int next_pidmap(struct pid_namespace *pid_ns, int last);
7894
7895 extern struct pid *alloc_pid(struct pid_namespace *ns);
7896 extern void free_pid(struct pid *pid);
7897 # 136 "include/linux/pid.h"
7898 static inline __attribute__((always_inline)) pid_t pid_nr(struct pid *pid)
7899 {
7900 pid_t nr = 0;
7901 if (pid)
7902 nr = pid->numbers[0].nr;
7903 return nr;
7904 }
7905
7906 pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
7907 pid_t pid_vnr(struct pid *pid);
7908 # 301 "include/linux/fs.h" 2
7909
7910 # 1 "include/linux/capability.h" 1
7911 # 18 "include/linux/capability.h"
7912 struct task_struct;
7913 # 40 "include/linux/capability.h"
7914 typedef struct __user_cap_header_struct {
7915 __u32 version;
7916 int pid;
7917 } *cap_user_header_t;
7918
7919 typedef struct __user_cap_data_struct {
7920 __u32 effective;
7921 __u32 permitted;
7922 __u32 inheritable;
7923 } *cap_user_data_t;
7924 # 72 "include/linux/capability.h"
7925 struct vfs_cap_data {
7926 __le32 magic_etc;
7927 struct {
7928 __le32 permitted;
7929 __le32 inheritable;
7930 } data[2];
7931 };
7932 # 95 "include/linux/capability.h"
7933 typedef struct kernel_cap_struct {
7934 __u32 cap[2];
7935 } kernel_cap_t;
7936 # 416 "include/linux/capability.h"
7937 static inline __attribute__((always_inline)) kernel_cap_t cap_combine(const kernel_cap_t a,
7938 const kernel_cap_t b)
7939 {
7940 kernel_cap_t dest;
7941 do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] | b.cap[__capi]; } } while (0);
7942 return dest;
7943 }
7944
7945 static inline __attribute__((always_inline)) kernel_cap_t cap_intersect(const kernel_cap_t a,
7946 const kernel_cap_t b)
7947 {
7948 kernel_cap_t dest;
7949 do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] & b.cap[__capi]; } } while (0);
7950 return dest;
7951 }
7952
7953 static inline __attribute__((always_inline)) kernel_cap_t cap_drop(const kernel_cap_t a,
7954 const kernel_cap_t drop)
7955 {
7956 kernel_cap_t dest;
7957 do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] &~ drop.cap[__capi]; } } while (0);
7958 return dest;
7959 }
7960
7961 static inline __attribute__((always_inline)) kernel_cap_t cap_invert(const kernel_cap_t c)
7962 {
7963 kernel_cap_t dest;
7964 do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = ~ c.cap[__capi]; } } while (0);
7965 return dest;
7966 }
7967
7968 static inline __attribute__((always_inline)) int cap_isclear(const kernel_cap_t a)
7969 {
7970 unsigned __capi;
7971 for (__capi = 0; __capi < 2; ++__capi) {
7972 if (a.cap[__capi] != 0)
7973 return 0;
7974 }
7975 return 1;
7976 }
7977
7978 static inline __attribute__((always_inline)) int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
7979 {
7980 kernel_cap_t dest;
7981 dest = cap_drop(a, set);
7982 return cap_isclear(dest);
7983 }
7984
7985
7986
7987 static inline __attribute__((always_inline)) int cap_is_fs_cap(int cap)
7988 {
7989 const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))), ((1 << ((32) & 31))) } });
7990 return !!((1 << ((cap) & 31)) & __cap_fs_set.cap[((cap) >> 5)]);
7991 }
7992
7993 static inline __attribute__((always_inline)) kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
7994 {
7995 const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))), ((1 << ((32) & 31))) } });
7996 return cap_drop(a, __cap_fs_set);
7997 }
7998
7999 static inline __attribute__((always_inline)) kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
8000 const kernel_cap_t permitted)
8001 {
8002 const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))), ((1 << ((32) & 31))) } });
8003 return cap_combine(a,
8004 cap_intersect(permitted, __cap_fs_set));
8005 }
8006
8007 static inline __attribute__((always_inline)) kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
8008 {
8009 const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31)))|(1 << ((24) & 31)), ((1 << ((32) & 31))) } });
8010 return cap_drop(a, __cap_fs_set);
8011 }
8012
8013 static inline __attribute__((always_inline)) kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
8014 const kernel_cap_t permitted)
8015 {
8016 const kernel_cap_t __cap_nfsd_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31)))|(1 << ((24) & 31)), ((1 << ((32) & 31))) } });
8017 return cap_combine(a,
8018 cap_intersect(permitted, __cap_nfsd_set));
8019 }
8020
8021 extern const kernel_cap_t __cap_empty_set;
8022 extern const kernel_cap_t __cap_full_set;
8023 extern const kernel_cap_t __cap_init_eff_set;
8024
8025 kernel_cap_t cap_set_effective(const kernel_cap_t pE_new);
8026 # 518 "include/linux/capability.h"
8027 extern int capable(int cap);
8028 # 303 "include/linux/fs.h" 2
8029 # 1 "include/linux/semaphore.h" 1
8030 # 16 "include/linux/semaphore.h"
8031 struct semaphore {
8032 spinlock_t lock;
8033 unsigned int count;
8034 struct list_head wait_list;
8035 };
8036 # 32 "include/linux/semaphore.h"
8037 static inline __attribute__((always_inline)) void sema_init(struct semaphore *sem, int val)
8038 {
8039 static struct lock_class_key __key;
8040 *sem = (struct semaphore) { .lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, }, .count = val, .wait_list = { &((*sem).wait_list), &((*sem).wait_list) }, };
8041 do { (void)("semaphore->lock"); (void)(&__key); } while (0);
8042 }
8043
8044
8045
8046
8047 extern void down(struct semaphore *sem);
8048 extern int __attribute__((warn_unused_result)) down_interruptible(struct semaphore *sem);
8049 extern int __attribute__((warn_unused_result)) down_killable(struct semaphore *sem);
8050 extern int __attribute__((warn_unused_result)) down_trylock(struct semaphore *sem);
8051 extern int __attribute__((warn_unused_result)) down_timeout(struct semaphore *sem, long jiffies);
8052 extern void up(struct semaphore *sem);
8053 # 304 "include/linux/fs.h" 2
8054 # 1 "include/linux/fiemap.h" 1
8055 # 14 "include/linux/fiemap.h"
8056 struct fiemap_extent {
8057 __u64 fe_logical;
8058
8059 __u64 fe_physical;
8060
8061 __u64 fe_length;
8062 __u64 fe_reserved64[2];
8063 __u32 fe_flags;
8064 __u32 fe_reserved[3];
8065 };
8066
8067 struct fiemap {
8068 __u64 fm_start;
8069
8070 __u64 fm_length;
8071
8072 __u32 fm_flags;
8073 __u32 fm_mapped_extents;
8074 __u32 fm_extent_count;
8075 __u32 fm_reserved;
8076 struct fiemap_extent fm_extents[0];
8077 };
8078 # 305 "include/linux/fs.h" 2
8079
8080
8081
8082
8083 struct export_operations;
8084 struct hd_geometry;
8085 struct iovec;
8086 struct nameidata;
8087 struct kiocb;
8088 struct pipe_inode_info;
8089 struct poll_table_struct;
8090 struct kstatfs;
8091 struct vm_area_struct;
8092 struct vfsmount;
8093
8094 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init(void);
8095 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init_early(void);
8096 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) files_init(unsigned long);
8097
8098 struct buffer_head;
8099 typedef int (get_block_t)(struct inode *inode, sector_t iblock,
8100 struct buffer_head *bh_result, int create);
8101 typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
8102 ssize_t bytes, void *private);
8103 # 361 "include/linux/fs.h"
8104 struct iattr {
8105 unsigned int ia_valid;
8106 umode_t ia_mode;
8107 uid_t ia_uid;
8108 gid_t ia_gid;
8109 loff_t ia_size;
8110 struct timespec ia_atime;
8111 struct timespec ia_mtime;
8112 struct timespec ia_ctime;
8113
8114
8115
8116
8117
8118
8119 struct file *ia_file;
8120 };
8121
8122
8123
8124
8125 # 1 "include/linux/quota.h" 1
8126 # 98 "include/linux/quota.h"
8127 struct if_dqblk {
8128 __u64 dqb_bhardlimit;
8129 __u64 dqb_bsoftlimit;
8130 __u64 dqb_curspace;
8131 __u64 dqb_ihardlimit;
8132 __u64 dqb_isoftlimit;
8133 __u64 dqb_curinodes;
8134 __u64 dqb_btime;
8135 __u64 dqb_itime;
8136 __u32 dqb_valid;
8137 };
8138 # 119 "include/linux/quota.h"
8139 struct if_dqinfo {
8140 __u64 dqi_bgrace;
8141 __u64 dqi_igrace;
8142 __u32 dqi_flags;
8143 __u32 dqi_valid;
8144 };
8145 # 141 "include/linux/quota.h"
8146 enum {
8147 QUOTA_NL_C_UNSPEC,
8148 QUOTA_NL_C_WARNING,
8149 __QUOTA_NL_C_MAX,
8150 };
8151
8152
8153 enum {
8154 QUOTA_NL_A_UNSPEC,
8155 QUOTA_NL_A_QTYPE,
8156 QUOTA_NL_A_EXCESS_ID,
8157 QUOTA_NL_A_WARNING,
8158 QUOTA_NL_A_DEV_MAJOR,
8159 QUOTA_NL_A_DEV_MINOR,
8160 QUOTA_NL_A_CAUSED_ID,
8161 __QUOTA_NL_A_MAX,
8162 };
8163 # 168 "include/linux/quota.h"
8164 # 1 "include/linux/dqblk_xfs.h" 1
8165 # 50 "include/linux/dqblk_xfs.h"
8166 typedef struct fs_disk_quota {
8167 __s8 d_version;
8168 __s8 d_flags;
8169 __u16 d_fieldmask;
8170 __u32 d_id;
8171 __u64 d_blk_hardlimit;
8172 __u64 d_blk_softlimit;
8173 __u64 d_ino_hardlimit;
8174 __u64 d_ino_softlimit;
8175 __u64 d_bcount;
8176 __u64 d_icount;
8177 __s32 d_itimer;
8178
8179 __s32 d_btimer;
8180 __u16 d_iwarns;
8181 __u16 d_bwarns;
8182 __s32 d_padding2;
8183 __u64 d_rtb_hardlimit;
8184 __u64 d_rtb_softlimit;
8185 __u64 d_rtbcount;
8186 __s32 d_rtbtimer;
8187 __u16 d_rtbwarns;
8188 __s16 d_padding3;
8189 char d_padding4[8];
8190 } fs_disk_quota_t;
8191 # 137 "include/linux/dqblk_xfs.h"
8192 typedef struct fs_qfilestat {
8193 __u64 qfs_ino;
8194 __u64 qfs_nblks;
8195 __u32 qfs_nextents;
8196 } fs_qfilestat_t;
8197
8198 typedef struct fs_quota_stat {
8199 __s8 qs_version;
8200 __u16 qs_flags;
8201 __s8 qs_pad;
8202 fs_qfilestat_t qs_uquota;
8203 fs_qfilestat_t qs_gquota;
8204 __u32 qs_incoredqs;
8205 __s32 qs_btimelimit;
8206 __s32 qs_itimelimit;
8207 __s32 qs_rtbtimelimit;
8208 __u16 qs_bwarnlimit;
8209 __u16 qs_iwarnlimit;
8210 } fs_quota_stat_t;
8211 # 169 "include/linux/quota.h" 2
8212 # 1 "include/linux/dqblk_v1.h" 1
8213 # 21 "include/linux/dqblk_v1.h"
8214 struct v1_mem_dqinfo {
8215 };
8216 # 170 "include/linux/quota.h" 2
8217 # 1 "include/linux/dqblk_v2.h" 1
8218 # 20 "include/linux/dqblk_v2.h"
8219 struct v2_mem_dqinfo {
8220 unsigned int dqi_blocks;
8221 unsigned int dqi_free_blk;
8222 unsigned int dqi_free_entry;
8223 };
8224 # 171 "include/linux/quota.h" 2
8225
8226
8227
8228 typedef __kernel_uid32_t qid_t;
8229 typedef __u64 qsize_t;
8230
8231 extern spinlock_t dq_data_lock;
8232 # 189 "include/linux/quota.h"
8233 struct mem_dqblk {
8234 __u32 dqb_bhardlimit;
8235 __u32 dqb_bsoftlimit;
8236 qsize_t dqb_curspace;
8237 __u32 dqb_ihardlimit;
8238 __u32 dqb_isoftlimit;
8239 __u32 dqb_curinodes;
8240 time_t dqb_btime;
8241 time_t dqb_itime;
8242 };
8243
8244
8245
8246
8247 struct quota_format_type;
8248
8249 struct mem_dqinfo {
8250 struct quota_format_type *dqi_format;
8251 int dqi_fmt_id;
8252
8253 struct list_head dqi_dirty_list;
8254 unsigned long dqi_flags;
8255 unsigned int dqi_bgrace;
8256 unsigned int dqi_igrace;
8257 qsize_t dqi_maxblimit;
8258 qsize_t dqi_maxilimit;
8259 union {
8260 struct v1_mem_dqinfo v1_i;
8261 struct v2_mem_dqinfo v2_i;
8262 } u;
8263 };
8264
8265 struct super_block;
8266
8267
8268
8269
8270
8271 extern void mark_info_dirty(struct super_block *sb, int type);
8272 static inline __attribute__((always_inline)) int info_dirty(struct mem_dqinfo *info)
8273 {
8274 return test_bit(16, &info->dqi_flags);
8275 }
8276
8277 struct dqstats {
8278 int lookups;
8279 int drops;
8280 int reads;
8281 int writes;
8282 int cache_hits;
8283 int allocated_dquots;
8284 int free_dquots;
8285 int syncs;
8286 };
8287
8288 extern struct dqstats dqstats;
8289 # 253 "include/linux/quota.h"
8290 struct dquot {
8291 struct hlist_node dq_hash;
8292 struct list_head dq_inuse;
8293 struct list_head dq_free;
8294 struct list_head dq_dirty;
8295 struct mutex dq_lock;
8296 atomic_t dq_count;
8297 wait_queue_head_t dq_wait_unused;
8298 struct super_block *dq_sb;
8299 unsigned int dq_id;
8300 loff_t dq_off;
8301 unsigned long dq_flags;
8302 short dq_type;
8303 struct mem_dqblk dq_dqb;
8304 };
8305
8306
8307
8308
8309
8310
8311
8312 struct quota_format_ops {
8313 int (*check_quota_file)(struct super_block *sb, int type);
8314 int (*read_file_info)(struct super_block *sb, int type);
8315 int (*write_file_info)(struct super_block *sb, int type);
8316 int (*free_file_info)(struct super_block *sb, int type);
8317 int (*read_dqblk)(struct dquot *dquot);
8318 int (*commit_dqblk)(struct dquot *dquot);
8319 int (*release_dqblk)(struct dquot *dquot);
8320 };
8321
8322
8323 struct dquot_operations {
8324 int (*initialize) (struct inode *, int);
8325 int (*drop) (struct inode *);
8326 int (*alloc_space) (struct inode *, qsize_t, int);
8327 int (*alloc_inode) (const struct inode *, unsigned long);
8328 int (*free_space) (struct inode *, qsize_t);
8329 int (*free_inode) (const struct inode *, unsigned long);
8330 int (*transfer) (struct inode *, struct iattr *);
8331 int (*write_dquot) (struct dquot *);
8332 int (*acquire_dquot) (struct dquot *);
8333 int (*release_dquot) (struct dquot *);
8334 int (*mark_dirty) (struct dquot *);
8335 int (*write_info) (struct super_block *, int);
8336 };
8337
8338
8339 struct quotactl_ops {
8340 int (*quota_on)(struct super_block *, int, int, char *, int);
8341 int (*quota_off)(struct super_block *, int, int);
8342 int (*quota_sync)(struct super_block *, int);
8343 int (*get_info)(struct super_block *, int, struct if_dqinfo *);
8344 int (*set_info)(struct super_block *, int, struct if_dqinfo *);
8345 int (*get_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *);
8346 int (*set_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *);
8347 int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
8348 int (*set_xstate)(struct super_block *, unsigned int, int);
8349 int (*get_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *);
8350 int (*set_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *);
8351 };
8352
8353 struct quota_format_type {
8354 int qf_fmt_id;
8355 struct quota_format_ops *qf_ops;
8356 struct module *qf_owner;
8357 struct quota_format_type *qf_next;
8358 };
8359 # 330 "include/linux/quota.h"
8360 struct quota_info {
8361 unsigned int flags;
8362 struct mutex dqio_mutex;
8363 struct mutex dqonoff_mutex;
8364 struct rw_semaphore dqptr_sem;
8365 struct inode *files[2];
8366 struct mem_dqinfo info[2];
8367 struct quota_format_ops *ops[2];
8368 };
8369
8370 int register_quota_format(struct quota_format_type *fmt);
8371 void unregister_quota_format(struct quota_format_type *fmt);
8372
8373 struct quota_module_name {
8374 int qm_fmt_id;
8375 char *qm_mod_name;
8376 };
8377 # 383 "include/linux/fs.h" 2
8378 # 410 "include/linux/fs.h"
8379 enum positive_aop_returns {
8380 AOP_WRITEPAGE_ACTIVATE = 0x80000,
8381 AOP_TRUNCATED_PAGE = 0x80001,
8382 };
8383
8384
8385
8386
8387
8388
8389
8390 struct page;
8391 struct address_space;
8392 struct writeback_control;
8393
8394 struct iov_iter {
8395 const struct iovec *iov;
8396 unsigned long nr_segs;
8397 size_t iov_offset;
8398 size_t count;
8399 };
8400
8401 size_t iov_iter_copy_from_user_atomic(struct page *page,
8402 struct iov_iter *i, unsigned long offset, size_t bytes);
8403 size_t iov_iter_copy_from_user(struct page *page,
8404 struct iov_iter *i, unsigned long offset, size_t bytes);
8405 void iov_iter_advance(struct iov_iter *i, size_t bytes);
8406 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
8407 size_t iov_iter_single_seg_count(struct iov_iter *i);
8408
8409 static inline __attribute__((always_inline)) void iov_iter_init(struct iov_iter *i,
8410 const struct iovec *iov, unsigned long nr_segs,
8411 size_t count, size_t written)
8412 {
8413 i->iov = iov;
8414 i->nr_segs = nr_segs;
8415 i->iov_offset = 0;
8416 i->count = count + written;
8417
8418 iov_iter_advance(i, written);
8419 }
8420
8421 static inline __attribute__((always_inline)) size_t iov_iter_count(struct iov_iter *i)
8422 {
8423 return i->count;
8424 }
8425 # 466 "include/linux/fs.h"
8426 typedef struct {
8427 size_t written;
8428 size_t count;
8429 union {
8430 char *buf;
8431 void *data;
8432 } arg;
8433 int error;
8434 } read_descriptor_t;
8435
8436 typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
8437 unsigned long, unsigned long);
8438
8439 struct address_space_operations {
8440 int (*writepage)(struct page *page, struct writeback_control *wbc);
8441 int (*readpage)(struct file *, struct page *);
8442 void (*sync_page)(struct page *);
8443
8444
8445 int (*writepages)(struct address_space *, struct writeback_control *);
8446
8447
8448 int (*set_page_dirty)(struct page *page);
8449
8450 int (*readpages)(struct file *filp, struct address_space *mapping,
8451 struct list_head *pages, unsigned nr_pages);
8452
8453 int (*write_begin)(struct file *, struct address_space *mapping,
8454 loff_t pos, unsigned len, unsigned flags,
8455 struct page **pagep, void **fsdata);
8456 int (*write_end)(struct file *, struct address_space *mapping,
8457 loff_t pos, unsigned len, unsigned copied,
8458 struct page *page, void *fsdata);
8459
8460
8461 sector_t (*bmap)(struct address_space *, sector_t);
8462 void (*invalidatepage) (struct page *, unsigned long);
8463 int (*releasepage) (struct page *, gfp_t);
8464 ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
8465 loff_t offset, unsigned long nr_segs);
8466 int (*get_xip_mem)(struct address_space *, unsigned long, int,
8467 void **, unsigned long *);
8468
8469 int (*migratepage) (struct address_space *,
8470 struct page *, struct page *);
8471 int (*launder_page) (struct page *);
8472 int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
8473 unsigned long);
8474 };
8475
8476
8477
8478
8479
8480 int pagecache_write_begin(struct file *, struct address_space *mapping,
8481 loff_t pos, unsigned len, unsigned flags,
8482 struct page **pagep, void **fsdata);
8483
8484 int pagecache_write_end(struct file *, struct address_space *mapping,
8485 loff_t pos, unsigned len, unsigned copied,
8486 struct page *page, void *fsdata);
8487
8488 struct backing_dev_info;
8489 struct address_space {
8490 struct inode *host;
8491 struct radix_tree_root page_tree;
8492 spinlock_t tree_lock;
8493 unsigned int i_mmap_writable;
8494 struct prio_tree_root i_mmap;
8495 struct list_head i_mmap_nonlinear;
8496 spinlock_t i_mmap_lock;
8497 unsigned int truncate_count;
8498 unsigned long nrpages;
8499 unsigned long writeback_index;
8500 const struct address_space_operations *a_ops;
8501 unsigned long flags;
8502 struct backing_dev_info *backing_dev_info;
8503 spinlock_t private_lock;
8504 struct list_head private_list;
8505 struct address_space *assoc_mapping;
8506 } __attribute__((aligned(sizeof(long))));
8507
8508
8509
8510
8511
8512
8513 struct block_device {
8514 dev_t bd_dev;
8515 struct inode * bd_inode;
8516 int bd_openers;
8517 struct mutex bd_mutex;
8518 struct semaphore bd_mount_sem;
8519 struct list_head bd_inodes;
8520 void * bd_holder;
8521 int bd_holders;
8522
8523
8524
8525 struct block_device * bd_contains;
8526 unsigned bd_block_size;
8527 struct hd_struct * bd_part;
8528
8529 unsigned bd_part_count;
8530 int bd_invalidated;
8531 struct gendisk * bd_disk;
8532 struct list_head bd_list;
8533 struct backing_dev_info *bd_inode_backing_dev_info;
8534
8535
8536
8537
8538
8539
8540 unsigned long bd_private;
8541 };
8542 # 590 "include/linux/fs.h"
8543 int mapping_tagged(struct address_space *mapping, int tag);
8544
8545
8546
8547
8548 static inline __attribute__((always_inline)) int mapping_mapped(struct address_space *mapping)
8549 {
8550 return !prio_tree_empty(&mapping->i_mmap) ||
8551 !list_empty(&mapping->i_mmap_nonlinear);
8552 }
8553
8554
8555
8556
8557
8558
8559
8560 static inline __attribute__((always_inline)) int mapping_writably_mapped(struct address_space *mapping)
8561 {
8562 return mapping->i_mmap_writable != 0;
8563 }
8564 # 623 "include/linux/fs.h"
8565 struct inode {
8566 struct hlist_node i_hash;
8567 struct list_head i_list;
8568 struct list_head i_sb_list;
8569 struct list_head i_dentry;
8570 unsigned long i_ino;
8571 atomic_t i_count;
8572 unsigned int i_nlink;
8573 uid_t i_uid;
8574 gid_t i_gid;
8575 dev_t i_rdev;
8576 u64 i_version;
8577 loff_t i_size;
8578
8579
8580
8581 struct timespec i_atime;
8582 struct timespec i_mtime;
8583 struct timespec i_ctime;
8584 unsigned int i_blkbits;
8585 blkcnt_t i_blocks;
8586 unsigned short i_bytes;
8587 umode_t i_mode;
8588 spinlock_t i_lock;
8589 struct mutex i_mutex;
8590 struct rw_semaphore i_alloc_sem;
8591 const struct inode_operations *i_op;
8592 const struct file_operations *i_fop;
8593 struct super_block *i_sb;
8594 struct file_lock *i_flock;
8595 struct address_space *i_mapping;
8596 struct address_space i_data;
8597
8598 struct dquot *i_dquot[2];
8599
8600 struct list_head i_devices;
8601 union {
8602 struct pipe_inode_info *i_pipe;
8603 struct block_device *i_bdev;
8604 struct cdev *i_cdev;
8605 };
8606 int i_cindex;
8607
8608 __u32 i_generation;
8609
8610
8611 unsigned long i_dnotify_mask;
8612 struct dnotify_struct *i_dnotify;
8613
8614
8615
8616 struct list_head inotify_watches;
8617 struct mutex inotify_mutex;
8618
8619
8620 unsigned long i_state;
8621 unsigned long dirtied_when;
8622
8623 unsigned int i_flags;
8624
8625 atomic_t i_writecount;
8626
8627
8628
8629 void *i_private;
8630 };
8631 # 701 "include/linux/fs.h"
8632 enum inode_i_mutex_lock_class
8633 {
8634 I_MUTEX_NORMAL,
8635 I_MUTEX_PARENT,
8636 I_MUTEX_CHILD,
8637 I_MUTEX_XATTR,
8638 I_MUTEX_QUOTA
8639 };
8640
8641 extern void inode_double_lock(struct inode *inode1, struct inode *inode2);
8642 extern void inode_double_unlock(struct inode *inode1, struct inode *inode2);
8643 # 723 "include/linux/fs.h"
8644 static inline __attribute__((always_inline)) loff_t i_size_read(const struct inode *inode)
8645 {
8646 # 742 "include/linux/fs.h"
8647 return inode->i_size;
8648
8649 }
8650
8651
8652
8653
8654
8655
8656 static inline __attribute__((always_inline)) void i_size_write(struct inode *inode, loff_t i_size)
8657 {
8658 # 762 "include/linux/fs.h"
8659 inode->i_size = i_size;
8660
8661 }
8662
8663 static inline __attribute__((always_inline)) unsigned iminor(const struct inode *inode)
8664 {
8665 return ((unsigned int) ((inode->i_rdev) & ((1U << 20) - 1)));
8666 }
8667
8668 static inline __attribute__((always_inline)) unsigned imajor(const struct inode *inode)
8669 {
8670 return ((unsigned int) ((inode->i_rdev) >> 20));
8671 }
8672
8673 extern struct block_device *I_BDEV(struct inode *inode);
8674
8675 struct fown_struct {
8676 rwlock_t lock;
8677 struct pid *pid;
8678 enum pid_type pid_type;
8679 uid_t uid, euid;
8680 int signum;
8681 };
8682
8683
8684
8685
8686 struct file_ra_state {
8687 unsigned long start;
8688 unsigned int size;
8689 unsigned int async_size;
8690
8691
8692 unsigned int ra_pages;
8693 int mmap_miss;
8694 loff_t prev_pos;
8695 };
8696
8697
8698
8699
8700 static inline __attribute__((always_inline)) int ra_has_index(struct file_ra_state *ra, unsigned long index)
8701 {
8702 return (index >= ra->start &&
8703 index < ra->start + ra->size);
8704 }
8705
8706
8707
8708
8709 struct file {
8710
8711
8712
8713
8714 union {
8715 struct list_head fu_list;
8716 struct rcu_head fu_rcuhead;
8717 } f_u;
8718 struct path f_path;
8719
8720
8721 const struct file_operations *f_op;
8722 atomic_long_t f_count;
8723 unsigned int f_flags;
8724 fmode_t f_mode;
8725 loff_t f_pos;
8726 struct fown_struct f_owner;
8727 unsigned int f_uid, f_gid;
8728 struct file_ra_state f_ra;
8729
8730 u64 f_version;
8731
8732
8733
8734
8735 void *private_data;
8736
8737
8738
8739 struct list_head f_ep_links;
8740 spinlock_t f_ep_lock;
8741
8742 struct address_space *f_mapping;
8743
8744 unsigned long f_mnt_write_state;
8745
8746 };
8747 extern spinlock_t files_lock;
8748
8749
8750
8751
8752
8753
8754
8755 static inline __attribute__((always_inline)) void file_take_write(struct file *f)
8756 {
8757 ({ int __ret_warn_on = !!(f->f_mnt_write_state != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("include/linux/fs.h", 860); __builtin_expect(!!(__ret_warn_on), 0); });
8758 f->f_mnt_write_state = 1;
8759 }
8760 static inline __attribute__((always_inline)) void file_release_write(struct file *f)
8761 {
8762 f->f_mnt_write_state |= 2;
8763 }
8764 static inline __attribute__((always_inline)) void file_reset_write(struct file *f)
8765 {
8766 f->f_mnt_write_state = 0;
8767 }
8768 static inline __attribute__((always_inline)) void file_check_state(struct file *f)
8769 {
8770
8771
8772
8773
8774 ({ int __ret_warn_on = !!(f->f_mnt_write_state == 1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("include/linux/fs.h", 877); __builtin_expect(!!(__ret_warn_on), 0); });
8775 ({ int __ret_warn_on = !!(f->f_mnt_write_state == 2); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("include/linux/fs.h", 878); __builtin_expect(!!(__ret_warn_on), 0); });
8776 }
8777 static inline __attribute__((always_inline)) int file_check_writeable(struct file *f)
8778 {
8779 if (f->f_mnt_write_state == 1)
8780 return 0;
8781 printk("<4>" "writeable file with no "
8782 "mnt_want_write()\n");
8783 ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("include/linux/fs.h", 886); __builtin_expect(!!(__ret_warn_on), 0); });
8784 return -22;
8785 }
8786 # 931 "include/linux/fs.h"
8787 typedef struct files_struct *fl_owner_t;
8788
8789 struct file_lock_operations {
8790 void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
8791 void (*fl_release_private)(struct file_lock *);
8792 };
8793
8794 struct lock_manager_operations {
8795 int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
8796 void (*fl_notify)(struct file_lock *);
8797 int (*fl_grant)(struct file_lock *, struct file_lock *, int);
8798 void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
8799 void (*fl_release_private)(struct file_lock *);
8800 void (*fl_break)(struct file_lock *);
8801 int (*fl_mylease)(struct file_lock *, struct file_lock *);
8802 int (*fl_change)(struct file_lock **, int);
8803 };
8804
8805 struct lock_manager {
8806 struct list_head list;
8807 };
8808
8809 void locks_start_grace(struct lock_manager *);
8810 void locks_end_grace(struct lock_manager *);
8811 int locks_in_grace(void);
8812
8813
8814 # 1 "include/linux/nfs_fs_i.h" 1
8815
8816
8817
8818
8819
8820 # 1 "include/linux/nfs.h" 1
8821 # 39 "include/linux/nfs.h"
8822 enum nfs_stat {
8823 NFS_OK = 0,
8824 NFSERR_PERM = 1,
8825 NFSERR_NOENT = 2,
8826 NFSERR_IO = 5,
8827 NFSERR_NXIO = 6,
8828 NFSERR_EAGAIN = 11,
8829 NFSERR_ACCES = 13,
8830 NFSERR_EXIST = 17,
8831 NFSERR_XDEV = 18,
8832 NFSERR_NODEV = 19,
8833 NFSERR_NOTDIR = 20,
8834 NFSERR_ISDIR = 21,
8835 NFSERR_INVAL = 22,
8836 NFSERR_FBIG = 27,
8837 NFSERR_NOSPC = 28,
8838 NFSERR_ROFS = 30,
8839 NFSERR_MLINK = 31,
8840 NFSERR_OPNOTSUPP = 45,
8841 NFSERR_NAMETOOLONG = 63,
8842 NFSERR_NOTEMPTY = 66,
8843 NFSERR_DQUOT = 69,
8844 NFSERR_STALE = 70,
8845 NFSERR_REMOTE = 71,
8846 NFSERR_WFLUSH = 99,
8847 NFSERR_BADHANDLE = 10001,
8848 NFSERR_NOT_SYNC = 10002,
8849 NFSERR_BAD_COOKIE = 10003,
8850 NFSERR_NOTSUPP = 10004,
8851 NFSERR_TOOSMALL = 10005,
8852 NFSERR_SERVERFAULT = 10006,
8853 NFSERR_BADTYPE = 10007,
8854 NFSERR_JUKEBOX = 10008,
8855 NFSERR_SAME = 10009,
8856 NFSERR_DENIED = 10010,
8857 NFSERR_EXPIRED = 10011,
8858 NFSERR_LOCKED = 10012,
8859 NFSERR_GRACE = 10013,
8860 NFSERR_FHEXPIRED = 10014,
8861 NFSERR_SHARE_DENIED = 10015,
8862 NFSERR_WRONGSEC = 10016,
8863 NFSERR_CLID_INUSE = 10017,
8864 NFSERR_RESOURCE = 10018,
8865 NFSERR_MOVED = 10019,
8866 NFSERR_NOFILEHANDLE = 10020,
8867 NFSERR_MINOR_VERS_MISMATCH = 10021,
8868 NFSERR_STALE_CLIENTID = 10022,
8869 NFSERR_STALE_STATEID = 10023,
8870 NFSERR_OLD_STATEID = 10024,
8871 NFSERR_BAD_STATEID = 10025,
8872 NFSERR_BAD_SEQID = 10026,
8873 NFSERR_NOT_SAME = 10027,
8874 NFSERR_LOCK_RANGE = 10028,
8875 NFSERR_SYMLINK = 10029,
8876 NFSERR_RESTOREFH = 10030,
8877 NFSERR_LEASE_MOVED = 10031,
8878 NFSERR_ATTRNOTSUPP = 10032,
8879 NFSERR_NO_GRACE = 10033,
8880 NFSERR_RECLAIM_BAD = 10034,
8881 NFSERR_RECLAIM_CONFLICT = 10035,
8882 NFSERR_BAD_XDR = 10036,
8883 NFSERR_LOCKS_HELD = 10037,
8884 NFSERR_OPENMODE = 10038,
8885 NFSERR_BADOWNER = 10039,
8886 NFSERR_BADCHAR = 10040,
8887 NFSERR_BADNAME = 10041,
8888 NFSERR_BAD_RANGE = 10042,
8889 NFSERR_LOCK_NOTSUPP = 10043,
8890 NFSERR_OP_ILLEGAL = 10044,
8891 NFSERR_DEADLOCK = 10045,
8892 NFSERR_FILE_OPEN = 10046,
8893 NFSERR_ADMIN_REVOKED = 10047,
8894 NFSERR_CB_PATH_DOWN = 10048,
8895 NFSERR_REPLAY_ME = 10049
8896 };
8897
8898
8899
8900 enum nfs_ftype {
8901 NFNON = 0,
8902 NFREG = 1,
8903 NFDIR = 2,
8904 NFBLK = 3,
8905 NFCHR = 4,
8906 NFLNK = 5,
8907 NFSOCK = 6,
8908 NFBAD = 7,
8909 NFFIFO = 8
8910 };
8911
8912
8913 # 1 "include/linux/sunrpc/msg_prot.h" 1
8914 # 18 "include/linux/sunrpc/msg_prot.h"
8915 typedef u32 rpc_authflavor_t;
8916
8917 enum rpc_auth_flavors {
8918 RPC_AUTH_NULL = 0,
8919 RPC_AUTH_UNIX = 1,
8920 RPC_AUTH_SHORT = 2,
8921 RPC_AUTH_DES = 3,
8922 RPC_AUTH_KRB = 4,
8923 RPC_AUTH_GSS = 6,
8924 RPC_AUTH_MAXFLAVOR = 8,
8925
8926 RPC_AUTH_GSS_KRB5 = 390003,
8927 RPC_AUTH_GSS_KRB5I = 390004,
8928 RPC_AUTH_GSS_KRB5P = 390005,
8929 RPC_AUTH_GSS_LKEY = 390006,
8930 RPC_AUTH_GSS_LKEYI = 390007,
8931 RPC_AUTH_GSS_LKEYP = 390008,
8932 RPC_AUTH_GSS_SPKM = 390009,
8933 RPC_AUTH_GSS_SPKMI = 390010,
8934 RPC_AUTH_GSS_SPKMP = 390011,
8935 };
8936
8937
8938
8939
8940 enum rpc_msg_type {
8941 RPC_CALL = 0,
8942 RPC_REPLY = 1
8943 };
8944
8945 enum rpc_reply_stat {
8946 RPC_MSG_ACCEPTED = 0,
8947 RPC_MSG_DENIED = 1
8948 };
8949
8950 enum rpc_accept_stat {
8951 RPC_SUCCESS = 0,
8952 RPC_PROG_UNAVAIL = 1,
8953 RPC_PROG_MISMATCH = 2,
8954 RPC_PROC_UNAVAIL = 3,
8955 RPC_GARBAGE_ARGS = 4,
8956 RPC_SYSTEM_ERR = 5,
8957
8958 RPC_DROP_REPLY = 60000,
8959 };
8960
8961 enum rpc_reject_stat {
8962 RPC_MISMATCH = 0,
8963 RPC_AUTH_ERROR = 1
8964 };
8965
8966 enum rpc_auth_stat {
8967 RPC_AUTH_OK = 0,
8968 RPC_AUTH_BADCRED = 1,
8969 RPC_AUTH_REJECTEDCRED = 2,
8970 RPC_AUTH_BADVERF = 3,
8971 RPC_AUTH_REJECTEDVERF = 4,
8972 RPC_AUTH_TOOWEAK = 5,
8973
8974 RPCSEC_GSS_CREDPROBLEM = 13,
8975 RPCSEC_GSS_CTXPROBLEM = 14
8976 };
8977 # 102 "include/linux/sunrpc/msg_prot.h"
8978 typedef __be32 rpc_fraghdr;
8979 # 131 "include/linux/nfs.h" 2
8980
8981
8982
8983
8984
8985
8986 struct nfs_fh {
8987 unsigned short size;
8988 unsigned char data[128];
8989 };
8990
8991
8992
8993
8994
8995 static inline __attribute__((always_inline)) int nfs_compare_fh(const struct nfs_fh *a, const struct nfs_fh *b)
8996 {
8997 return a->size != b->size || memcmp(a->data, b->data, a->size) != 0;
8998 }
8999
9000 static inline __attribute__((always_inline)) void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source)
9001 {
9002 target->size = source->size;
9003 memcpy(target->data, source->data, source->size);
9004 }
9005 # 165 "include/linux/nfs.h"
9006 enum nfs3_stable_how {
9007 NFS_UNSTABLE = 0,
9008 NFS_DATA_SYNC = 1,
9009 NFS_FILE_SYNC = 2
9010 };
9011 # 7 "include/linux/nfs_fs_i.h" 2
9012
9013 struct nlm_lockowner;
9014
9015
9016
9017
9018 struct nfs_lock_info {
9019 u32 state;
9020 struct nlm_lockowner *owner;
9021 struct list_head list;
9022 };
9023
9024 struct nfs4_lock_state;
9025 struct nfs4_lock_info {
9026 struct nfs4_lock_state *owner;
9027 };
9028 # 959 "include/linux/fs.h" 2
9029
9030 struct file_lock {
9031 struct file_lock *fl_next;
9032 struct list_head fl_link;
9033 struct list_head fl_block;
9034 fl_owner_t fl_owner;
9035 unsigned char fl_flags;
9036 unsigned char fl_type;
9037 unsigned int fl_pid;
9038 struct pid *fl_nspid;
9039 wait_queue_head_t fl_wait;
9040 struct file *fl_file;
9041 loff_t fl_start;
9042 loff_t fl_end;
9043
9044 struct fasync_struct * fl_fasync;
9045 unsigned long fl_break_time;
9046
9047 struct file_lock_operations *fl_ops;
9048 struct lock_manager_operations *fl_lmops;
9049 union {
9050 struct nfs_lock_info nfs_fl;
9051 struct nfs4_lock_info nfs4_fl;
9052 struct {
9053 struct list_head link;
9054 int state;
9055 } afs;
9056 } fl_u;
9057 };
9058 # 996 "include/linux/fs.h"
9059 # 1 "include/linux/fcntl.h" 1
9060
9061
9062
9063 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/fcntl.h" 1
9064 # 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/fcntl.h"
9065 # 1 "include/asm-generic/fcntl.h" 1
9066 # 117 "include/asm-generic/fcntl.h"
9067 struct flock {
9068 short l_type;
9069 short l_whence;
9070 off_t l_start;
9071 off_t l_len;
9072 pid_t l_pid;
9073
9074 };
9075 # 140 "include/asm-generic/fcntl.h"
9076 struct flock64 {
9077 short l_type;
9078 short l_whence;
9079 loff_t l_start;
9080 loff_t l_len;
9081 pid_t l_pid;
9082
9083 };
9084 # 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/fcntl.h" 2
9085 # 5 "include/linux/fcntl.h" 2
9086 # 997 "include/linux/fs.h" 2
9087
9088 extern void send_sigio(struct fown_struct *fown, int fd, int band);
9089
9090
9091 extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
9092 loff_t endbyte, unsigned int flags);
9093
9094
9095 extern int fcntl_getlk(struct file *, struct flock *);
9096 extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
9097 struct flock *);
9098
9099
9100 extern int fcntl_getlk64(struct file *, struct flock64 *);
9101 extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
9102 struct flock64 *);
9103
9104
9105 extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
9106 extern int fcntl_getlease(struct file *filp);
9107
9108
9109 extern void locks_init_lock(struct file_lock *);
9110 extern void locks_copy_lock(struct file_lock *, struct file_lock *);
9111 extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
9112 extern void locks_remove_posix(struct file *, fl_owner_t);
9113 extern void locks_remove_flock(struct file *);
9114 extern void posix_test_lock(struct file *, struct file_lock *);
9115 extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
9116 extern int posix_lock_file_wait(struct file *, struct file_lock *);
9117 extern int posix_unblock_lock(struct file *, struct file_lock *);
9118 extern int vfs_test_lock(struct file *, struct file_lock *);
9119 extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
9120 extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
9121 extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
9122 extern int __break_lease(struct inode *inode, unsigned int flags);
9123 extern void lease_get_mtime(struct inode *, struct timespec *time);
9124 extern int generic_setlease(struct file *, long, struct file_lock **);
9125 extern int vfs_setlease(struct file *, long, struct file_lock **);
9126 extern int lease_modify(struct file_lock **, int);
9127 extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
9128 extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
9129 # 1071 "include/linux/fs.h"
9130 struct fasync_struct {
9131 int magic;
9132 int fa_fd;
9133 struct fasync_struct *fa_next;
9134 struct file *fa_file;
9135 };
9136
9137
9138
9139
9140 extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
9141
9142 extern void kill_fasync(struct fasync_struct **, int, int);
9143
9144 extern void __kill_fasync(struct fasync_struct *, int, int);
9145
9146 extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
9147 extern int f_setown(struct file *filp, unsigned long arg, int force);
9148 extern void f_delown(struct file *filp);
9149 extern pid_t f_getown(struct file *filp);
9150 extern int send_sigurg(struct fown_struct *fown);
9151 # 1101 "include/linux/fs.h"
9152 extern struct list_head super_blocks;
9153 extern spinlock_t sb_lock;
9154
9155
9156
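/* Editor's note: in-core superblock, one per mounted filesystem instance; it
   holds the root dentry, the operations tables and filesystem-private data in
   s_fs_info. */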
9157 struct super_block {
9158 struct list_head s_list;
9159 dev_t s_dev;
9160 unsigned long s_blocksize;
9161 unsigned char s_blocksize_bits;
9162 unsigned char s_dirt;
9163 unsigned long long s_maxbytes;
9164 struct file_system_type *s_type;
9165 const struct super_operations *s_op;
9166 struct dquot_operations *dq_op;
9167 struct quotactl_ops *s_qcop;
9168 const struct export_operations *s_export_op;
9169 unsigned long s_flags;
9170 unsigned long s_magic;
9171 struct dentry *s_root;
9172 struct rw_semaphore s_umount;
9173 struct mutex s_lock;
9174 int s_count;
9175 int s_syncing;
9176 int s_need_sync_fs;
9177 atomic_t s_active;
9178
9179
9180
9181 struct xattr_handler **s_xattr;
9182
9183 struct list_head s_inodes;
9184 struct list_head s_dirty;
9185 struct list_head s_io;
9186 struct list_head s_more_io;
9187 struct hlist_head s_anon;
9188 struct list_head s_files;
9189
9190 struct list_head s_dentry_lru;
9191 int s_nr_dentry_unused;
9192
9193 struct block_device *s_bdev;
9194 struct mtd_info *s_mtd;
9195 struct list_head s_instances;
9196 struct quota_info s_dquot;
9197
9198 int s_frozen;
9199 wait_queue_head_t s_wait_unfrozen;
9200
9201 char s_id[32];
9202
9203 void *s_fs_info;
9204 fmode_t s_mode;
9205
9206
9207
9208
9209
9210 struct mutex s_vfs_rename_mutex;
9211
9212
9213
9214 u32 s_time_gran;
9215
9216
9217
9218
9219
9220 char *s_subtype;
9221
9222
9223
9224
9225
9226 char *s_options;
9227 };
9228
9229 extern struct timespec current_fs_time(struct super_block *sb);
9230
9231
9232
9233
9234 enum {
9235 SB_UNFROZEN = 0,
9236 SB_FREEZE_WRITE = 1,
9237 SB_FREEZE_TRANS = 2,
9238 };
9239 # 1200 "include/linux/fs.h"
9240 extern void lock_super(struct super_block *);
9241 extern void unlock_super(struct super_block *);
9242
9243
9244
9245
9246 extern int vfs_permission(struct nameidata *, int);
9247 extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
9248 extern int vfs_mkdir(struct inode *, struct dentry *, int);
9249 extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
9250 extern int vfs_symlink(struct inode *, struct dentry *, const char *);
9251 extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
9252 extern int vfs_rmdir(struct inode *, struct dentry *);
9253 extern int vfs_unlink(struct inode *, struct dentry *);
9254 extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
9255
9256
9257
9258
9259 extern void dentry_unhash(struct dentry *dentry);
9260
9261
9262
9263
9264 extern int file_permission(struct file *, int);
9265
9266
9267
9268
9269 struct fiemap_extent_info {
9270 unsigned int fi_flags;
9271 unsigned int fi_extents_mapped;
9272 unsigned int fi_extents_max;
9273 struct fiemap_extent *fi_extents_start;
9274
9275 };
9276 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
9277 u64 phys, u64 len, u32 flags);
9278 int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
9279 # 1259 "include/linux/fs.h"
9280 int generic_osync_inode(struct inode *, struct address_space *, int);
9281
9282
9283
9284
9285
9286
9287
9288 typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
9289 struct block_device_operations;
9290 # 1281 "include/linux/fs.h"
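/* Editor's note: file_operations is the method table a filesystem or driver
   supplies for open files (llseek/read/write/mmap/ioctl and friends);
   inode_operations further below covers namespace and metadata operations. */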
9291 struct file_operations {
9292 struct module *owner;
9293 loff_t (*llseek) (struct file *, loff_t, int);
9294 ssize_t (*read) (struct file *, char *, size_t, loff_t *);
9295 ssize_t (*write) (struct file *, const char *, size_t, loff_t *);
9296 ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
9297 ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
9298 int (*readdir) (struct file *, void *, filldir_t);
9299 unsigned int (*poll) (struct file *, struct poll_table_struct *);
9300 int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
9301 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
9302 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
9303 int (*mmap) (struct file *, struct vm_area_struct *);
9304 int (*open) (struct inode *, struct file *);
9305 int (*flush) (struct file *, fl_owner_t id);
9306 int (*release) (struct inode *, struct file *);
9307 int (*fsync) (struct file *, struct dentry *, int datasync);
9308 int (*aio_fsync) (struct kiocb *, int datasync);
9309 int (*fasync) (int, struct file *, int);
9310 int (*lock) (struct file *, int, struct file_lock *);
9311 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
9312 unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
9313 int (*check_flags)(int);
9314 int (*dir_notify)(struct file *filp, unsigned long arg);
9315 int (*flock) (struct file *, int, struct file_lock *);
9316 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
9317 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
9318 int (*setlease)(struct file *, long, struct file_lock **);
9319 };
9320
9321 struct inode_operations {
9322 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
9323 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
9324 int (*link) (struct dentry *,struct inode *,struct dentry *);
9325 int (*unlink) (struct inode *,struct dentry *);
9326 int (*symlink) (struct inode *,struct dentry *,const char *);
9327 int (*mkdir) (struct inode *,struct dentry *,int);
9328 int (*rmdir) (struct inode *,struct dentry *);
9329 int (*mknod) (struct inode *,struct dentry *,int,dev_t);
9330 int (*rename) (struct inode *, struct dentry *,
9331 struct inode *, struct dentry *);
9332 int (*readlink) (struct dentry *, char *,int);
9333 void * (*follow_link) (struct dentry *, struct nameidata *);
9334 void (*put_link) (struct dentry *, struct nameidata *, void *);
9335 void (*truncate) (struct inode *);
9336 int (*permission) (struct inode *, int);
9337 int (*setattr) (struct dentry *, struct iattr *);
9338 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
9339 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
9340 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
9341 ssize_t (*listxattr) (struct dentry *, char *, size_t);
9342 int (*removexattr) (struct dentry *, const char *);
9343 void (*truncate_range)(struct inode *, loff_t, loff_t);
9344 long (*fallocate)(struct inode *inode, int mode, loff_t offset,
9345 loff_t len);
9346 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
9347 u64 len);
9348 };
9349
9350 struct seq_file;
9351
9352 ssize_t rw_copy_check_uvector(int type, const struct iovec * uvector,
9353 unsigned long nr_segs, unsigned long fast_segs,
9354 struct iovec *fast_pointer,
9355 struct iovec **ret_pointer);
9356
9357 extern ssize_t vfs_read(struct file *, char *, size_t, loff_t *);
9358 extern ssize_t vfs_write(struct file *, const char *, size_t, loff_t *);
9359 extern ssize_t vfs_readv(struct file *, const struct iovec *,
9360 unsigned long, loff_t *);
9361 extern ssize_t vfs_writev(struct file *, const struct iovec *,
9362 unsigned long, loff_t *);
9363
9364 struct super_operations {
9365 struct inode *(*alloc_inode)(struct super_block *sb);
9366 void (*destroy_inode)(struct inode *);
9367
9368 void (*dirty_inode) (struct inode *);
9369 int (*write_inode) (struct inode *, int);
9370 void (*drop_inode) (struct inode *);
9371 void (*delete_inode) (struct inode *);
9372 void (*put_super) (struct super_block *);
9373 void (*write_super) (struct super_block *);
9374 int (*sync_fs)(struct super_block *sb, int wait);
9375 void (*write_super_lockfs) (struct super_block *);
9376 void (*unlockfs) (struct super_block *);
9377 int (*statfs) (struct dentry *, struct kstatfs *);
9378 int (*remount_fs) (struct super_block *, int *, char *);
9379 void (*clear_inode) (struct inode *);
9380 void (*umount_begin) (struct super_block *);
9381
9382 int (*show_options)(struct seq_file *, struct vfsmount *);
9383 int (*show_stats)(struct seq_file *, struct vfsmount *);
9384
9385 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
9386 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
9387
9388 };
9389 # 1447 "include/linux/fs.h"
9390 extern void __mark_inode_dirty(struct inode *, int);
9391 static inline __attribute__((always_inline)) void mark_inode_dirty(struct inode *inode)
9392 {
9393 __mark_inode_dirty(inode, (1 | 2 | 4));
9394 }
9395
9396 static inline __attribute__((always_inline)) void mark_inode_dirty_sync(struct inode *inode)
9397 {
9398 __mark_inode_dirty(inode, 1);
9399 }
9400 # 1466 "include/linux/fs.h"
9401 static inline __attribute__((always_inline)) void inc_nlink(struct inode *inode)
9402 {
9403 inode->i_nlink++;
9404 }
9405
9406 static inline __attribute__((always_inline)) void inode_inc_link_count(struct inode *inode)
9407 {
9408 inc_nlink(inode);
9409 mark_inode_dirty(inode);
9410 }
9411 # 1488 "include/linux/fs.h"
9412 static inline __attribute__((always_inline)) void drop_nlink(struct inode *inode)
9413 {
9414 inode->i_nlink--;
9415 }
9416 # 1501 "include/linux/fs.h"
9417 static inline __attribute__((always_inline)) void clear_nlink(struct inode *inode)
9418 {
9419 inode->i_nlink = 0;
9420 }
9421
9422 static inline __attribute__((always_inline)) void inode_dec_link_count(struct inode *inode)
9423 {
9424 drop_nlink(inode);
9425 mark_inode_dirty(inode);
9426 }
9427 # 1520 "include/linux/fs.h"
9428 static inline __attribute__((always_inline)) void inode_inc_iversion(struct inode *inode)
9429 {
9430 _spin_lock(&inode->i_lock);
9431 inode->i_version++;
9432 _spin_unlock(&inode->i_lock);
9433 }
9434
9435 extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
9436 static inline __attribute__((always_inline)) void file_accessed(struct file *file)
9437 {
9438 if (!(file->f_flags & 01000000))
9439 touch_atime(file->f_path.mnt, file->f_path.dentry);
9440 }
9441
9442 int sync_inode(struct inode *inode, struct writeback_control *wbc);
9443
9444 struct file_system_type {
9445 const char *name;
9446 int fs_flags;
9447 int (*get_sb) (struct file_system_type *, int,
9448 const char *, void *, struct vfsmount *);
9449 void (*kill_sb) (struct super_block *);
9450 struct module *owner;
9451 struct file_system_type * next;
9452 struct list_head fs_supers;
9453
9454 struct lock_class_key s_lock_key;
9455 struct lock_class_key s_umount_key;
9456
9457 struct lock_class_key i_lock_key;
9458 struct lock_class_key i_mutex_key;
9459 struct lock_class_key i_mutex_dir_key;
9460 struct lock_class_key i_alloc_sem_key;
9461 };
9462
9463 extern int get_sb_bdev(struct file_system_type *fs_type,
9464 int flags, const char *dev_name, void *data,
9465 int (*fill_super)(struct super_block *, void *, int),
9466 struct vfsmount *mnt);
9467 extern int get_sb_single(struct file_system_type *fs_type,
9468 int flags, void *data,
9469 int (*fill_super)(struct super_block *, void *, int),
9470 struct vfsmount *mnt);
9471 extern int get_sb_nodev(struct file_system_type *fs_type,
9472 int flags, void *data,
9473 int (*fill_super)(struct super_block *, void *, int),
9474 struct vfsmount *mnt);
9475 void generic_shutdown_super(struct super_block *sb);
9476 void kill_block_super(struct super_block *sb);
9477 void kill_anon_super(struct super_block *sb);
9478 void kill_litter_super(struct super_block *sb);
9479 void deactivate_super(struct super_block *sb);
9480 int set_anon_super(struct super_block *s, void *data);
9481 struct super_block *sget(struct file_system_type *type,
9482 int (*test)(struct super_block *,void *),
9483 int (*set)(struct super_block *,void *),
9484 void *data);
9485 extern int get_sb_pseudo(struct file_system_type *, char *,
9486 const struct super_operations *ops, unsigned long,
9487 struct vfsmount *mnt);
9488 extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
9489 int __put_super_and_need_restart(struct super_block *sb);
9490
9491
9492
9493
9494
9495
9496
9497 extern int register_filesystem(struct file_system_type *);
9498 extern int unregister_filesystem(struct file_system_type *);
9499 extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
9500
9501 extern int may_umount_tree(struct vfsmount *);
9502 extern int may_umount(struct vfsmount *);
9503 extern long do_mount(char *, char *, char *, unsigned long, void *);
9504 extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *);
9505 extern void drop_collected_mounts(struct vfsmount *);
9506
9507 extern int vfs_statfs(struct dentry *, struct kstatfs *);
9508
9509
9510 extern struct kobject *fs_kobj;
9511
9512 extern int rw_verify_area(int, struct file *, loff_t *, size_t);
9513
9514
9515
9516
9517
9518 extern int locks_mandatory_locked(struct inode *);
9519 extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
9520
9521
9522
9523
9524
9525
9526 static inline __attribute__((always_inline)) int __mandatory_lock(struct inode *ino)
9527 {
9528 return (ino->i_mode & (0002000 | 00010)) == 0002000;
9529 }
9530
9531
9532
9533
9534
9535
9536 static inline __attribute__((always_inline)) int mandatory_lock(struct inode *ino)
9537 {
9538 return ((ino)->i_sb->s_flags & (64)) && __mandatory_lock(ino);
9539 }
9540
9541 static inline __attribute__((always_inline)) int locks_verify_locked(struct inode *inode)
9542 {
9543 if (mandatory_lock(inode))
9544 return locks_mandatory_locked(inode);
9545 return 0;
9546 }
9547
9548 static inline __attribute__((always_inline)) int locks_verify_truncate(struct inode *inode,
9549 struct file *filp,
9550 loff_t size)
9551 {
9552 if (inode->i_flock && mandatory_lock(inode))
9553 return locks_mandatory_area(
9554 2, inode, filp,
9555 size < inode->i_size ? size : inode->i_size,
9556 (size < inode->i_size ? inode->i_size - size
9557 : size - inode->i_size)
9558 );
9559 return 0;
9560 }
9561
9562 static inline __attribute__((always_inline)) int break_lease(struct inode *inode, unsigned int mode)
9563 {
9564 if (inode->i_flock)
9565 return __break_lease(inode, mode);
9566 return 0;
9567 }
9568 # 1672 "include/linux/fs.h"
9569 extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
9570 struct file *filp);
9571 extern long do_sys_open(int dfd, const char *filename, int flags,
9572 int mode);
9573 extern struct file *filp_open(const char *, int, int);
9574 extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
9575 extern int filp_close(struct file *, fl_owner_t id);
9576 extern char * getname(const char *);
9577
9578
9579 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init_early(void);
9580 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init(unsigned long);
9581
9582 extern struct kmem_cache *names_cachep;
9583 # 1696 "include/linux/fs.h"
9584 extern int register_blkdev(unsigned int, const char *);
9585 extern void unregister_blkdev(unsigned int, const char *);
9586 extern struct block_device *bdget(dev_t);
9587 extern void bd_set_size(struct block_device *, loff_t size);
9588 extern void bd_forget(struct inode *inode);
9589 extern void bdput(struct block_device *);
9590 extern struct block_device *open_by_devnum(dev_t, fmode_t);
9591
9592
9593
9594 extern const struct file_operations def_blk_fops;
9595 extern const struct file_operations def_chr_fops;
9596 extern const struct file_operations bad_sock_fops;
9597 extern const struct file_operations def_fifo_fops;
9598
9599 extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
9600 extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
9601 extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
9602 extern int blkdev_get(struct block_device *, fmode_t);
9603 extern int blkdev_put(struct block_device *, fmode_t);
9604 extern int bd_claim(struct block_device *, void *);
9605 extern void bd_release(struct block_device *);
9606 # 1729 "include/linux/fs.h"
9607 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
9608 extern int register_chrdev_region(dev_t, unsigned, const char *);
9609 extern int register_chrdev(unsigned int, const char *,
9610 const struct file_operations *);
9611 extern void unregister_chrdev(unsigned int, const char *);
9612 extern void unregister_chrdev_region(dev_t, unsigned);
9613 extern void chrdev_show(struct seq_file *,off_t);
9614
9615
9616
9617
9618
9619
9620
9621 extern const char *__bdevname(dev_t, char *buffer);
9622 extern const char *bdevname(struct block_device *bdev, char *buffer);
9623 extern struct block_device *lookup_bdev(const char *);
9624 extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *);
9625 extern void close_bdev_exclusive(struct block_device *, fmode_t);
9626 extern void blkdev_show(struct seq_file *,off_t);
9627
9628
9629
9630
9631
9632 extern void init_special_inode(struct inode *, umode_t, dev_t);
9633
9634
9635 extern void make_bad_inode(struct inode *);
9636 extern int is_bad_inode(struct inode *);
9637
9638 extern const struct file_operations read_pipefifo_fops;
9639 extern const struct file_operations write_pipefifo_fops;
9640 extern const struct file_operations rdwr_pipefifo_fops;
9641
9642 extern int fs_may_remount_ro(struct super_block *);
9643 # 1777 "include/linux/fs.h"
9644 extern void check_disk_size_change(struct gendisk *disk,
9645 struct block_device *bdev);
9646 extern int revalidate_disk(struct gendisk *);
9647 extern int check_disk_change(struct block_device *);
9648 extern int __invalidate_device(struct block_device *);
9649 extern int invalidate_partition(struct gendisk *, int);
9650
9651 extern int invalidate_inodes(struct super_block *);
9652 unsigned long __invalidate_mapping_pages(struct address_space *mapping,
9653 unsigned long start, unsigned long end,
9654 bool be_atomic);
9655 unsigned long invalidate_mapping_pages(struct address_space *mapping,
9656 unsigned long start, unsigned long end);
9657
9658 static inline __attribute__((always_inline)) unsigned long __attribute__((deprecated))
9659 invalidate_inode_pages(struct address_space *mapping)
9660 {
9661 return invalidate_mapping_pages(mapping, 0, ~0UL);
9662 }
9663
9664 static inline __attribute__((always_inline)) void invalidate_remote_inode(struct inode *inode)
9665 {
9666 if ((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) ||
9667 (((inode->i_mode) & 00170000) == 0120000))
9668 invalidate_mapping_pages(inode->i_mapping, 0, -1);
9669 }
9670 extern int invalidate_inode_pages2(struct address_space *mapping);
9671 extern int invalidate_inode_pages2_range(struct address_space *mapping,
9672 unsigned long start, unsigned long end);
9673 extern void generic_sync_sb_inodes(struct super_block *sb,
9674 struct writeback_control *wbc);
9675 extern int write_inode_now(struct inode *, int);
9676 extern int filemap_fdatawrite(struct address_space *);
9677 extern int filemap_flush(struct address_space *);
9678 extern int filemap_fdatawait(struct address_space *);
9679 extern int filemap_write_and_wait(struct address_space *mapping);
9680 extern int filemap_write_and_wait_range(struct address_space *mapping,
9681 loff_t lstart, loff_t lend);
9682 extern int wait_on_page_writeback_range(struct address_space *mapping,
9683 unsigned long start, unsigned long end);
9684 extern int __filemap_fdatawrite_range(struct address_space *mapping,
9685 loff_t start, loff_t end, int sync_mode);
9686 extern int filemap_fdatawrite_range(struct address_space *mapping,
9687 loff_t start, loff_t end);
9688
9689 extern long do_fsync(struct file *file, int datasync);
9690 extern void sync_supers(void);
9691 extern void sync_filesystems(int wait);
9692 extern void __fsync_super(struct super_block *sb);
9693 extern void emergency_sync(void);
9694 extern void emergency_remount(void);
9695 extern int do_remount_sb(struct super_block *sb, int flags,
9696 void *data, int force);
9697
9698 extern sector_t bmap(struct inode *, sector_t);
9699
9700 extern int notify_change(struct dentry *, struct iattr *);
9701 extern int inode_permission(struct inode *, int);
9702 extern int generic_permission(struct inode *, int,
9703 int (*check_acl)(struct inode *, int));
9704
9705 static inline __attribute__((always_inline)) bool execute_ok(struct inode *inode)
9706 {
9707 return (inode->i_mode & (00100|00010|00001)) || (((inode->i_mode) & 00170000) == 0040000);
9708 }
9709
9710 extern int get_write_access(struct inode *);
9711 extern int deny_write_access(struct file *);
9712 static inline __attribute__((always_inline)) void put_write_access(struct inode * inode)
9713 {
9714 atomic_dec(&inode->i_writecount);
9715 }
9716 static inline __attribute__((always_inline)) void allow_write_access(struct file *file)
9717 {
9718 if (file)
9719 atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
9720 }
9721 extern int do_pipe(int *);
9722 extern int do_pipe_flags(int *, int);
9723 extern struct file *create_read_pipe(struct file *f, int flags);
9724 extern struct file *create_write_pipe(int flags);
9725 extern void free_write_pipe(struct file *);
9726
9727 extern struct file *do_filp_open(int dfd, const char *pathname,
9728 int open_flag, int mode);
9729 extern int may_open(struct nameidata *, int, int);
9730
9731 extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
9732 extern struct file * open_exec(const char *);
9733
9734
9735 extern int is_subdir(struct dentry *, struct dentry *);
9736 extern ino_t find_inode_number(struct dentry *, struct qstr *);
9737
9738 # 1 "include/linux/err.h" 1
9739 # 22 "include/linux/err.h"
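/* Editor's note: ERR_PTR/PTR_ERR encode small negative errno values inside a
   pointer value; IS_ERR tests for the top 4095 addresses, i.e. -MAX_ERRNO..-1. */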
9740 static inline __attribute__((always_inline)) void *ERR_PTR(long error)
9741 {
9742 return (void *) error;
9743 }
9744
9745 static inline __attribute__((always_inline)) long PTR_ERR(const void *ptr)
9746 {
9747 return (long) ptr;
9748 }
9749
9750 static inline __attribute__((always_inline)) long IS_ERR(const void *ptr)
9751 {
9752 return __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 0);
9753 }
9754 # 44 "include/linux/err.h"
9755 static inline __attribute__((always_inline)) void *ERR_CAST(const void *ptr)
9756 {
9757
9758 return (void *) ptr;
9759 }
9760 # 1872 "include/linux/fs.h" 2
9761
9762
9763 extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
9764
9765 extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
9766
9767 extern void inode_init_once(struct inode *);
9768 extern void iput(struct inode *);
9769 extern struct inode * igrab(struct inode *);
9770 extern ino_t iunique(struct super_block *, ino_t);
9771 extern int inode_needs_sync(struct inode *inode);
9772 extern void generic_delete_inode(struct inode *inode);
9773 extern void generic_drop_inode(struct inode *inode);
9774
9775 extern struct inode *ilookup5_nowait(struct super_block *sb,
9776 unsigned long hashval, int (*test)(struct inode *, void *),
9777 void *data);
9778 extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
9779 int (*test)(struct inode *, void *), void *data);
9780 extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
9781
9782 extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
9783 extern struct inode * iget_locked(struct super_block *, unsigned long);
9784 extern void unlock_new_inode(struct inode *);
9785
9786 extern void __iget(struct inode * inode);
9787 extern void iget_failed(struct inode *);
9788 extern void clear_inode(struct inode *);
9789 extern void destroy_inode(struct inode *);
9790 extern struct inode *new_inode(struct super_block *);
9791 extern int should_remove_suid(struct dentry *);
9792 extern int file_remove_suid(struct file *);
9793
9794 extern void __insert_inode_hash(struct inode *, unsigned long hashval);
9795 extern void remove_inode_hash(struct inode *);
9796 static inline __attribute__((always_inline)) void insert_inode_hash(struct inode *inode) {
9797 __insert_inode_hash(inode, inode->i_ino);
9798 }
9799
9800 extern struct file * get_empty_filp(void);
9801 extern void file_move(struct file *f, struct list_head *list);
9802 extern void file_kill(struct file *f);
9803
9804 struct bio;
9805 extern void submit_bio(int, struct bio *);
9806 extern int bdev_read_only(struct block_device *);
9807
9808 extern int set_blocksize(struct block_device *, int);
9809 extern int sb_set_blocksize(struct super_block *, int);
9810 extern int sb_min_blocksize(struct super_block *, int);
9811 extern int sb_has_dirty_inodes(struct super_block *);
9812
9813 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
9814 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
9815 extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
9816 int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
9817 extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
9818 extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
9819 extern ssize_t generic_file_aio_write_nolock(struct kiocb *, const struct iovec *,
9820 unsigned long, loff_t);
9821 extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
9822 unsigned long *, loff_t, loff_t *, size_t, size_t);
9823 extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
9824 unsigned long, loff_t, loff_t *, size_t, ssize_t);
9825 extern ssize_t do_sync_read(struct file *filp, char *buf, size_t len, loff_t *ppos);
9826 extern ssize_t do_sync_write(struct file *filp, const char *buf, size_t len, loff_t *ppos);
9827 extern int generic_segment_checks(const struct iovec *iov,
9828 unsigned long *nr_segs, size_t *count, int access_flags);
9829
9830
9831 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
9832 struct pipe_inode_info *, size_t, unsigned int);
9833 extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
9834 struct file *, loff_t *, size_t, unsigned int);
9835 extern ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *,
9836 struct file *, loff_t *, size_t, unsigned int);
9837 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
9838 struct file *out, loff_t *, size_t len, unsigned int flags);
9839 extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
9840 size_t len, unsigned int flags);
9841
9842 extern void
9843 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
9844 extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
9845 extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
9846 extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset,
9847 int origin);
9848 extern int generic_file_open(struct inode * inode, struct file * filp);
9849 extern int nonseekable_open(struct inode * inode, struct file * filp);
9850 # 1970 "include/linux/fs.h"
9851 static inline __attribute__((always_inline)) int xip_truncate_page(struct address_space *mapping, loff_t from)
9852 {
9853 return 0;
9854 }
9855
9856
9857
9858 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
9859 struct block_device *bdev, const struct iovec *iov, loff_t offset,
9860 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
9861 int lock_type);
9862
9863 enum {
9864 DIO_LOCKING = 1,
9865 DIO_NO_LOCKING,
9866 DIO_OWN_LOCKING,
9867 };
9868
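/* Editor's note: the three wrappers below call __blockdev_direct_IO with the
   different DIO_* modes, selecting how inode locking is handled during direct
   I/O. */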
9869 static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
9870 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
9871 loff_t offset, unsigned long nr_segs, get_block_t get_block,
9872 dio_iodone_t end_io)
9873 {
9874 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
9875 nr_segs, get_block, end_io, DIO_LOCKING);
9876 }
9877
9878 static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
9879 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
9880 loff_t offset, unsigned long nr_segs, get_block_t get_block,
9881 dio_iodone_t end_io)
9882 {
9883 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
9884 nr_segs, get_block, end_io, DIO_NO_LOCKING);
9885 }
9886
9887 static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
9888 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
9889 loff_t offset, unsigned long nr_segs, get_block_t get_block,
9890 dio_iodone_t end_io)
9891 {
9892 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
9893 nr_segs, get_block, end_io, DIO_OWN_LOCKING);
9894 }
9895
9896
9897 extern const struct file_operations generic_ro_fops;
9898
9899
9900
9901 extern int vfs_readlink(struct dentry *, char *, int, const char *);
9902 extern int vfs_follow_link(struct nameidata *, const char *);
9903 extern int page_readlink(struct dentry *, char *, int);
9904 extern void *page_follow_link_light(struct dentry *, struct nameidata *);
9905 extern void page_put_link(struct dentry *, struct nameidata *, void *);
9906 extern int __page_symlink(struct inode *inode, const char *symname, int len,
9907 gfp_t gfp_mask);
9908 extern int page_symlink(struct inode *inode, const char *symname, int len);
9909 extern const struct inode_operations page_symlink_inode_operations;
9910 extern int generic_readlink(struct dentry *, char *, int);
9911 extern void generic_fillattr(struct inode *, struct kstat *);
9912 extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
9913 void inode_add_bytes(struct inode *inode, loff_t bytes);
9914 void inode_sub_bytes(struct inode *inode, loff_t bytes);
9915 loff_t inode_get_bytes(struct inode *inode);
9916 void inode_set_bytes(struct inode *inode, loff_t bytes);
9917
9918 extern int vfs_readdir(struct file *, filldir_t, void *);
9919
9920 extern int vfs_stat(char *, struct kstat *);
9921 extern int vfs_lstat(char *, struct kstat *);
9922 extern int vfs_stat_fd(int dfd, char *, struct kstat *);
9923 extern int vfs_lstat_fd(int dfd, char *, struct kstat *);
9924 extern int vfs_fstat(unsigned int, struct kstat *);
9925
9926 extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
9927 unsigned long arg);
9928 extern int generic_block_fiemap(struct inode *inode,
9929 struct fiemap_extent_info *fieinfo, u64 start,
9930 u64 len, get_block_t *get_block);
9931
9932 extern void get_filesystem(struct file_system_type *fs);
9933 extern void put_filesystem(struct file_system_type *fs);
9934 extern struct file_system_type *get_fs_type(const char *name);
9935 extern struct super_block *get_super(struct block_device *);
9936 extern struct super_block *user_get_super(dev_t);
9937 extern void drop_super(struct super_block *sb);
9938
9939 extern int dcache_dir_open(struct inode *, struct file *);
9940 extern int dcache_dir_close(struct inode *, struct file *);
9941 extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
9942 extern int dcache_readdir(struct file *, void *, filldir_t);
9943 extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
9944 extern int simple_statfs(struct dentry *, struct kstatfs *);
9945 extern int simple_link(struct dentry *, struct inode *, struct dentry *);
9946 extern int simple_unlink(struct inode *, struct dentry *);
9947 extern int simple_rmdir(struct inode *, struct dentry *);
9948 extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
9949 extern int simple_sync_file(struct file *, struct dentry *, int);
9950 extern int simple_empty(struct dentry *);
9951 extern int simple_readpage(struct file *file, struct page *page);
9952 extern int simple_prepare_write(struct file *file, struct page *page,
9953 unsigned offset, unsigned to);
9954 extern int simple_write_begin(struct file *file, struct address_space *mapping,
9955 loff_t pos, unsigned len, unsigned flags,
9956 struct page **pagep, void **fsdata);
9957 extern int simple_write_end(struct file *file, struct address_space *mapping,
9958 loff_t pos, unsigned len, unsigned copied,
9959 struct page *page, void *fsdata);
9960
9961 extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
9962 extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *);
9963 extern const struct file_operations simple_dir_operations;
9964 extern const struct inode_operations simple_dir_inode_operations;
9965 struct tree_descr { char *name; const struct file_operations *ops; int mode; };
9966 struct dentry *d_alloc_name(struct dentry *, const char *);
9967 extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
9968 extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
9969 extern void simple_release_fs(struct vfsmount **mount, int *count);
9970
9971 extern ssize_t simple_read_from_buffer(void *to, size_t count,
9972 loff_t *ppos, const void *from, size_t available);
9973 # 2100 "include/linux/fs.h"
9974 extern int inode_change_ok(struct inode *, struct iattr *);
9975 extern int __attribute__((warn_unused_result)) inode_setattr(struct inode *, struct iattr *);
9976
9977 extern void file_update_time(struct file *file);
9978
9979 extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt);
9980 extern void save_mount_options(struct super_block *sb, char *options);
9981
9982 static inline __attribute__((always_inline)) ino_t parent_ino(struct dentry *dentry)
9983 {
9984 ino_t res;
9985
9986 _spin_lock(&dentry->d_lock);
9987 res = dentry->d_parent->d_inode->i_ino;
9988 _spin_unlock(&dentry->d_lock);
9989 return res;
9990 }
9991
9992
9993
9994
9995
9996
9997
9998 struct simple_transaction_argresp {
9999 ssize_t size;
10000 char data[0];
10001 };
10002
10003
10004
10005 char *simple_transaction_get(struct file *file, const char *buf,
10006 size_t size);
10007 ssize_t simple_transaction_read(struct file *file, char *buf,
10008 size_t size, loff_t *pos);
10009 int simple_transaction_release(struct inode *inode, struct file *file);
10010
10011 static inline __attribute__((always_inline)) void simple_transaction_set(struct file *file, size_t n)
10012 {
10013 struct simple_transaction_argresp *ar = file->private_data;
10014
10015 do { if (__builtin_expect(!!(n > ((1UL << 12) - sizeof(struct simple_transaction_argresp))), 0)) do { dump_bfin_trace_buffer(); printk("<0>" "BUG: failure at %s:%d/%s()!\n", "include/linux/fs.h", 2141, __func__); panic("BUG!"); } while (0); } while(0);
10016
10017
10018
10019
10020
10021 __asm__ __volatile__("": : :"memory");
10022 ar->size = n;
10023 }
10024 # 2181 "include/linux/fs.h"
10025 static inline __attribute__((always_inline)) void __attribute__((format(printf, 1, 2)))
10026 __simple_attr_check_format(const char *fmt, ...)
10027 {
10028
10029 }
10030
10031 int simple_attr_open(struct inode *inode, struct file *file,
10032 int (*get)(void *, u64 *), int (*set)(void *, u64),
10033 const char *fmt);
10034 int simple_attr_release(struct inode *inode, struct file *file);
10035 ssize_t simple_attr_read(struct file *file, char *buf,
10036 size_t len, loff_t *ppos);
10037 ssize_t simple_attr_write(struct file *file, const char *buf,
10038 size_t len, loff_t *ppos);
10039 # 2208 "include/linux/fs.h"
10040 static inline __attribute__((always_inline)) char *alloc_secdata(void)
10041 {
10042 return (char *)1;
10043 }
10044
10045 static inline __attribute__((always_inline)) void free_secdata(void *secdata)
10046 { }
10047
10048
10049 struct ctl_table;
10050 int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
10051 void *buffer, size_t *lenp, loff_t *ppos);
10052
10053 int get_filesystem_list(char * buf);
10054 # 19 "include/linux/debugfs.h" 2
10055
10056
10057
10058 struct file_operations;
10059
10060 struct debugfs_blob_wrapper {
10061 void *data;
10062 unsigned long size;
10063 };
10064
10065 extern struct dentry *arch_debugfs_dir;
10066
10067
10068
10069
10070 extern const struct file_operations debugfs_file_operations;
10071 extern const struct inode_operations debugfs_link_operations;
10072
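/* Editor's note: the debugfs_create_* helpers below create files, directories,
   symlinks and simple integer/bool/blob views in debugfs; this is the interface
   the tracer code in this translation unit builds its control files on. */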
10073 struct dentry *debugfs_create_file(const char *name, mode_t mode,
10074 struct dentry *parent, void *data,
10075 const struct file_operations *fops);
10076
10077 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
10078
10079 struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
10080 const char *dest);
10081
10082 void debugfs_remove(struct dentry *dentry);
10083 void debugfs_remove_recursive(struct dentry *dentry);
10084
10085 struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
10086 struct dentry *new_dir, const char *new_name);
10087
10088 struct dentry *debugfs_create_u8(const char *name, mode_t mode,
10089 struct dentry *parent, u8 *value);
10090 struct dentry *debugfs_create_u16(const char *name, mode_t mode,
10091 struct dentry *parent, u16 *value);
10092 struct dentry *debugfs_create_u32(const char *name, mode_t mode,
10093 struct dentry *parent, u32 *value);
10094 struct dentry *debugfs_create_u64(const char *name, mode_t mode,
10095 struct dentry *parent, u64 *value);
10096 struct dentry *debugfs_create_x8(const char *name, mode_t mode,
10097 struct dentry *parent, u8 *value);
10098 struct dentry *debugfs_create_x16(const char *name, mode_t mode,
10099 struct dentry *parent, u16 *value);
10100 struct dentry *debugfs_create_x32(const char *name, mode_t mode,
10101 struct dentry *parent, u32 *value);
10102 struct dentry *debugfs_create_bool(const char *name, mode_t mode,
10103 struct dentry *parent, u32 *value);
10104
10105 struct dentry *debugfs_create_blob(const char *name, mode_t mode,
10106 struct dentry *parent,
10107 struct debugfs_blob_wrapper *blob);
10108 # 19 "kernel/trace/trace.c" 2
10109 # 1 "include/linux/pagemap.h" 1
10110
10111
10112
10113
10114
10115
10116 # 1 "include/linux/mm.h" 1
10117 # 10 "include/linux/mm.h"
10118 # 1 "include/linux/mmdebug.h" 1
10119
10120
10121
10122 # 1 "include/linux/autoconf.h" 1
10123 # 5 "include/linux/mmdebug.h" 2
10124 # 11 "include/linux/mm.h" 2
10125
10126 # 1 "include/linux/rbtree.h" 1
10127 # 100 "include/linux/rbtree.h"
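/* Editor's note: intrusive red-black tree node; the sizeof(long) alignment
   frees the low bits of rb_parent_color to store the node colour alongside the
   parent pointer. */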
10128 struct rb_node
10129 {
10130 unsigned long rb_parent_color;
10131
10132
10133 struct rb_node *rb_right;
10134 struct rb_node *rb_left;
10135 } __attribute__((aligned(sizeof(long))));
10136
10137
10138 struct rb_root
10139 {
10140 struct rb_node *rb_node;
10141 };
10142 # 123 "include/linux/rbtree.h"
10143 static inline __attribute__((always_inline)) void rb_set_parent(struct rb_node *rb, struct rb_node *p)
10144 {
10145 rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p;
10146 }
10147 static inline __attribute__((always_inline)) void rb_set_color(struct rb_node *rb, int color)
10148 {
10149 rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
10150 }
10151 # 139 "include/linux/rbtree.h"
10152 extern void rb_insert_color(struct rb_node *, struct rb_root *);
10153 extern void rb_erase(struct rb_node *, struct rb_root *);
10154
10155
10156 extern struct rb_node *rb_next(struct rb_node *);
10157 extern struct rb_node *rb_prev(struct rb_node *);
10158 extern struct rb_node *rb_first(struct rb_root *);
10159 extern struct rb_node *rb_last(struct rb_root *);
10160
10161
10162 extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
10163 struct rb_root *root);
10164
10165 static inline __attribute__((always_inline)) void rb_link_node(struct rb_node * node, struct rb_node * parent,
10166 struct rb_node ** rb_link)
10167 {
10168 node->rb_parent_color = (unsigned long )parent;
10169 node->rb_left = node->rb_right = ((void *)0);
10170
10171 *rb_link = node;
10172 }
10173 # 13 "include/linux/mm.h" 2
10174
10175 # 1 "include/linux/debug_locks.h" 1
10176
10177
10178
10179
10180
10181 struct task_struct;
10182
10183 extern int debug_locks;
10184 extern int debug_locks_silent;
10185
10186
10187
10188
10189 extern int debug_locks_off(void);
10190 # 35 "include/linux/debug_locks.h"
10191 extern void locking_selftest(void);
10192
10193
10194
10195
10196 struct task_struct;
10197 # 49 "include/linux/debug_locks.h"
10198 static inline __attribute__((always_inline)) void debug_show_all_locks(void)
10199 {
10200 }
10201
10202 static inline __attribute__((always_inline)) void __debug_show_held_locks(struct task_struct *task)
10203 {
10204 }
10205
10206 static inline __attribute__((always_inline)) void debug_show_held_locks(struct task_struct *task)
10207 {
10208 }
10209
10210 static inline __attribute__((always_inline)) void
10211 debug_check_no_locks_freed(const void *from, unsigned long len)
10212 {
10213 }
10214
10215 static inline __attribute__((always_inline)) void
10216 debug_check_no_locks_held(struct task_struct *task)
10217 {
10218 }
10219 # 15 "include/linux/mm.h" 2
10220 # 1 "include/linux/mm_types.h" 1
10221
10222
10223
10224 # 1 "include/linux/auxvec.h" 1
10225
10226
10227
10228 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/auxvec.h" 1
10229 # 5 "include/linux/auxvec.h" 2
10230 # 5 "include/linux/mm_types.h" 2
10231 # 15 "include/linux/mm_types.h"
10232 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mmu.h" 1
10233
10234
10235
10236
10237
10238 struct sram_list_struct {
10239 struct sram_list_struct *next;
10240 void *addr;
10241 size_t length;
10242 };
10243
10244 typedef struct {
10245 struct vm_list_struct *vmlist;
10246 unsigned long end_brk;
10247 unsigned long stack_start;
10248
10249
10250
10251 void *l1_stack_save;
10252
10253 struct sram_list_struct *sram_list;
10254 # 30 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mmu.h"
10255 } mm_context_t;
10256 # 16 "include/linux/mm_types.h" 2
10257
10258
10259
10260
10261
10262
10263 struct address_space;
10264
10265
10266
10267
10268
10269
10270 typedef unsigned long mm_counter_t;
10271 # 39 "include/linux/mm_types.h"
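/* Editor's note: one struct page describes each physical page frame; the
   anonymous unions overlay fields used by the page cache, anonymous mappings
   and the slab allocators. */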
10272 struct page {
10273 unsigned long flags;
10274
10275 atomic_t _count;
10276 union {
10277 atomic_t _mapcount;
10278
10279
10280
10281 struct {
10282 u16 inuse;
10283 u16 objects;
10284 };
10285 };
10286 union {
10287 struct {
10288 unsigned long private;
10289
10290
10291
10292
10293
10294
10295 struct address_space *mapping;
10296
10297
10298
10299
10300
10301
10302 };
10303
10304
10305
10306 struct kmem_cache *slab;
10307 struct page *first_page;
10308 };
10309 union {
10310 unsigned long index;
10311 void *freelist;
10312 };
10313 struct list_head lru;
10314 # 97 "include/linux/mm_types.h"
10315 };
10316
10317
10318
10319
10320
10321
10322
10323 struct vm_area_struct {
10324 struct mm_struct * vm_mm;
10325 unsigned long vm_start;
10326 unsigned long vm_end;
10327
10328
10329
10330 struct vm_area_struct *vm_next;
10331
10332 pgprot_t vm_page_prot;
10333 unsigned long vm_flags;
10334
10335 struct rb_node vm_rb;
10336
10337
10338
10339
10340
10341
10342
10343 union {
10344 struct {
10345 struct list_head list;
10346 void *parent;
10347 struct vm_area_struct *head;
10348 } vm_set;
10349
10350 struct raw_prio_tree_node prio_tree_node;
10351 } shared;
10352
10353
10354
10355
10356
10357
10358
10359 struct list_head anon_vma_node;
10360 struct anon_vma *anon_vma;
10361
10362
10363 struct vm_operations_struct * vm_ops;
10364
10365
10366 unsigned long vm_pgoff;
10367
10368 struct file * vm_file;
10369 void * vm_private_data;
10370 unsigned long vm_truncate_count;
10371
10372
10373 atomic_t vm_usage;
10374
10375
10376
10377
10378 };
10379
10380 struct core_thread {
10381 struct task_struct *task;
10382 struct core_thread *next;
10383 };
10384
10385 struct core_state {
10386 atomic_t nr_threads;
10387 struct core_thread dumper;
10388 struct completion startup;
10389 };
10390
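/* Editor's note: per-address-space descriptor; on this Blackfin (no-MMU) build
   the mm_context_t defined above carries the vm_list, brk limit and L1 stack
   save area rather than MMU page-table state. */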
10391 struct mm_struct {
10392 struct vm_area_struct * mmap;
10393 struct rb_root mm_rb;
10394 struct vm_area_struct * mmap_cache;
10395 unsigned long (*get_unmapped_area) (struct file *filp,
10396 unsigned long addr, unsigned long len,
10397 unsigned long pgoff, unsigned long flags);
10398 void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
10399 unsigned long mmap_base;
10400 unsigned long task_size;
10401 unsigned long cached_hole_size;
10402 unsigned long free_area_cache;
10403 pgd_t * pgd;
10404 atomic_t mm_users;
10405 atomic_t mm_count;
10406 int map_count;
10407 struct rw_semaphore mmap_sem;
10408 spinlock_t page_table_lock;
10409
10410 struct list_head mmlist;
10411
10412
10413
10414
10415
10416
10417
10418 mm_counter_t _file_rss;
10419 mm_counter_t _anon_rss;
10420
10421 unsigned long hiwater_rss;
10422 unsigned long hiwater_vm;
10423
10424 unsigned long total_vm, locked_vm, shared_vm, exec_vm;
10425 unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
10426 unsigned long start_code, end_code, start_data, end_data;
10427 unsigned long start_brk, brk, start_stack;
10428 unsigned long arg_start, arg_end, env_start, env_end;
10429
10430 unsigned long saved_auxv[(2*(0 + 18 + 1))];
10431
10432 cpumask_t cpu_vm_mask;
10433
10434
10435 mm_context_t context;
10436 # 226 "include/linux/mm_types.h"
10437 unsigned int faultstamp;
10438 unsigned int token_priority;
10439 unsigned int last_interval;
10440
10441 unsigned long flags;
10442
10443 struct core_state *core_state;
10444
10445
10446 rwlock_t ioctx_list_lock;
10447 struct kioctx *ioctx_list;
10448 # 253 "include/linux/mm_types.h"
10449 struct file *exe_file;
10450 unsigned long num_exe_file_vmas;
10451
10452
10453
10454
10455 };
10456 # 16 "include/linux/mm.h" 2
10457
10458 struct mempolicy;
10459 struct anon_vma;
10460 struct file_ra_state;
10461 struct user_struct;
10462 struct writeback_control;
10463
10464
10465 extern unsigned long max_mapnr;
10466
10467
10468 extern unsigned long num_physpages;
10469 extern void * high_memory;
10470 extern int page_cluster;
10471
10472
10473 extern int sysctl_legacy_va_layout;
10474
10475
10476
10477
10478 extern unsigned long mmap_min_addr;
10479
10480
10481 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h" 1
10482
10483
10484
10485 # 1 "include/asm-generic/4level-fixup.h" 1
10486 # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h" 2
10487
10488
10489
10490
10491 typedef pte_t *pte_addr_t;
10492 # 34 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h"
10493 extern void paging_init(void);
10494
10495
10496
10497
10498
10499
10500
10501 static inline __attribute__((always_inline)) int pte_file(pte_t pte)
10502 {
10503 return 0;
10504 }
10505 # 62 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h"
10506 static inline __attribute__((always_inline)) pte_t pte_rdprotect(pte_t _pte) { _pte.pte &= ~(0x00000004); return _pte; };
10507 static inline __attribute__((always_inline)) pte_t pte_mkread(pte_t _pte) { _pte.pte |= (0x00000004); return _pte; };
10508 static inline __attribute__((always_inline)) pte_t pte_wrprotect(pte_t _pte) { _pte.pte &= ~(0x00000008); return _pte; };
10509 static inline __attribute__((always_inline)) pte_t pte_mkwrite(pte_t _pte) { _pte.pte |= (0x00000008); return _pte; };
10510 static inline __attribute__((always_inline)) pte_t pte_exprotect(pte_t _pte) { _pte.pte &= ~(0x00000004 | 0x00000008); return _pte; };
10511 static inline __attribute__((always_inline)) pte_t pte_mkexec(pte_t _pte) { _pte.pte |= (0x00000004 | 0x00000008); return _pte; };
10512 static inline __attribute__((always_inline)) pte_t pte_mkclean(pte_t _pte) { _pte.pte &= ~(0x00000080); return _pte; };
10513 static inline __attribute__((always_inline)) pte_t pte_mkdirty(pte_t _pte) { _pte.pte |= (0x00000080); return _pte; };
10514 static inline __attribute__((always_inline)) pte_t pte_mkold(pte_t _pte) { _pte.pte &= ~0x00000010 | 0x00000004 | 0x00000008; return _pte; };
10515 static inline __attribute__((always_inline)) pte_t pte_mkyoung(pte_t _pte) { _pte.pte |= 0x00000010 | 0x00000004 | 0x00000008; return _pte; };
10516
10517
10518
10519
10520
10521
10522
10523 extern unsigned int kobjsize(const void *objp);
10524 # 95 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h"
10525 # 1 "include/asm-generic/pgtable.h" 1
10526 # 96 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h" 2
10527 # 41 "include/linux/mm.h" 2
10528 # 57 "include/linux/mm.h"
10529 extern struct kmem_cache *vm_area_cachep;
10530
10531
10532
10533
10534
10535
10536 struct vm_list_struct {
10537 struct vm_list_struct *next;
10538 struct vm_area_struct *vma;
10539 };
10540
10541
10542 extern struct rb_root nommu_vma_tree;
10543 extern struct rw_semaphore nommu_vma_sem;
10544
10545 extern unsigned int kobjsize(const void *objp);
10546 # 144 "include/linux/mm.h"
10547 extern pgprot_t protection_map[16];
10548 # 159 "include/linux/mm.h"
10549 struct vm_fault {
10550 unsigned int flags;
10551 unsigned long pgoff;
10552 void *virtual_address;
10553
10554 struct page *page;
10555
10556
10557
10558
10559 };
10560
10561
10562
10563
10564
10565
10566 struct vm_operations_struct {
10567 void (*open)(struct vm_area_struct * area);
10568 void (*close)(struct vm_area_struct * area);
10569 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
10570
10571
10572
10573 int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
10574
10575
10576
10577
10578 int (*access)(struct vm_area_struct *vma, unsigned long addr,
10579 void *buf, int len, int write);
10580 # 215 "include/linux/mm.h"
10581 };
10582
10583 struct mmu_gather;
10584 struct inode;
10585 # 227 "include/linux/mm.h"
10586 # 1 "include/linux/page-flags.h" 1
10587 # 72 "include/linux/page-flags.h"
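/* Editor's note: per-page status bits stored in page->flags; the PageX,
   SetPageX and ClearPageX accessors further below appear here already expanded
   from the PAGEFLAG() family of macros by the preprocessor. */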
10588 enum pageflags {
10589 PG_locked,
10590 PG_error,
10591 PG_referenced,
10592 PG_uptodate,
10593 PG_dirty,
10594 PG_lru,
10595 PG_active,
10596 PG_slab,
10597 PG_owner_priv_1,
10598 PG_arch_1,
10599 PG_reserved,
10600 PG_private,
10601 PG_writeback,
10602
10603 PG_head,
10604 PG_tail,
10605
10606
10607
10608 PG_swapcache,
10609 PG_mappedtodisk,
10610 PG_reclaim,
10611 PG_buddy,
10612 PG_swapbacked,
10613
10614
10615
10616
10617
10618
10619
10620 __NR_PAGEFLAGS,
10621
10622
10623 PG_checked = PG_owner_priv_1,
10624
10625
10626 PG_pinned = PG_owner_priv_1,
10627 PG_savepinned = PG_dirty,
10628
10629
10630 PG_slob_page = PG_active,
10631 PG_slob_free = PG_private,
10632
10633
10634 PG_slub_frozen = PG_active,
10635 PG_slub_debug = PG_error,
10636 };
10637 # 181 "include/linux/page-flags.h"
10638 struct page;
10639
10640 static inline __attribute__((always_inline)) int PageLocked(struct page *page) { return test_bit(PG_locked, &page->flags); }
10641 static inline __attribute__((always_inline)) int PageError(struct page *page) { return test_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) void SetPageError(struct page *page) { set_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) void ClearPageError(struct page *page) { clear_bit(PG_error, &page->flags); }
10642 static inline __attribute__((always_inline)) int PageReferenced(struct page *page) { return test_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) void SetPageReferenced(struct page *page) { set_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReferenced(struct page *page) { clear_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReferenced(struct page *page) { return test_and_clear_bit(PG_referenced, &page->flags); }
10643 static inline __attribute__((always_inline)) int PageDirty(struct page *page) { return test_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void SetPageDirty(struct page *page) { set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void ClearPageDirty(struct page *page) { clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageDirty(struct page *page) { return test_and_set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageDirty(struct page *page) { return test_and_clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageDirty(struct page *page) { __clear_bit(PG_dirty, &page->flags); }
10644 static inline __attribute__((always_inline)) int PageLRU(struct page *page) { return test_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void SetPageLRU(struct page *page) { set_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void ClearPageLRU(struct page *page) { clear_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageLRU(struct page *page) { __clear_bit(PG_lru, &page->flags); }
10645 static inline __attribute__((always_inline)) int PageActive(struct page *page) { return test_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void SetPageActive(struct page *page) { set_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void ClearPageActive(struct page *page) { clear_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageActive(struct page *page) { __clear_bit(PG_active, &page->flags); }
10646 static inline __attribute__((always_inline)) int TestClearPageActive(struct page *page) { return test_and_clear_bit(PG_active, &page->flags); }
10647 static inline __attribute__((always_inline)) int PageSlab(struct page *page) { return test_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlab(struct page *page) { __set_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlab(struct page *page) { __clear_bit(PG_slab, &page->flags); }
10648 static inline __attribute__((always_inline)) int PageChecked(struct page *page) { return test_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) void SetPageChecked(struct page *page) { set_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageChecked(struct page *page) { clear_bit(PG_checked, &page->flags); }
10649 static inline __attribute__((always_inline)) int PagePinned(struct page *page) { return test_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) void SetPagePinned(struct page *page) { set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePinned(struct page *page) { clear_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestSetPagePinned(struct page *page) { return test_and_set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestClearPagePinned(struct page *page) { return test_and_clear_bit(PG_pinned, &page->flags); }
10650 static inline __attribute__((always_inline)) int PageSavePinned(struct page *page) { return test_bit(PG_savepinned, &page->flags); } static inline __attribute__((always_inline)) void SetPageSavePinned(struct page *page) { set_bit(PG_savepinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSavePinned(struct page *page) { clear_bit(PG_savepinned, &page->flags); };
10651 static inline __attribute__((always_inline)) int PageReserved(struct page *page) { return test_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void SetPageReserved(struct page *page) { set_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReserved(struct page *page) { clear_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageReserved(struct page *page) { __clear_bit(PG_reserved, &page->flags); }
10652 static inline __attribute__((always_inline)) int PagePrivate(struct page *page) { return test_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void SetPagePrivate(struct page *page) { set_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate(struct page *page) { clear_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void __ClearPagePrivate(struct page *page) { __clear_bit(PG_private, &page->flags); }
10653 static inline __attribute__((always_inline)) void __SetPagePrivate(struct page *page) { __set_bit(PG_private, &page->flags); }
10654 static inline __attribute__((always_inline)) int PageSwapBacked(struct page *page) { return test_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void SetPageSwapBacked(struct page *page) { set_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSwapBacked(struct page *page) { clear_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSwapBacked(struct page *page) { __clear_bit(PG_swapbacked, &page->flags); }
10655
10656 static inline __attribute__((always_inline)) int PageSlobPage(struct page *page) { return test_bit(PG_slob_page, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlobPage(struct page *page) { __set_bit(PG_slob_page, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlobPage(struct page *page) { __clear_bit(PG_slob_page, &page->flags); }
10657 static inline __attribute__((always_inline)) int PageSlobFree(struct page *page) { return test_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlobFree(struct page *page) { __set_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlobFree(struct page *page) { __clear_bit(PG_slob_free, &page->flags); }
10658
10659 static inline __attribute__((always_inline)) int PageSlubFrozen(struct page *page) { return test_bit(PG_slub_frozen, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlubFrozen(struct page *page) { __set_bit(PG_slub_frozen, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlubFrozen(struct page *page) { __clear_bit(PG_slub_frozen, &page->flags); }
10660 static inline __attribute__((always_inline)) int PageSlubDebug(struct page *page) { return test_bit(PG_slub_debug, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlubDebug(struct page *page) { __set_bit(PG_slub_debug, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlubDebug(struct page *page) { __clear_bit(PG_slub_debug, &page->flags); }
10661
10662
10663
10664
10665
10666 static inline __attribute__((always_inline)) int PageWriteback(struct page *page) { return test_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageWriteback(struct page *page) { return test_and_set_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageWriteback(struct page *page) { return test_and_clear_bit(PG_writeback, &page->flags); }
10667 static inline __attribute__((always_inline)) int PageBuddy(struct page *page) { return test_bit(PG_buddy, &page->flags); } static inline __attribute__((always_inline)) void __SetPageBuddy(struct page *page) { __set_bit(PG_buddy, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageBuddy(struct page *page) { __clear_bit(PG_buddy, &page->flags); }
10668 static inline __attribute__((always_inline)) int PageMappedToDisk(struct page *page) { return test_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) void SetPageMappedToDisk(struct page *page) { set_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) void ClearPageMappedToDisk(struct page *page) { clear_bit(PG_mappedtodisk, &page->flags); }
10669
10670
10671 static inline __attribute__((always_inline)) int PageReclaim(struct page *page) { return test_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void SetPageReclaim(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReclaim(struct page *page) { clear_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReclaim(struct page *page) { return test_and_clear_bit(PG_reclaim, &page->flags); }
10672 static inline __attribute__((always_inline)) int PageReadahead(struct page *page) { return test_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void SetPageReadahead(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReadahead(struct page *page) { clear_bit(PG_reclaim, &page->flags); }
10673 # 224 "include/linux/page-flags.h"
10674 static inline __attribute__((always_inline)) int PageHighMem(struct page *page) { return 0; }
10675
10676
10677
10678
10679
10680 static inline __attribute__((always_inline)) int PageSwapCache(struct page *page) { return 0; }
10681 # 244 "include/linux/page-flags.h"
10682 static inline __attribute__((always_inline)) int PageMlocked(struct page *page) { return 0; }
10683 static inline __attribute__((always_inline)) void SetPageMlocked(struct page *page) { } static inline __attribute__((always_inline)) int TestClearPageMlocked(struct page *page) { return 0; }
10684
10685 static inline __attribute__((always_inline)) int PageUnevictable(struct page *page) { return 0; } static inline __attribute__((always_inline)) int TestClearPageUnevictable(struct page *page) { return 0; }
10686 static inline __attribute__((always_inline)) void SetPageUnevictable(struct page *page) { } static inline __attribute__((always_inline)) void ClearPageUnevictable(struct page *page) { }
10687 static inline __attribute__((always_inline)) void __ClearPageUnevictable(struct page *page) { }
10688
10689
10690
10691
10692
10693 static inline __attribute__((always_inline)) int PageUncached(struct page *page) { return 0; }
10694
10695
10696 static inline __attribute__((always_inline)) int PageUptodate(struct page *page)
10697 {
10698 int ret = test_bit(PG_uptodate, &(page)->flags);
10699 # 270 "include/linux/page-flags.h"
10700 if (ret)
10701 __asm__ __volatile__("": : :"memory");
10702
10703 return ret;
10704 }
10705
10706 static inline __attribute__((always_inline)) void __SetPageUptodate(struct page *page)
10707 {
10708 __asm__ __volatile__("": : :"memory");
10709 __set_bit(PG_uptodate, &(page)->flags);
10710 }
10711
10712 static inline __attribute__((always_inline)) void SetPageUptodate(struct page *page)
10713 {
10714 # 296 "include/linux/page-flags.h"
10715 __asm__ __volatile__("": : :"memory");
10716 set_bit(PG_uptodate, &(page)->flags);
10717
10718 }
10719
10720 static inline __attribute__((always_inline)) void ClearPageUptodate(struct page *page) { clear_bit(PG_uptodate, &page->flags); }
10721
10722 extern void cancel_dirty_page(struct page *page, unsigned int account_size);
10723
10724 int test_clear_page_writeback(struct page *page);
10725 int test_set_page_writeback(struct page *page);
10726
10727 static inline __attribute__((always_inline)) void set_page_writeback(struct page *page)
10728 {
10729 test_set_page_writeback(page);
10730 }
10731 # 320 "include/linux/page-flags.h"
10732 static inline __attribute__((always_inline)) int PageHead(struct page *page) { return test_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __SetPageHead(struct page *page) { __set_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageHead(struct page *page) { __clear_bit(PG_head, &page->flags); }
10733 static inline __attribute__((always_inline)) int PageTail(struct page *page) { return test_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __SetPageTail(struct page *page) { __set_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageTail(struct page *page) { __clear_bit(PG_tail, &page->flags); }
10734
10735 static inline __attribute__((always_inline)) int PageCompound(struct page *page)
10736 {
10737 return page->flags & ((1L << PG_head) | (1L << PG_tail));
10738
10739 }
10740 # 228 "include/linux/mm.h" 2
10741 # 245 "include/linux/mm.h"
10742 static inline __attribute__((always_inline)) int put_page_testzero(struct page *page)
10743 {
10744 do { } while (0);
10745 return (atomic_sub_return(1, (&page->_count)) == 0);
10746 }
10747
10748
10749
10750
10751
10752 static inline __attribute__((always_inline)) int get_page_unless_zero(struct page *page)
10753 {
10754 do { } while (0);
10755 return ({ int c, old; c = (((&page->_count))->counter); while (c != (0) && (old = ((int)((__typeof__(*((&((((&page->_count)))->counter)))))__cmpxchg_local_generic(((&((((&page->_count)))->counter))), (unsigned long)(((c))), (unsigned long)(((c + (1)))), sizeof(*((&((((&page->_count)))->counter)))))))) != c) c = old; c != (0); });
10756 }
10757
10758
10759 struct page *vmalloc_to_page(const void *addr);
10760 unsigned long vmalloc_to_pfn(const void *addr);
10761
10762
10763
10764
10765
10766
10767
10768 static inline __attribute__((always_inline)) int is_vmalloc_addr(const void *x)
10769 {
10770
10771
10772
10773
10774
10775 return 0;
10776
10777 }
10778
10779 static inline __attribute__((always_inline)) struct page *compound_head(struct page *page)
10780 {
10781 if (__builtin_expect(!!(PageTail(page)), 0))
10782 return page->first_page;
10783 return page;
10784 }
10785
10786 static inline __attribute__((always_inline)) int page_count(struct page *page)
10787 {
10788 return ((&compound_head(page)->_count)->counter);
10789 }
10790
10791 static inline __attribute__((always_inline)) void get_page(struct page *page)
10792 {
10793 page = compound_head(page);
10794 do { } while (0);
10795 atomic_inc(&page->_count);
10796 }
10797
10798 static inline __attribute__((always_inline)) struct page *virt_to_head_page(const void *x)
10799 {
10800 struct page *page = (mem_map + (((unsigned long)(x)-(0x00000000)) >> 12));
10801 return compound_head(page);
10802 }
10803
10804
10805
10806
10807
10808 static inline __attribute__((always_inline)) void init_page_count(struct page *page)
10809 {
10810 (((&page->_count)->counter) = 1);
10811 }
10812
10813 void put_page(struct page *page);
10814 void put_pages_list(struct list_head *pages);
10815
10816 void split_page(struct page *page, unsigned int order);
10817 void split_compound_page(struct page *page, unsigned int order);
10818
10819
10820
10821
10822
10823
10824 typedef void compound_page_dtor(struct page *);
10825
10826 static inline __attribute__((always_inline)) void set_compound_page_dtor(struct page *page,
10827 compound_page_dtor *dtor)
10828 {
10829 page[1].lru.next = (void *)dtor;
10830 }
10831
10832 static inline __attribute__((always_inline)) compound_page_dtor *get_compound_page_dtor(struct page *page)
10833 {
10834 return (compound_page_dtor *)page[1].lru.next;
10835 }
10836
10837 static inline __attribute__((always_inline)) int compound_order(struct page *page)
10838 {
10839 if (!PageHead(page))
10840 return 0;
10841 return (unsigned long)page[1].lru.prev;
10842 }
10843
10844 static inline __attribute__((always_inline)) void set_compound_order(struct page *page, unsigned long order)
10845 {
10846 page[1].lru.prev = (void *)order;
10847 }
10848 # 497 "include/linux/mm.h"
10849 static inline __attribute__((always_inline)) enum zone_type page_zonenum(struct page *page)
10850 {
10851 return (page->flags >> (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0))) & ((1UL << 2) - 1);
10852 }
10853 # 510 "include/linux/mm.h"
10854 static inline __attribute__((always_inline)) int page_zone_id(struct page *page)
10855 {
10856 return (page->flags >> ((((((sizeof(unsigned long)*8) - 0) - 0) < ((((sizeof(unsigned long)*8) - 0) - 0) - 2))? (((sizeof(unsigned long)*8) - 0) - 0) : ((((sizeof(unsigned long)*8) - 0) - 0) - 2)) * ((0 + 2) != 0))) & ((1UL << (0 + 2)) - 1);
10857 }
10858
10859 static inline __attribute__((always_inline)) int zone_to_nid(struct zone *zone)
10860 {
10861
10862
10863
10864 return 0;
10865
10866 }
10867
10868
10869
10870
10871 static inline __attribute__((always_inline)) int page_to_nid(struct page *page)
10872 {
10873 return (page->flags >> ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0))) & ((1UL << 0) - 1);
10874 }
10875
10876
10877 static inline __attribute__((always_inline)) struct zone *page_zone(struct page *page)
10878 {
10879 return &(&contig_page_data)->node_zones[page_zonenum(page)];
10880 }
10881 # 545 "include/linux/mm.h"
10882 static inline __attribute__((always_inline)) void set_page_zone(struct page *page, enum zone_type zone)
10883 {
10884 page->flags &= ~(((1UL << 2) - 1) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0)));
10885 page->flags |= (zone & ((1UL << 2) - 1)) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0));
10886 }
10887
10888 static inline __attribute__((always_inline)) void set_page_node(struct page *page, unsigned long node)
10889 {
10890 page->flags &= ~(((1UL << 0) - 1) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0)));
10891 page->flags |= (node & ((1UL << 0) - 1)) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0));
10892 }
10893
10894 static inline __attribute__((always_inline)) void set_page_section(struct page *page, unsigned long section)
10895 {
10896 page->flags &= ~(((1UL << 0) - 1) << (((sizeof(unsigned long)*8) - 0) * (0 != 0)));
10897 page->flags |= (section & ((1UL << 0) - 1)) << (((sizeof(unsigned long)*8) - 0) * (0 != 0));
10898 }
10899
10900 static inline __attribute__((always_inline)) void set_page_links(struct page *page, enum zone_type zone,
10901 unsigned long node, unsigned long pfn)
10902 {
10903 set_page_zone(page, zone);
10904 set_page_node(page, node);
10905 set_page_section(page, ((pfn) >> 0));
10906 }
10907
10908
10909
10910
10911
10912 static inline __attribute__((always_inline)) unsigned long round_hint_to_min(unsigned long hint)
10913 {
10914
10915
10916
10917
10918
10919
10920 return hint;
10921 }
10922
10923
10924
10925
10926 # 1 "include/linux/vmstat.h" 1
10927
10928
10929
10930
10931
10932 # 1 "include/linux/mm.h" 1
10933 # 7 "include/linux/vmstat.h" 2
10934 # 31 "include/linux/vmstat.h"
10935 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
10936 PGALLOC_DMA, PGALLOC_NORMAL , PGALLOC_MOVABLE,
10937 PGFREE, PGACTIVATE, PGDEACTIVATE,
10938 PGFAULT, PGMAJFAULT,
10939 PGREFILL_DMA, PGREFILL_NORMAL , PGREFILL_MOVABLE,
10940 PGSTEAL_DMA, PGSTEAL_NORMAL , PGSTEAL_MOVABLE,
10941 PGSCAN_KSWAPD_DMA, PGSCAN_KSWAPD_NORMAL , PGSCAN_KSWAPD_MOVABLE,
10942 PGSCAN_DIRECT_DMA, PGSCAN_DIRECT_NORMAL , PGSCAN_DIRECT_MOVABLE,
10943 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
10944 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
10945 # 54 "include/linux/vmstat.h"
10946 NR_VM_EVENT_ITEMS
10947 };
10948
10949 extern int sysctl_stat_interval;
10950 # 70 "include/linux/vmstat.h"
10951 struct vm_event_state {
10952 unsigned long event[NR_VM_EVENT_ITEMS];
10953 };
10954
10955 extern __typeof__(struct vm_event_state) per_cpu__vm_event_states;
10956
10957 static inline __attribute__((always_inline)) void __count_vm_event(enum vm_event_item item)
10958 {
10959 per_cpu__vm_event_states.event[item]++;
10960 }
10961
10962 static inline __attribute__((always_inline)) void count_vm_event(enum vm_event_item item)
10963 {
10964 (*({ extern int simple_identifier_vm_event_states(void); do { } while (0); &per_cpu__vm_event_states; })).event[item]++;
10965 do { } while (0);
10966 }
10967
10968 static inline __attribute__((always_inline)) void __count_vm_events(enum vm_event_item item, long delta)
10969 {
10970 per_cpu__vm_event_states.event[item] += delta;
10971 }
10972
10973 static inline __attribute__((always_inline)) void count_vm_events(enum vm_event_item item, long delta)
10974 {
10975 (*({ extern int simple_identifier_vm_event_states(void); do { } while (0); &per_cpu__vm_event_states; })).event[item] += delta;
10976 do { } while (0);
10977 }
10978
10979 extern void all_vm_events(unsigned long *);
10980
10981
10982
10983 static inline __attribute__((always_inline)) void vm_events_fold_cpu(int cpu)
10984 {
10985 }
10986 # 138 "include/linux/vmstat.h"
10987 extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
10988
10989 static inline __attribute__((always_inline)) void zone_page_state_add(long x, struct zone *zone,
10990 enum zone_stat_item item)
10991 {
10992 atomic_long_add(x, &zone->vm_stat[item]);
10993 atomic_long_add(x, &vm_stat[item]);
10994 }
10995
10996 static inline __attribute__((always_inline)) unsigned long global_page_state(enum zone_stat_item item)
10997 {
10998 long x = atomic_long_read(&vm_stat[item]);
10999
11000
11001
11002
11003 return x;
11004 }
11005
11006 static inline __attribute__((always_inline)) unsigned long zone_page_state(struct zone *zone,
11007 enum zone_stat_item item)
11008 {
11009 long x = atomic_long_read(&zone->vm_stat[item]);
11010
11011
11012
11013
11014 return x;
11015 }
11016
11017 extern unsigned long global_lru_pages(void);
11018
11019 static inline __attribute__((always_inline)) unsigned long zone_lru_pages(struct zone *zone)
11020 {
11021 return (zone_page_state(zone, NR_ACTIVE_ANON)
11022 + zone_page_state(zone, NR_ACTIVE_FILE)
11023 + zone_page_state(zone, NR_INACTIVE_ANON)
11024 + zone_page_state(zone, NR_INACTIVE_FILE));
11025 }
11026 # 220 "include/linux/vmstat.h"
11027 static inline __attribute__((always_inline)) void zap_zone_vm_stats(struct zone *zone)
11028 {
11029 memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
11030 }
11031
11032 extern void inc_zone_state(struct zone *, enum zone_stat_item);
11033 # 248 "include/linux/vmstat.h"
11034 static inline __attribute__((always_inline)) void __mod_zone_page_state(struct zone *zone,
11035 enum zone_stat_item item, int delta)
11036 {
11037 zone_page_state_add(delta, zone, item);
11038 }
11039
11040 static inline __attribute__((always_inline)) void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
11041 {
11042 atomic_long_inc(&zone->vm_stat[item]);
11043 atomic_long_inc(&vm_stat[item]);
11044 }
11045
11046 static inline __attribute__((always_inline)) void __inc_zone_page_state(struct page *page,
11047 enum zone_stat_item item)
11048 {
11049 __inc_zone_state(page_zone(page), item);
11050 }
11051
11052 static inline __attribute__((always_inline)) void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
11053 {
11054 atomic_long_dec(&zone->vm_stat[item]);
11055 atomic_long_dec(&vm_stat[item]);
11056 }
11057
11058 static inline __attribute__((always_inline)) void __dec_zone_page_state(struct page *page,
11059 enum zone_stat_item item)
11060 {
11061 __dec_zone_state(page_zone(page), item);
11062 }
11063 # 286 "include/linux/vmstat.h"
11064 static inline __attribute__((always_inline)) void refresh_cpu_vm_stats(int cpu) { }
11065 # 590 "include/linux/mm.h" 2
11066
11067 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *lowmem_page_address(struct page *page)
11068 {
11069 return ((void *) ((unsigned long)((((unsigned long) ((void *)(((((page) - mem_map) << 12) + (0x00000000))))) >> 12) << 12)));
11070 }
11071 # 632 "include/linux/mm.h"
11072 extern struct address_space swapper_space;
11073 static inline __attribute__((always_inline)) struct address_space *page_mapping(struct page *page)
11074 {
11075 struct address_space *mapping = page->mapping;
11076
11077 do { } while (0);
11078
11079
11080
11081
11082
11083 if (__builtin_expect(!!((unsigned long)mapping & 1), 0))
11084 mapping = ((void *)0);
11085 return mapping;
11086 }
11087
11088 static inline __attribute__((always_inline)) int PageAnon(struct page *page)
11089 {
11090 return ((unsigned long)page->mapping & 1) != 0;
11091 }
11092
11093
11094
11095
11096
11097 static inline __attribute__((always_inline)) unsigned long page_index(struct page *page)
11098 {
11099 if (__builtin_expect(!!(PageSwapCache(page)), 0))
11100 return ((page)->private);
11101 return page->index;
11102 }
11103
11104
11105
11106
11107
11108
11109 static inline __attribute__((always_inline)) void reset_page_mapcount(struct page *page)
11110 {
11111 (((&(page)->_mapcount)->counter) = -1);
11112 }
11113
11114 static inline __attribute__((always_inline)) int page_mapcount(struct page *page)
11115 {
11116 return ((&(page)->_mapcount)->counter) + 1;
11117 }
11118
11119
11120
11121
11122 static inline __attribute__((always_inline)) int page_mapped(struct page *page)
11123 {
11124 return ((&(page)->_mapcount)->counter) >= 0;
11125 }
11126 # 707 "include/linux/mm.h"
11127 extern void show_free_areas(void);
11128
11129
11130
11131
11132 static inline __attribute__((always_inline)) int shmem_lock(struct file *file, int lock,
11133 struct user_struct *user)
11134 {
11135 return 0;
11136 }
11137
11138 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
11139
11140 int shmem_zero_setup(struct vm_area_struct *);
11141
11142
11143 extern unsigned long shmem_get_unmapped_area(struct file *file,
11144 unsigned long addr,
11145 unsigned long len,
11146 unsigned long pgoff,
11147 unsigned long flags);
11148
11149
11150 extern int can_do_mlock(void);
11151 extern int user_shm_lock(size_t, struct user_struct *);
11152 extern void user_shm_unlock(size_t, struct user_struct *);
11153
11154
11155
11156
11157 struct zap_details {
11158 struct vm_area_struct *nonlinear_vma;
11159 struct address_space *check_mapping;
11160 unsigned long first_index;
11161 unsigned long last_index;
11162 spinlock_t *i_mmap_lock;
11163 unsigned long truncate_count;
11164 };
11165
11166 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
11167 pte_t pte);
11168
11169 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
11170 unsigned long size);
11171 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
11172 unsigned long size, struct zap_details *);
11173 unsigned long unmap_vmas(struct mmu_gather **tlb,
11174 struct vm_area_struct *start_vma, unsigned long start_addr,
11175 unsigned long end_addr, unsigned long *nr_accounted,
11176 struct zap_details *);
11177 # 768 "include/linux/mm.h"
11178 struct mm_walk {
11179 int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
11180 int (*pud_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
11181 int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
11182 int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
11183 int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
11184 struct mm_struct *mm;
11185 void *private;
11186 };
11187
11188 int walk_page_range(unsigned long addr, unsigned long end,
11189 struct mm_walk *walk);
11190 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
11191 unsigned long end, unsigned long floor, unsigned long ceiling);
11192 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
11193 struct vm_area_struct *vma);
11194 void unmap_mapping_range(struct address_space *mapping,
11195 loff_t const holebegin, loff_t const holelen, int even_cows);
11196 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
11197 void *buf, int len, int write);
11198
11199 static inline __attribute__((always_inline)) void unmap_shared_mapping_range(struct address_space *mapping,
11200 loff_t const holebegin, loff_t const holelen)
11201 {
11202 unmap_mapping_range(mapping, holebegin, holelen, 0);
11203 }
11204
11205 extern int vmtruncate(struct inode * inode, loff_t offset);
11206 extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
11207
11208
11209
11210
11211
11212 static inline __attribute__((always_inline)) int handle_mm_fault(struct mm_struct *mm,
11213 struct vm_area_struct *vma, unsigned long address,
11214 int write_access)
11215 {
11216
11217 do { dump_bfin_trace_buffer(); printk("<0>" "BUG: failure at %s:%d/%s()!\n", "include/linux/mm.h", 807, __func__); panic("BUG!"); } while (0);
11218 return 0x0002;
11219 }
11220
11221
11222 extern int make_pages_present(unsigned long addr, unsigned long end);
11223 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
11224
11225 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
11226 int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
11227
11228 extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
11229 extern void do_invalidatepage(struct page *page, unsigned long offset);
11230
11231 int __set_page_dirty_nobuffers(struct page *page);
11232 int __set_page_dirty_no_writeback(struct page *page);
11233 int redirty_page_for_writepage(struct writeback_control *wbc,
11234 struct page *page);
11235 int set_page_dirty(struct page *page);
11236 int set_page_dirty_lock(struct page *page);
11237 int clear_page_dirty_for_io(struct page *page);
11238
11239 extern unsigned long move_page_tables(struct vm_area_struct *vma,
11240 unsigned long old_addr, struct vm_area_struct *new_vma,
11241 unsigned long new_addr, unsigned long len);
11242 extern unsigned long do_mremap(unsigned long addr,
11243 unsigned long old_len, unsigned long new_len,
11244 unsigned long flags, unsigned long new_addr);
11245 extern int mprotect_fixup(struct vm_area_struct *vma,
11246 struct vm_area_struct **pprev, unsigned long start,
11247 unsigned long end, unsigned long newflags);
11248 # 849 "include/linux/mm.h"
11249 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
11250 struct page **pages);
11251 # 867 "include/linux/mm.h"
11252 struct shrinker {
11253 int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
11254 int seeks;
11255
11256
11257 struct list_head list;
11258 long nr;
11259 };
11260
11261 extern void register_shrinker(struct shrinker *);
11262 extern void unregister_shrinker(struct shrinker *);
11263
11264 int vma_wants_writenotify(struct vm_area_struct *vma);
11265
11266 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
11267
11268
11269 static inline __attribute__((always_inline)) int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
11270 unsigned long address)
11271 {
11272 return 0;
11273 }
11274 # 900 "include/linux/mm.h"
11275 int __pmd_alloc(struct mm_struct *mm, pgd_t *pud, unsigned long address);
11276
11277
11278 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
11279 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
11280 # 946 "include/linux/mm.h"
11281 static inline __attribute__((always_inline)) void pgtable_page_ctor(struct page *page)
11282 {
11283 do {} while (0);
11284 __inc_zone_page_state(page, NR_PAGETABLE);
11285 }
11286
11287 static inline __attribute__((always_inline)) void pgtable_page_dtor(struct page *page)
11288 {
11289 do {} while (0);
11290 __dec_zone_page_state(page, NR_PAGETABLE);
11291 }
11292 # 984 "include/linux/mm.h"
11293 extern void free_area_init(unsigned long * zones_size);
11294 extern void free_area_init_node(int nid, unsigned long * zones_size,
11295 unsigned long zone_start_pfn, unsigned long *zholes_size);
11296 # 1037 "include/linux/mm.h"
11297 extern void set_dma_reserve(unsigned long new_dma_reserve);
11298 extern void memmap_init_zone(unsigned long, int, unsigned long,
11299 unsigned long, enum memmap_context);
11300 extern void setup_per_zone_pages_min(void);
11301 extern void mem_init(void);
11302 extern void show_mem(void);
11303 extern void si_meminfo(struct sysinfo * val);
11304 extern void si_meminfo_node(struct sysinfo *val, int nid);
11305 extern int after_bootmem;
11306
11307
11308
11309
11310 static inline __attribute__((always_inline)) void setup_per_cpu_pageset(void) {}
11311
11312
11313
11314 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
11315 void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
11316 void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
11317 struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
11318 struct prio_tree_iter *iter);
11319
11320
11321
11322
11323
11324 static inline __attribute__((always_inline)) void vma_nonlinear_insert(struct vm_area_struct *vma,
11325 struct list_head *list)
11326 {
11327 vma->shared.vm_set.parent = ((void *)0);
11328 list_add_tail(&vma->shared.vm_set.list, list);
11329 }
11330
11331
11332 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
11333 extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
11334 unsigned long end, unsigned long pgoff, struct vm_area_struct *insert);
11335 extern struct vm_area_struct *vma_merge(struct mm_struct *,
11336 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
11337 unsigned long vm_flags, struct anon_vma *, struct file *, unsigned long,
11338 struct mempolicy *);
11339 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
11340 extern int split_vma(struct mm_struct *,
11341 struct vm_area_struct *, unsigned long addr, int new_below);
11342 extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
11343 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
11344 struct rb_node **, struct rb_node *);
11345 extern void unlink_file_vma(struct vm_area_struct *);
11346 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
11347 unsigned long addr, unsigned long len, unsigned long pgoff);
11348 extern void exit_mmap(struct mm_struct *);
11349
11350 extern int mm_take_all_locks(struct mm_struct *mm);
11351 extern void mm_drop_all_locks(struct mm_struct *mm);
11352
11353
11354
11355 extern void added_exe_file_vma(struct mm_struct *mm);
11356 extern void removed_exe_file_vma(struct mm_struct *mm);
11357 # 1105 "include/linux/mm.h"
11358 extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
11359 extern int install_special_mapping(struct mm_struct *mm,
11360 unsigned long addr, unsigned long len,
11361 unsigned long flags, struct page **pages);
11362
11363 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
11364
11365 extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
11366 unsigned long len, unsigned long prot,
11367 unsigned long flag, unsigned long pgoff);
11368 extern unsigned long mmap_region(struct file *file, unsigned long addr,
11369 unsigned long len, unsigned long flags,
11370 unsigned int vm_flags, unsigned long pgoff,
11371 int accountable);
11372
11373 static inline __attribute__((always_inline)) unsigned long do_mmap(struct file *file, unsigned long addr,
11374 unsigned long len, unsigned long prot,
11375 unsigned long flag, unsigned long offset)
11376 {
11377 unsigned long ret = -22;
11378 if ((offset + (((len)+((typeof(len))((1UL << 12))-1))&~((typeof(len))((1UL << 12))-1))) < offset)
11379 goto out;
11380 if (!(offset & ~(~((1UL << 12)-1))))
11381 ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> 12);
11382 out:
11383 return ret;
11384 }
11385
11386 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
11387
11388 extern unsigned long do_brk(unsigned long, unsigned long);
11389
11390
11391 extern unsigned long page_unuse(struct page *);
11392 extern void truncate_inode_pages(struct address_space *, loff_t);
11393 extern void truncate_inode_pages_range(struct address_space *,
11394 loff_t lstart, loff_t lend);
11395
11396
11397 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
11398
11399
11400 int write_one_page(struct page *page, int wait);
11401
11402
11403
11404
11405
11406 int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
11407 unsigned long offset, unsigned long nr_to_read);
11408 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
11409 unsigned long offset, unsigned long nr_to_read);
11410
11411 void page_cache_sync_readahead(struct address_space *mapping,
11412 struct file_ra_state *ra,
11413 struct file *filp,
11414 unsigned long offset,
11415 unsigned long size);
11416
11417 void page_cache_async_readahead(struct address_space *mapping,
11418 struct file_ra_state *ra,
11419 struct file *filp,
11420 struct page *pg,
11421 unsigned long offset,
11422 unsigned long size);
11423
11424 unsigned long max_sane_readahead(unsigned long nr);
11425
11426
11427 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
11428
11429
11430
11431 extern int expand_stack_downwards(struct vm_area_struct *vma,
11432 unsigned long address);
11433
11434
11435 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
11436 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
11437 struct vm_area_struct **pprev);
11438
11439
11440
11441 static inline __attribute__((always_inline)) struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
11442 {
11443 struct vm_area_struct * vma = find_vma(mm,start_addr);
11444
11445 if (vma && end_addr <= vma->vm_start)
11446 vma = ((void *)0);
11447 return vma;
11448 }
11449
11450 static inline __attribute__((always_inline)) unsigned long vma_pages(struct vm_area_struct *vma)
11451 {
11452 return (vma->vm_end - vma->vm_start) >> 12;
11453 }
11454
11455 pgprot_t vm_get_page_prot(unsigned long vm_flags);
11456 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
11457 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
11458 unsigned long pfn, unsigned long size, pgprot_t);
11459 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
11460 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
11461 unsigned long pfn);
11462 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
11463 unsigned long pfn);
11464
11465 struct page *follow_page(struct vm_area_struct *, unsigned long address,
11466 unsigned int foll_flags);
11467
11468
11469
11470
11471
11472 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
11473 void *data);
11474 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
11475 unsigned long size, pte_fn_t fn, void *data);
11476
11477
11478 void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
11479 # 1246 "include/linux/mm.h"
11480 static inline __attribute__((always_inline)) void
11481 kernel_map_pages(struct page *page, int numpages, int enable) {}
11482 static inline __attribute__((always_inline)) void enable_debug_pagealloc(void)
11483 {
11484 }
11485
11486
11487
11488
11489
11490 extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
11491
11492
11493
11494
11495 int in_gate_area_no_task(unsigned long addr);
11496
11497
11498
11499 int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
11500 void *, size_t *, loff_t *);
11501 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
11502 unsigned long lru_pages);
11503 void drop_pagecache(void);
11504
11505
11506
11507
11508
11509
11510
11511 const char * arch_vma_name(struct vm_area_struct *vma);
11512 void print_vma_addr(char *prefix, unsigned long rip);
11513
11514 struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
11515 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
11516 pgd_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
11517 pmd_t *vmemmap_pmd_populate(pgd_t *pud, unsigned long addr, int node);
11518 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
11519 void *vmemmap_alloc_block(unsigned long size, int node);
11520 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
11521 int vmemmap_populate_basepages(struct page *start_page,
11522 unsigned long pages, int node);
11523 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
11524 void vmemmap_populate_print_last(void);
11525 # 8 "include/linux/pagemap.h" 2
11526
11527
11528 # 1 "include/linux/highmem.h" 1
11529
11530
11531
11532
11533
11534 # 1 "include/linux/uaccess.h" 1
11535
11536
11537
11538
11539 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" 1
11540 # 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
11541 # 1 "include/linux/sched.h" 1
11542 # 44 "include/linux/sched.h"
11543 struct sched_param {
11544 int sched_priority;
11545 };
11546 # 66 "include/linux/sched.h"
11547 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cputime.h" 1
11548
11549
11550
11551 # 1 "include/asm-generic/cputime.h" 1
11552
11553
11554
11555
11556
11557
11558 typedef unsigned long cputime_t;
11559 # 24 "include/asm-generic/cputime.h"
11560 typedef u64 cputime64_t;
11561 # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cputime.h" 2
11562 # 67 "include/linux/sched.h" 2
11563
11564
11565 # 1 "include/linux/sem.h" 1
11566
11567
11568
11569 # 1 "include/linux/ipc.h" 1
11570 # 9 "include/linux/ipc.h"
11571 struct ipc_perm
11572 {
11573 __kernel_key_t key;
11574 __kernel_uid_t uid;
11575 __kernel_gid_t gid;
11576 __kernel_uid_t cuid;
11577 __kernel_gid_t cgid;
11578 __kernel_mode_t mode;
11579 unsigned short seq;
11580 };
11581
11582
11583 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ipcbuf.h" 1
11584 # 16 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ipcbuf.h"
11585 struct ipc64_perm {
11586 __kernel_key_t key;
11587 __kernel_uid32_t uid;
11588 __kernel_gid32_t gid;
11589 __kernel_uid32_t cuid;
11590 __kernel_gid32_t cgid;
11591 __kernel_mode_t mode;
11592 unsigned short __pad1;
11593 unsigned short seq;
11594 unsigned short __pad2;
11595 unsigned long __unused1;
11596 unsigned long __unused2;
11597 };
11598 # 22 "include/linux/ipc.h" 2
11599 # 57 "include/linux/ipc.h"
11600 struct ipc_kludge {
11601 struct msgbuf *msgp;
11602 long msgtyp;
11603 };
11604 # 88 "include/linux/ipc.h"
11605 struct kern_ipc_perm
11606 {
11607 spinlock_t lock;
11608 int deleted;
11609 int id;
11610 key_t key;
11611 uid_t uid;
11612 gid_t gid;
11613 uid_t cuid;
11614 gid_t cgid;
11615 mode_t mode;
11616 unsigned long seq;
11617 void *security;
11618 };
11619 # 5 "include/linux/sem.h" 2
11620 # 23 "include/linux/sem.h"
11621 struct semid_ds {
11622 struct ipc_perm sem_perm;
11623 __kernel_time_t sem_otime;
11624 __kernel_time_t sem_ctime;
11625 struct sem *sem_base;
11626 struct sem_queue *sem_pending;
11627 struct sem_queue **sem_pending_last;
11628 struct sem_undo *undo;
11629 unsigned short sem_nsems;
11630 };
11631
11632
11633 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sembuf.h" 1
11634 # 14 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sembuf.h"
11635 struct semid64_ds {
11636 struct ipc64_perm sem_perm;
11637 __kernel_time_t sem_otime;
11638 unsigned long __unused1;
11639 __kernel_time_t sem_ctime;
11640 unsigned long __unused2;
11641 unsigned long sem_nsems;
11642 unsigned long __unused3;
11643 unsigned long __unused4;
11644 };
11645 # 36 "include/linux/sem.h" 2
11646
11647
11648 struct sembuf {
11649 unsigned short sem_num;
11650 short sem_op;
11651 short sem_flg;
11652 };
11653
11654
11655 union semun {
11656 int val;
11657 struct semid_ds *buf;
11658 unsigned short *array;
11659 struct seminfo *__buf;
11660 void *__pad;
11661 };
11662
11663 struct seminfo {
11664 int semmap;
11665 int semmni;
11666 int semmns;
11667 int semmnu;
11668 int semmsl;
11669 int semopm;
11670 int semume;
11671 int semusz;
11672 int semvmx;
11673 int semaem;
11674 };
11675 # 83 "include/linux/sem.h"
11676 struct task_struct;
11677
11678
11679 struct sem {
11680 int semval;
11681 int sempid;
11682 };
11683
11684
11685 struct sem_array {
11686 struct kern_ipc_perm sem_perm;
11687 time_t sem_otime;
11688 time_t sem_ctime;
11689 struct sem *sem_base;
11690 struct list_head sem_pending;
11691 struct list_head list_id;
11692 unsigned long sem_nsems;
11693 };
11694
11695
11696 struct sem_queue {
11697 struct list_head list;
11698 struct task_struct *sleeper;
11699 struct sem_undo *undo;
11700 int pid;
11701 int status;
11702 struct sembuf *sops;
11703 int nsops;
11704 int alter;
11705 };
11706
11707
11708
11709
11710 struct sem_undo {
11711 struct list_head list_proc;
11712
11713 struct rcu_head rcu;
11714 struct sem_undo_list *ulp;
11715 struct list_head list_id;
11716 int semid;
11717 short * semadj;
11718 };
11719
11720
11721
11722
11723 struct sem_undo_list {
11724 atomic_t refcnt;
11725 spinlock_t lock;
11726 struct list_head list_proc;
11727 };
11728
11729 struct sysv_sem {
11730 struct sem_undo_list *undo_list;
11731 };
11732
11733
11734
11735 extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
11736 extern void exit_sem(struct task_struct *tsk);
11737 # 70 "include/linux/sched.h" 2
11738 # 1 "include/linux/signal.h" 1
11739
11740
11741
11742 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h" 1
11743
11744
11745
11746
11747
11748
11749 struct siginfo;
11750 # 17 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h"
11751 typedef unsigned long old_sigset_t;
11752
11753 typedef struct {
11754 unsigned long sig[(64 / 32)];
11755 } sigset_t;
11756 # 107 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h"
11757 # 1 "include/asm-generic/signal.h" 1
11758 # 17 "include/asm-generic/signal.h"
11759 typedef void __signalfn_t(int);
11760 typedef __signalfn_t *__sighandler_t;
11761
11762 typedef void __restorefn_t(void);
11763 typedef __restorefn_t *__sigrestore_t;
11764 # 108 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h" 2
11765
11766
11767 struct old_sigaction {
11768 __sighandler_t sa_handler;
11769 old_sigset_t sa_mask;
11770 unsigned long sa_flags;
11771 void (*sa_restorer) (void);
11772 };
11773
11774 struct sigaction {
11775 __sighandler_t sa_handler;
11776 unsigned long sa_flags;
11777 void (*sa_restorer) (void);
11778 sigset_t sa_mask;
11779 };
11780
11781 struct k_sigaction {
11782 struct sigaction sa;
11783 };
11784 # 145 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h"
11785 typedef struct sigaltstack {
11786 void *ss_sp;
11787 int ss_flags;
11788 size_t ss_size;
11789 } stack_t;
11790
11791
11792
11793 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sigcontext.h" 1
11794
11795
11796
11797
11798 struct sigcontext {
11799 unsigned long sc_r0;
11800 unsigned long sc_r1;
11801 unsigned long sc_r2;
11802 unsigned long sc_r3;
11803 unsigned long sc_r4;
11804 unsigned long sc_r5;
11805 unsigned long sc_r6;
11806 unsigned long sc_r7;
11807 unsigned long sc_p0;
11808 unsigned long sc_p1;
11809 unsigned long sc_p2;
11810 unsigned long sc_p3;
11811 unsigned long sc_p4;
11812 unsigned long sc_p5;
11813 unsigned long sc_usp;
11814 unsigned long sc_a0w;
11815 unsigned long sc_a1w;
11816 unsigned long sc_a0x;
11817 unsigned long sc_a1x;
11818 unsigned long sc_astat;
11819 unsigned long sc_rets;
11820 unsigned long sc_pc;
11821 unsigned long sc_retx;
11822 unsigned long sc_fp;
11823 unsigned long sc_i0;
11824 unsigned long sc_i1;
11825 unsigned long sc_i2;
11826 unsigned long sc_i3;
11827 unsigned long sc_m0;
11828 unsigned long sc_m1;
11829 unsigned long sc_m2;
11830 unsigned long sc_m3;
11831 unsigned long sc_l0;
11832 unsigned long sc_l1;
11833 unsigned long sc_l2;
11834 unsigned long sc_l3;
11835 unsigned long sc_b0;
11836 unsigned long sc_b1;
11837 unsigned long sc_b2;
11838 unsigned long sc_b3;
11839 unsigned long sc_lc0;
11840 unsigned long sc_lc1;
11841 unsigned long sc_lt0;
11842 unsigned long sc_lt1;
11843 unsigned long sc_lb0;
11844 unsigned long sc_lb1;
11845 unsigned long sc_seqstat;
11846 };
11847 # 154 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h" 2
11848 # 5 "include/linux/signal.h" 2
11849 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/siginfo.h" 1
11850
11851
11852
11853
11854 # 1 "include/asm-generic/siginfo.h" 1
11855
11856
11857
11858
11859
11860
11861 typedef union sigval {
11862 int sival_int;
11863 void *sival_ptr;
11864 } sigval_t;
11865 # 40 "include/asm-generic/siginfo.h"
11866 typedef struct siginfo {
11867 int si_signo;
11868 int si_errno;
11869 int si_code;
11870
11871 union {
11872 int _pad[((128 - (3 * sizeof(int))) / sizeof(int))];
11873
11874
11875 struct {
11876 pid_t _pid;
11877 uid_t _uid;
11878 } _kill;
11879
11880
11881 struct {
11882 timer_t _tid;
11883 int _overrun;
11884 char _pad[sizeof( uid_t) - sizeof(int)];
11885 sigval_t _sigval;
11886 int _sys_private;
11887 } _timer;
11888
11889
11890 struct {
11891 pid_t _pid;
11892 uid_t _uid;
11893 sigval_t _sigval;
11894 } _rt;
11895
11896
11897 struct {
11898 pid_t _pid;
11899 uid_t _uid;
11900 int _status;
11901 clock_t _utime;
11902 clock_t _stime;
11903 } _sigchld;
11904
11905
11906 struct {
11907 void *_addr;
11908
11909
11910
11911 } _sigfault;
11912
11913
11914 struct {
11915 long _band;
11916 int _fd;
11917 } _sigpoll;
11918 } _sifields;
11919 } siginfo_t;
11920 # 253 "include/asm-generic/siginfo.h"
11921 typedef struct sigevent {
11922 sigval_t sigev_value;
11923 int sigev_signo;
11924 int sigev_notify;
11925 union {
11926 int _pad[((64 - (sizeof(int) * 2 + sizeof(sigval_t))) / sizeof(int))];
11927 int _tid;
11928
11929 struct {
11930 void (*_function)(sigval_t);
11931 void *_attribute;
11932 } _sigev_thread;
11933 } _sigev_un;
11934 } sigevent_t;
11935
11936
11937
11938
11939
11940
11941
11942 struct siginfo;
11943 void do_schedule_next_timer(struct siginfo *info);
11944
11945
11946
11947
11948
11949 static inline __attribute__((always_inline)) void copy_siginfo(struct siginfo *to, struct siginfo *from)
11950 {
11951 if (from->si_code < 0)
11952 memcpy(to, from, sizeof(*to));
11953 else
11954
11955 memcpy(to, from, (3 * sizeof(int)) + sizeof(from->_sifields._sigchld));
11956 }
11957
11958
11959
11960 extern int copy_siginfo_to_user(struct siginfo *to, struct siginfo *from);
11961 # 6 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/siginfo.h" 2
11962 # 6 "include/linux/signal.h" 2
11963 # 14 "include/linux/signal.h"
11964 struct sigqueue {
11965 struct list_head list;
11966 int flags;
11967 siginfo_t info;
11968 struct user_struct *user;
11969 };
11970
11971
11972
11973
11974 struct sigpending {
11975 struct list_head list;
11976 sigset_t signal;
11977 };
11978 # 38 "include/linux/signal.h"
11979 static inline __attribute__((always_inline)) void sigaddset(sigset_t *set, int _sig)
11980 {
11981 unsigned long sig = _sig - 1;
11982 if ((64 / 32) == 1)
11983 set->sig[0] |= 1UL << sig;
11984 else
11985 set->sig[sig / 32] |= 1UL << (sig % 32);
11986 }
11987
11988 static inline __attribute__((always_inline)) void sigdelset(sigset_t *set, int _sig)
11989 {
11990 unsigned long sig = _sig - 1;
11991 if ((64 / 32) == 1)
11992 set->sig[0] &= ~(1UL << sig);
11993 else
11994 set->sig[sig / 32] &= ~(1UL << (sig % 32));
11995 }
11996
11997 static inline __attribute__((always_inline)) int sigismember(sigset_t *set, int _sig)
11998 {
11999 unsigned long sig = _sig - 1;
12000 if ((64 / 32) == 1)
12001 return 1 & (set->sig[0] >> sig);
12002 else
12003 return 1 & (set->sig[sig / 32] >> (sig % 32));
12004 }
12005
12006 static inline __attribute__((always_inline)) int sigfindinword(unsigned long word)
12007 {
12008 return __ffs(~(~word));
12009 }
12010
12011
12012
12013 static inline __attribute__((always_inline)) int sigisemptyset(sigset_t *set)
12014 {
12015 extern void _NSIG_WORDS_is_unsupported_size(void);
12016 switch ((64 / 32)) {
12017 case 4:
12018 return (set->sig[3] | set->sig[2] |
12019 set->sig[1] | set->sig[0]) == 0;
12020 case 2:
12021 return (set->sig[1] | set->sig[0]) == 0;
12022 case 1:
12023 return set->sig[0] == 0;
12024 default:
12025 _NSIG_WORDS_is_unsupported_size();
12026 return 0;
12027 }
12028 }
12029 # 119 "include/linux/signal.h"
12030 static inline __attribute__((always_inline)) void sigorsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) | (b3)); r->sig[2] = ((a2) | (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) | (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) | (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
12031
12032
12033 static inline __attribute__((always_inline)) void sigandsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & (b3)); r->sig[2] = ((a2) & (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
12034
12035
12036 static inline __attribute__((always_inline)) void signandsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & ~(b3)); r->sig[2] = ((a2) & ~(b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & ~(b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & ~(b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
12037 # 149 "include/linux/signal.h"
12038 static inline __attribute__((always_inline)) void signotset(sigset_t *set) { extern void _NSIG_WORDS_is_unsupported_size(void); switch ((64 / 32)) { case 4: set->sig[3] = (~(set->sig[3])); set->sig[2] = (~(set->sig[2])); case 2: set->sig[1] = (~(set->sig[1])); case 1: set->sig[0] = (~(set->sig[0])); break; default: _NSIG_WORDS_is_unsupported_size(); } }
12039
12040
12041
12042
12043 static inline __attribute__((always_inline)) void sigemptyset(sigset_t *set)
12044 {
12045 switch ((64 / 32)) {
12046 default:
12047 memset(set, 0, sizeof(sigset_t));
12048 break;
12049 case 2: set->sig[1] = 0;
12050 case 1: set->sig[0] = 0;
12051 break;
12052 }
12053 }
12054
12055 static inline __attribute__((always_inline)) void sigfillset(sigset_t *set)
12056 {
12057 switch ((64 / 32)) {
12058 default:
12059 memset(set, -1, sizeof(sigset_t));
12060 break;
12061 case 2: set->sig[1] = -1;
12062 case 1: set->sig[0] = -1;
12063 break;
12064 }
12065 }
12066
12067
12068
12069 static inline __attribute__((always_inline)) void sigaddsetmask(sigset_t *set, unsigned long mask)
12070 {
12071 set->sig[0] |= mask;
12072 }
12073
12074 static inline __attribute__((always_inline)) void sigdelsetmask(sigset_t *set, unsigned long mask)
12075 {
12076 set->sig[0] &= ~mask;
12077 }
12078
12079 static inline __attribute__((always_inline)) int sigtestsetmask(sigset_t *set, unsigned long mask)
12080 {
12081 return (set->sig[0] & mask) != 0;
12082 }
12083
12084 static inline __attribute__((always_inline)) void siginitset(sigset_t *set, unsigned long mask)
12085 {
12086 set->sig[0] = mask;
12087 switch ((64 / 32)) {
12088 default:
12089 memset(&set->sig[1], 0, sizeof(long)*((64 / 32)-1));
12090 break;
12091 case 2: set->sig[1] = 0;
12092 case 1: ;
12093 }
12094 }
12095
12096 static inline __attribute__((always_inline)) void siginitsetinv(sigset_t *set, unsigned long mask)
12097 {
12098 set->sig[0] = ~mask;
12099 switch ((64 / 32)) {
12100 default:
12101 memset(&set->sig[1], -1, sizeof(long)*((64 / 32)-1));
12102 break;
12103 case 2: set->sig[1] = -1;
12104 case 1: ;
12105 }
12106 }
12107
12108
12109
12110 static inline __attribute__((always_inline)) void init_sigpending(struct sigpending *sig)
12111 {
12112 sigemptyset(&sig->signal);
12113 INIT_LIST_HEAD(&sig->list);
12114 }
12115
12116 extern void flush_sigqueue(struct sigpending *queue);
12117
12118
12119 static inline __attribute__((always_inline)) int valid_signal(unsigned long sig)
12120 {
12121 return sig <= 64 ? 1 : 0;
12122 }
12123
12124 extern int next_signal(struct sigpending *pending, sigset_t *mask);
12125 extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
12126 extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
12127 extern long do_sigpending(void *, unsigned long);
12128 extern int sigprocmask(int, sigset_t *, sigset_t *);
12129 extern int show_unhandled_signals;
12130
12131 struct pt_regs;
12132 extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
12133 extern void exit_signals(struct task_struct *tsk);
12134
12135 extern struct kmem_cache *sighand_cachep;
12136
12137 int unhandled_signal(struct task_struct *tsk, int sig);
12138 # 373 "include/linux/signal.h"
12139 void signals_init(void);
12140 # 71 "include/linux/sched.h" 2
12141 # 1 "include/linux/fs_struct.h" 1
12142
12143
12144
12145
12146
12147 struct fs_struct {
12148 atomic_t count;
12149 rwlock_t lock;
12150 int umask;
12151 struct path root, pwd;
12152 };
12153
12154
12155
12156
12157
12158
12159
12160 extern struct kmem_cache *fs_cachep;
12161
12162 extern void exit_fs(struct task_struct *);
12163 extern void set_fs_root(struct fs_struct *, struct path *);
12164 extern void set_fs_pwd(struct fs_struct *, struct path *);
12165 extern struct fs_struct *copy_fs_struct(struct fs_struct *);
12166 extern void put_fs_struct(struct fs_struct *);
12167 # 72 "include/linux/sched.h" 2
12168
12169
12170
12171
12172
12173 # 1 "include/linux/proportions.h" 1
12174 # 12 "include/linux/proportions.h"
12175 # 1 "include/linux/percpu_counter.h" 1
12176 # 78 "include/linux/percpu_counter.h"
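/*
 * Uniprocessor fallback of the percpu_counter API: the counter collapses to
 * a plain s64 and every operation below is a trivial inline with no locking.
 */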
12177 struct percpu_counter {
12178 s64 count;
12179 };
12180
12181 static inline __attribute__((always_inline)) int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
12182 {
12183 fbc->count = amount;
12184 return 0;
12185 }
12186
12187
12188
12189 static inline __attribute__((always_inline)) void percpu_counter_destroy(struct percpu_counter *fbc)
12190 {
12191 }
12192
12193 static inline __attribute__((always_inline)) void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
12194 {
12195 fbc->count = amount;
12196 }
12197
12198
12199
12200
12201 static inline __attribute__((always_inline)) void
12202 percpu_counter_add(struct percpu_counter *fbc, s64 amount)
12203 {
12204 do { } while (0);
12205 fbc->count += amount;
12206 do { } while (0);
12207 }
12208
12209 static inline __attribute__((always_inline)) s64 percpu_counter_read(struct percpu_counter *fbc)
12210 {
12211 return fbc->count;
12212 }
12213
12214 static inline __attribute__((always_inline)) s64 percpu_counter_read_positive(struct percpu_counter *fbc)
12215 {
12216 return fbc->count;
12217 }
12218
12219 static inline __attribute__((always_inline)) s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
12220 {
12221 return percpu_counter_read_positive(fbc);
12222 }
12223
12224 static inline __attribute__((always_inline)) s64 percpu_counter_sum(struct percpu_counter *fbc)
12225 {
12226 return percpu_counter_read(fbc);
12227 }
12228
12229
12230
12231 static inline __attribute__((always_inline)) void percpu_counter_inc(struct percpu_counter *fbc)
12232 {
12233 percpu_counter_add(fbc, 1);
12234 }
12235
12236 static inline __attribute__((always_inline)) void percpu_counter_dec(struct percpu_counter *fbc)
12237 {
12238 percpu_counter_add(fbc, -1);
12239 }
12240
12241 static inline __attribute__((always_inline)) void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
12242 {
12243 percpu_counter_add(fbc, -amount);
12244 }
12245 # 13 "include/linux/proportions.h" 2
12246
12247
12248
12249 struct prop_global {
12250
12251
12252
12253
12254
12255 int shift;
12256
12257
12258
12259
12260
12261
12262 struct percpu_counter events;
12263 };
12264
12265
12266
12267
12268
12269
12270 struct prop_descriptor {
12271 int index;
12272 struct prop_global pg[2];
12273 struct mutex mutex;
12274 };
12275
12276 int prop_descriptor_init(struct prop_descriptor *pd, int shift);
12277 void prop_change_shift(struct prop_descriptor *pd, int new_shift);
12278
12279
12280
12281
12282
12283 struct prop_local_percpu {
12284
12285
12286
12287 struct percpu_counter events;
12288
12289
12290
12291
12292 int shift;
12293 unsigned long period;
12294 spinlock_t lock;
12295 };
12296
12297 int prop_local_init_percpu(struct prop_local_percpu *pl);
12298 void prop_local_destroy_percpu(struct prop_local_percpu *pl);
12299 void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
12300 void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
12301 long *numerator, long *denominator);
12302
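/*
 * The inline asm pairs below are the Blackfin local_irq_save()/
 * local_irq_restore() expansions (cli/sti on IMASK) guarding the
 * proportion updates.
 */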
12303 static inline __attribute__((always_inline))
12304 void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
12305 {
12306 unsigned long flags;
12307
12308 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
12309 __prop_inc_percpu(pd, pl);
12310 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
12311 }
12312 # 89 "include/linux/proportions.h"
12313 void __prop_inc_percpu_max(struct prop_descriptor *pd,
12314 struct prop_local_percpu *pl, long frac);
12315
12316
12317
12318
12319
12320
12321 struct prop_local_single {
12322
12323
12324
12325 unsigned long events;
12326
12327
12328
12329
12330
12331 unsigned long period;
12332 int shift;
12333 spinlock_t lock;
12334 };
12335
12336
12337
12338
12339
12340 int prop_local_init_single(struct prop_local_single *pl);
12341 void prop_local_destroy_single(struct prop_local_single *pl);
12342 void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
12343 void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
12344 long *numerator, long *denominator);
12345
12346 static inline __attribute__((always_inline))
12347 void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
12348 {
12349 unsigned long flags;
12350
12351 __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) );
12352 __prop_inc_single(pd, pl);
12353 do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0);
12354 }
12355 # 78 "include/linux/sched.h" 2
12356 # 1 "include/linux/seccomp.h" 1
12357 # 24 "include/linux/seccomp.h"
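/* seccomp is configured out: seccomp_t is empty and both prctl hooks return -22 (-EINVAL). */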
12358 typedef struct { } seccomp_t;
12359
12360
12361
12362 static inline __attribute__((always_inline)) long prctl_get_seccomp(void)
12363 {
12364 return -22;
12365 }
12366
12367 static inline __attribute__((always_inline)) long prctl_set_seccomp(unsigned long arg2)
12368 {
12369 return -22;
12370 }
12371 # 79 "include/linux/sched.h" 2
12372
12373 # 1 "include/linux/rtmutex.h" 1
12374 # 16 "include/linux/rtmutex.h"
12375 # 1 "include/linux/plist.h" 1
12376 # 80 "include/linux/plist.h"
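/*
 * Priority-sorted list ("plist") primitives; used further down for the
 * rt_mutex wait list and the task's pi_waiters.
 */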
12377 struct plist_head {
12378 struct list_head prio_list;
12379 struct list_head node_list;
12380
12381
12382
12383 };
12384
12385 struct plist_node {
12386 int prio;
12387 struct plist_head plist;
12388 };
12389 # 127 "include/linux/plist.h"
12390 static inline __attribute__((always_inline)) void
12391 plist_head_init(struct plist_head *head, spinlock_t *lock)
12392 {
12393 INIT_LIST_HEAD(&head->prio_list);
12394 INIT_LIST_HEAD(&head->node_list);
12395
12396
12397
12398 }
12399
12400
12401
12402
12403
12404
12405 static inline __attribute__((always_inline)) void plist_node_init(struct plist_node *node, int prio)
12406 {
12407 node->prio = prio;
12408 plist_head_init(&node->plist, ((void *)0));
12409 }
12410
12411 extern void plist_add(struct plist_node *node, struct plist_head *head);
12412 extern void plist_del(struct plist_node *node, struct plist_head *head);
12413 # 195 "include/linux/plist.h"
12414 static inline __attribute__((always_inline)) int plist_head_empty(const struct plist_head *head)
12415 {
12416 return list_empty(&head->node_list);
12417 }
12418
12419
12420
12421
12422
12423 static inline __attribute__((always_inline)) int plist_node_empty(const struct plist_node *node)
12424 {
12425 return plist_head_empty(&node->plist);
12426 }
12427 # 234 "include/linux/plist.h"
12428 static inline __attribute__((always_inline)) struct plist_node* plist_first(const struct plist_head *head)
12429 {
12430 return ({ const typeof( ((struct plist_node *)0)->plist.node_list ) *__mptr = (head->node_list.next); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct plist_node,plist.node_list) );});
12431
12432 }
12433 # 17 "include/linux/rtmutex.h" 2
12434 # 26 "include/linux/rtmutex.h"
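/* Priority-inheriting mutex; the debug variants are compiled out here. */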
12435 struct rt_mutex {
12436 spinlock_t wait_lock;
12437 struct plist_head wait_list;
12438 struct task_struct *owner;
12439
12440
12441
12442
12443
12444
12445 };
12446
12447 struct rt_mutex_waiter;
12448 struct hrtimer_sleeper;
12449
12450
12451
12452
12453
12454
12455 static inline __attribute__((always_inline)) int rt_mutex_debug_check_no_locks_freed(const void *from,
12456 unsigned long len)
12457 {
12458 return 0;
12459 }
12460 # 80 "include/linux/rtmutex.h"
12461 static inline __attribute__((always_inline)) int rt_mutex_is_locked(struct rt_mutex *lock)
12462 {
12463 return lock->owner != ((void *)0);
12464 }
12465
12466 extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
12467 extern void rt_mutex_destroy(struct rt_mutex *lock);
12468
12469 extern void rt_mutex_lock(struct rt_mutex *lock);
12470 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
12471 int detect_deadlock);
12472 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
12473 struct hrtimer_sleeper *timeout,
12474 int detect_deadlock);
12475
12476 extern int rt_mutex_trylock(struct rt_mutex *lock);
12477
12478 extern void rt_mutex_unlock(struct rt_mutex *lock);
12479 # 81 "include/linux/sched.h" 2
12480
12481
12482
12483 # 1 "include/linux/resource.h" 1
12484
12485
12486
12487
12488
12489 struct task_struct;
12490 # 24 "include/linux/resource.h"
12491 struct rusage {
12492 struct timeval ru_utime;
12493 struct timeval ru_stime;
12494 long ru_maxrss;
12495 long ru_ixrss;
12496 long ru_idrss;
12497 long ru_isrss;
12498 long ru_minflt;
12499 long ru_majflt;
12500 long ru_nswap;
12501 long ru_inblock;
12502 long ru_oublock;
12503 long ru_msgsnd;
12504 long ru_msgrcv;
12505 long ru_nsignals;
12506 long ru_nvcsw;
12507 long ru_nivcsw;
12508 };
12509
12510 struct rlimit {
12511 unsigned long rlim_cur;
12512 unsigned long rlim_max;
12513 };
12514 # 71 "include/linux/resource.h"
12515 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/resource.h" 1
12516
12517
12518
12519 # 1 "include/asm-generic/resource.h" 1
12520 # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/resource.h" 2
12521 # 72 "include/linux/resource.h" 2
12522
12523 int getrusage(struct task_struct *p, int who, struct rusage *ru);
12524 # 85 "include/linux/sched.h" 2
12525
12526 # 1 "include/linux/hrtimer.h" 1
12527 # 26 "include/linux/hrtimer.h"
12528 struct hrtimer_clock_base;
12529 struct hrtimer_cpu_base;
12530
12531
12532
12533
12534 enum hrtimer_mode {
12535 HRTIMER_MODE_ABS,
12536 HRTIMER_MODE_REL,
12537 };
12538
12539
12540
12541
12542 enum hrtimer_restart {
12543 HRTIMER_NORESTART,
12544 HRTIMER_RESTART,
12545 };
12546 # 59 "include/linux/hrtimer.h"
12547 enum hrtimer_cb_mode {
12548 HRTIMER_CB_SOFTIRQ,
12549 HRTIMER_CB_IRQSAFE_PERCPU,
12550 HRTIMER_CB_IRQSAFE_UNLOCKED,
12551 };
12552 # 124 "include/linux/hrtimer.h"
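/*
 * hrtimer node: _expires/_softexpires give the [soft, hard] expiry range;
 * the start_pid/start_site/start_comm members are the timer-stats
 * bookkeeping fields (apparently CONFIG_TIMER_STATS is enabled here).
 */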
12553 struct hrtimer {
12554 struct rb_node node;
12555 ktime_t _expires;
12556 ktime_t _softexpires;
12557 enum hrtimer_restart (*function)(struct hrtimer *);
12558 struct hrtimer_clock_base *base;
12559 unsigned long state;
12560 struct list_head cb_entry;
12561 enum hrtimer_cb_mode cb_mode;
12562
12563 int start_pid;
12564 void *start_site;
12565 char start_comm[16];
12566
12567 };
12568 # 147 "include/linux/hrtimer.h"
12569 struct hrtimer_sleeper {
12570 struct hrtimer timer;
12571 struct task_struct *task;
12572 };
12573 # 164 "include/linux/hrtimer.h"
12574 struct hrtimer_clock_base {
12575 struct hrtimer_cpu_base *cpu_base;
12576 clockid_t index;
12577 struct rb_root active;
12578 struct rb_node *first;
12579 ktime_t resolution;
12580 ktime_t (*get_time)(void);
12581 ktime_t softirq_time;
12582
12583
12584
12585 };
12586 # 196 "include/linux/hrtimer.h"
12587 struct hrtimer_cpu_base {
12588 spinlock_t lock;
12589 struct hrtimer_clock_base clock_base[2];
12590 struct list_head cb_pending;
12591
12592
12593
12594
12595
12596 };
12597
12598 static inline __attribute__((always_inline)) void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
12599 {
12600 timer->_expires = time;
12601 timer->_softexpires = time;
12602 }
12603
12604 static inline __attribute__((always_inline)) void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
12605 {
12606 timer->_softexpires = time;
12607 timer->_expires = ktime_add_safe(time, delta);
12608 }
12609
12610 static inline __attribute__((always_inline)) void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
12611 {
12612 timer->_softexpires = time;
12613 timer->_expires = ktime_add_safe(time, ns_to_ktime(delta));
12614 }
12615
12616 static inline __attribute__((always_inline)) void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
12617 {
12618 timer->_expires.tv64 = tv64;
12619 timer->_softexpires.tv64 = tv64;
12620 }
12621
12622 static inline __attribute__((always_inline)) void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
12623 {
12624 timer->_expires = ktime_add_safe(timer->_expires, time);
12625 timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
12626 }
12627
12628 static inline __attribute__((always_inline)) void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
12629 {
12630 timer->_expires = ktime_add_ns(timer->_expires, ns);
12631 timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
12632 }
12633
12634 static inline __attribute__((always_inline)) ktime_t hrtimer_get_expires(const struct hrtimer *timer)
12635 {
12636 return timer->_expires;
12637 }
12638
12639 static inline __attribute__((always_inline)) ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
12640 {
12641 return timer->_softexpires;
12642 }
12643
12644 static inline __attribute__((always_inline)) s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
12645 {
12646 return timer->_expires.tv64;
12647 }
12648 static inline __attribute__((always_inline)) s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
12649 {
12650 return timer->_softexpires.tv64;
12651 }
12652
12653 static inline __attribute__((always_inline)) s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
12654 {
12655 return ktime_to_ns(timer->_expires);
12656 }
12657
12658 static inline __attribute__((always_inline)) ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
12659 {
12660 return ktime_sub(timer->_expires, timer->base->get_time());
12661 }
12662 # 315 "include/linux/hrtimer.h"
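/* High-resolution timer mode is configured out: these are the low-res stubs. */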
12663 static inline __attribute__((always_inline)) void clock_was_set(void) { }
12664 static inline __attribute__((always_inline)) void hrtimer_peek_ahead_timers(void) { }
12665
12666 static inline __attribute__((always_inline)) void hres_timers_resume(void) { }
12667
12668
12669
12670
12671
12672 static inline __attribute__((always_inline)) ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
12673 {
12674 return timer->base->softirq_time;
12675 }
12676
12677 static inline __attribute__((always_inline)) int hrtimer_is_hres_active(struct hrtimer *timer)
12678 {
12679 return 0;
12680 }
12681
12682
12683 extern ktime_t ktime_get(void);
12684 extern ktime_t ktime_get_real(void);
12685
12686
12687 extern __typeof__(struct tick_device) per_cpu__tick_cpu_device;
12688
12689
12690
12691
12692
12693 extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
12694 enum hrtimer_mode mode);
12695
12696
12697
12698
12699
12700
12701
12702 static inline __attribute__((always_inline)) void hrtimer_init_on_stack(struct hrtimer *timer,
12703 clockid_t which_clock,
12704 enum hrtimer_mode mode)
12705 {
12706 hrtimer_init(timer, which_clock, mode);
12707 }
12708 static inline __attribute__((always_inline)) void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
12709
12710
12711
12712 extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
12713 const enum hrtimer_mode mode);
12714 extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
12715 unsigned long range_ns, const enum hrtimer_mode mode);
12716 extern int hrtimer_cancel(struct hrtimer *timer);
12717 extern int hrtimer_try_to_cancel(struct hrtimer *timer);
12718
12719 static inline __attribute__((always_inline)) int hrtimer_start_expires(struct hrtimer *timer,
12720 enum hrtimer_mode mode)
12721 {
12722 unsigned long delta;
12723 ktime_t soft, hard;
12724 soft = hrtimer_get_softexpires(timer);
12725 hard = hrtimer_get_expires(timer);
12726 delta = ktime_to_ns(ktime_sub(hard, soft));
12727 return hrtimer_start_range_ns(timer, soft, delta, mode);
12728 }
12729
12730 static inline __attribute__((always_inline)) int hrtimer_restart(struct hrtimer *timer)
12731 {
12732 return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
12733 }
12734
12735
12736 extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
12737 extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
12738
12739 extern ktime_t hrtimer_get_next_event(void);
12740
12741
12742
12743
12744
12745 static inline __attribute__((always_inline)) int hrtimer_active(const struct hrtimer *timer)
12746 {
12747 return timer->state != 0x00;
12748 }
12749
12750
12751
12752
12753 static inline __attribute__((always_inline)) int hrtimer_is_queued(struct hrtimer *timer)
12754 {
12755 return timer->state &
12756 (0x01 | 0x04);
12757 }
12758
12759
12760
12761
12762
12763 static inline __attribute__((always_inline)) int hrtimer_callback_running(struct hrtimer *timer)
12764 {
12765 return timer->state & 0x02;
12766 }
12767
12768
12769 extern u64
12770 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
12771
12772
12773 static inline __attribute__((always_inline)) u64 hrtimer_forward_now(struct hrtimer *timer,
12774 ktime_t interval)
12775 {
12776 return hrtimer_forward(timer, timer->base->get_time(), interval);
12777 }
12778
12779
12780 extern long hrtimer_nanosleep(struct timespec *rqtp,
12781 struct timespec *rmtp,
12782 const enum hrtimer_mode mode,
12783 const clockid_t clockid);
12784 extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
12785
12786 extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
12787 struct task_struct *tsk);
12788
12789 extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
12790 const enum hrtimer_mode mode);
12791 extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
12792
12793
12794 extern void hrtimer_run_queues(void);
12795 extern void hrtimer_run_pending(void);
12796
12797
12798 extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) hrtimers_init(void);
12799
12800
12801 extern u64 ktime_divns(const ktime_t kt, s64 div);
12802
12803
12804
12805
12806
12807 extern void sysrq_timer_list_show(void);
12808
12809
12810
12811
12812
12813
12814 extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
12815 void *timerf, char *comm,
12816 unsigned int timer_flag);
12817
12818 static inline __attribute__((always_inline)) void timer_stats_account_hrtimer(struct hrtimer *timer)
12819 {
12820 timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
12821 timer->function, timer->start_comm, 0);
12822 }
12823
12824 extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
12825 void *addr);
12826
12827 static inline __attribute__((always_inline)) void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
12828 {
12829 __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
12830 }
12831
12832 static inline __attribute__((always_inline)) void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
12833 {
12834 timer->start_site = ((void *)0);
12835 }
12836 # 87 "include/linux/sched.h" 2
12837 # 1 "include/linux/task_io_accounting.h" 1
12838 # 11 "include/linux/task_io_accounting.h"
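/* Task I/O accounting is configured out, so the struct carries no members. */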
12839 struct task_io_accounting {
12840 # 45 "include/linux/task_io_accounting.h"
12841 };
12842 # 88 "include/linux/sched.h" 2
12843
12844 # 1 "include/linux/latencytop.h" 1
12845 # 33 "include/linux/latencytop.h"
12846 static inline __attribute__((always_inline)) void
12847 account_scheduler_latency(struct task_struct *task, int usecs, int inter)
12848 {
12849 }
12850
12851 static inline __attribute__((always_inline)) void clear_all_latency_tracing(struct task_struct *p)
12852 {
12853 }
12854 # 90 "include/linux/sched.h" 2
12855 # 1 "include/linux/cred.h" 1
12856 # 91 "include/linux/sched.h" 2
12857
12858
12859
12860 struct mem_cgroup;
12861 struct exec_domain;
12862 struct futex_pi_state;
12863 struct robust_list_head;
12864 struct bio;
12865 # 116 "include/linux/sched.h"
12866 extern unsigned long avenrun[];
12867 # 130 "include/linux/sched.h"
12868 extern unsigned long total_forks;
12869 extern int nr_threads;
12870 extern __typeof__(unsigned long) per_cpu__process_counts;
12871 extern int nr_processes(void);
12872 extern unsigned long nr_running(void);
12873 extern unsigned long nr_uninterruptible(void);
12874 extern unsigned long nr_active(void);
12875 extern unsigned long nr_iowait(void);
12876
12877 struct seq_file;
12878 struct cfs_rq;
12879 struct task_group;
12880
12881 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
12882 extern void proc_sched_set_task(struct task_struct *p);
12883 extern void
12884 print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
12885 # 161 "include/linux/sched.h"
12886 extern unsigned long long time_sync_thresh;
12887 # 238 "include/linux/sched.h"
12888 extern rwlock_t tasklist_lock;
12889 extern spinlock_t mmlist_lock;
12890
12891 struct task_struct;
12892
12893 extern void sched_init(void);
12894 extern void sched_init_smp(void);
12895 extern void schedule_tail(struct task_struct *prev);
12896 extern void init_idle(struct task_struct *idle, int cpu);
12897 extern void init_idle_bootup_task(struct task_struct *idle);
12898
12899 extern int runqueue_is_locked(void);
12900 extern void task_rq_unlock_wait(struct task_struct *p);
12901
12902 extern cpumask_t nohz_cpu_mask;
12903
12904
12905
12906 static inline __attribute__((always_inline)) int select_nohz_load_balancer(int cpu)
12907 {
12908 return 0;
12909 }
12910
12911
12912 extern unsigned long rt_needs_cpu(int cpu);
12913
12914
12915
12916
12917 extern void show_state_filter(unsigned long state_filter);
12918
12919 static inline __attribute__((always_inline)) void show_state(void)
12920 {
12921 show_state_filter(0);
12922 }
12923
12924 extern void show_regs(struct pt_regs *);
12925
12926
12927
12928
12929
12930
12931 extern void show_stack(struct task_struct *task, unsigned long *sp);
12932
12933 void io_schedule(void);
12934 long io_schedule_timeout(long timeout);
12935
12936 extern void cpu_init (void);
12937 extern void trap_init(void);
12938 extern void account_process_tick(struct task_struct *task, int user);
12939 extern void update_process_times(int user);
12940 extern void scheduler_tick(void);
12941
12942 extern void sched_show_task(struct task_struct *p);
12943 # 304 "include/linux/sched.h"
12944 static inline __attribute__((always_inline)) void softlockup_tick(void)
12945 {
12946 }
12947 static inline __attribute__((always_inline)) void spawn_softlockup_task(void)
12948 {
12949 }
12950 static inline __attribute__((always_inline)) void touch_softlockup_watchdog(void)
12951 {
12952 }
12953 static inline __attribute__((always_inline)) void touch_all_softlockup_watchdogs(void)
12954 {
12955 }
12956
12957
12958
12959
12960
12961
12962
12963 extern char __sched_text_start[], __sched_text_end[];
12964
12965
12966 extern int in_sched_functions(unsigned long addr);
12967
12968
12969 extern signed long schedule_timeout(signed long timeout);
12970 extern signed long schedule_timeout_interruptible(signed long timeout);
12971 extern signed long schedule_timeout_killable(signed long timeout);
12972 extern signed long schedule_timeout_uninterruptible(signed long timeout);
12973 void schedule(void);
12974
12975 struct nsproxy;
12976 struct user_namespace;
12977
12978
12979
12980
12981 extern int sysctl_max_map_count;
12982
12983 # 1 "include/linux/aio.h" 1
12984
12985
12986
12987
12988
12989 # 1 "include/linux/aio_abi.h" 1
12990 # 32 "include/linux/aio_abi.h"
12991 typedef unsigned long aio_context_t;
12992
12993 enum {
12994 IOCB_CMD_PREAD = 0,
12995 IOCB_CMD_PWRITE = 1,
12996 IOCB_CMD_FSYNC = 2,
12997 IOCB_CMD_FDSYNC = 3,
12998
12999
13000
13001
13002 IOCB_CMD_NOOP = 6,
13003 IOCB_CMD_PREADV = 7,
13004 IOCB_CMD_PWRITEV = 8,
13005 };
13006 # 57 "include/linux/aio_abi.h"
13007 struct io_event {
13008 __u64 data;
13009 __u64 obj;
13010 __s64 res;
13011 __s64 res2;
13012 };
13013 # 78 "include/linux/aio_abi.h"
13014 struct iocb {
13015
13016 __u64 aio_data;
13017 __u32 aio_key, aio_reserved1;
13018
13019
13020
13021 __u16 aio_lio_opcode;
13022 __s16 aio_reqprio;
13023 __u32 aio_fildes;
13024
13025 __u64 aio_buf;
13026 __u64 aio_nbytes;
13027 __s64 aio_offset;
13028
13029
13030 __u64 aio_reserved2;
13031
13032
13033 __u32 aio_flags;
13034
13035
13036
13037
13038
13039 __u32 aio_resfd;
13040 };
13041 # 7 "include/linux/aio.h" 2
13042 # 1 "include/linux/uio.h" 1
13043 # 16 "include/linux/uio.h"
13044 struct iovec
13045 {
13046 void *iov_base;
13047 __kernel_size_t iov_len;
13048 };
13049
13050
13051
13052 struct kvec {
13053 void *iov_base;
13054 size_t iov_len;
13055 };
13056 # 45 "include/linux/uio.h"
13057 static inline __attribute__((always_inline)) size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
13058 {
13059 unsigned long seg;
13060 size_t ret = 0;
13061
13062 for (seg = 0; seg < nr_segs; seg++)
13063 ret += iov[seg].iov_len;
13064 return ret;
13065 }
13066
13067 unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
13068 # 8 "include/linux/aio.h" 2
13069
13070
13071
13072
13073
13074
13075 struct kioctx;
13076 # 86 "include/linux/aio.h"
13077 struct kiocb {
13078 struct list_head ki_run_list;
13079 unsigned long ki_flags;
13080 int ki_users;
13081 unsigned ki_key;
13082
13083 struct file *ki_filp;
13084 struct kioctx *ki_ctx;
13085 int (*ki_cancel)(struct kiocb *, struct io_event *);
13086 ssize_t (*ki_retry)(struct kiocb *);
13087 void (*ki_dtor)(struct kiocb *);
13088
13089 union {
13090 void *user;
13091 struct task_struct *tsk;
13092 } ki_obj;
13093
13094 __u64 ki_user_data;
13095 wait_queue_t ki_wait;
13096 loff_t ki_pos;
13097
13098 void *private;
13099
13100 unsigned short ki_opcode;
13101 size_t ki_nbytes;
13102 char *ki_buf;
13103 size_t ki_left;
13104 struct iovec ki_inline_vec;
13105 struct iovec *ki_iovec;
13106 unsigned long ki_nr_segs;
13107 unsigned long ki_cur_seg;
13108
13109 struct list_head ki_list;
13110
13111
13112
13113
13114
13115
13116 struct file *ki_eventfd;
13117 };
13118 # 148 "include/linux/aio.h"
13119 struct aio_ring {
13120 unsigned id;
13121 unsigned nr;
13122 unsigned head;
13123 unsigned tail;
13124
13125 unsigned magic;
13126 unsigned compat_features;
13127 unsigned incompat_features;
13128 unsigned header_length;
13129
13130
13131 struct io_event io_events[0];
13132 };
13133
13134
13135
13136
13137 struct aio_ring_info {
13138 unsigned long mmap_base;
13139 unsigned long mmap_size;
13140
13141 struct page **ring_pages;
13142 spinlock_t ring_lock;
13143 long nr_pages;
13144
13145 unsigned nr, tail;
13146
13147 struct page *internal_pages[8];
13148 };
13149
13150 struct kioctx {
13151 atomic_t users;
13152 int dead;
13153 struct mm_struct *mm;
13154
13155
13156 unsigned long user_id;
13157 struct kioctx *next;
13158
13159 wait_queue_head_t wait;
13160
13161 spinlock_t ctx_lock;
13162
13163 int reqs_active;
13164 struct list_head active_reqs;
13165 struct list_head run_list;
13166
13167
13168 unsigned max_reqs;
13169
13170 struct aio_ring_info ring_info;
13171
13172 struct delayed_work wq;
13173 };
13174
13175
13176 extern unsigned aio_max_size;
13177 # 215 "include/linux/aio.h"
13178 static inline __attribute__((always_inline)) ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
13179 static inline __attribute__((always_inline)) int aio_put_req(struct kiocb *iocb) { return 0; }
13180 static inline __attribute__((always_inline)) void kick_iocb(struct kiocb *iocb) { }
13181 static inline __attribute__((always_inline)) int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
13182 struct mm_struct;
13183 static inline __attribute__((always_inline)) void exit_aio(struct mm_struct *mm) { }
13184
13185
13186
13187
13188
13189
13190 static inline __attribute__((always_inline)) struct kiocb *list_kiocb(struct list_head *h)
13191 {
13192 return ({ const typeof( ((struct kiocb *)0)->ki_list ) *__mptr = (h); (struct kiocb *)( (char *)__mptr - __builtin_offsetof(struct kiocb,ki_list) );});
13193 }
13194
13195
13196 extern unsigned long aio_nr;
13197 extern unsigned long aio_max_nr;
13198 # 344 "include/linux/sched.h" 2
13199
13200 extern unsigned long
13201 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
13202 unsigned long, unsigned long);
13203 extern unsigned long
13204 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
13205 unsigned long len, unsigned long pgoff,
13206 unsigned long flags);
13207 extern void arch_unmap_area(struct mm_struct *, unsigned long);
13208 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
13209 # 391 "include/linux/sched.h"
13210 extern void set_dumpable(struct mm_struct *mm, int value);
13211 extern int get_dumpable(struct mm_struct *mm);
13212 # 422 "include/linux/sched.h"
13213 struct sighand_struct {
13214 atomic_t count;
13215 struct k_sigaction action[64];
13216 spinlock_t siglock;
13217 wait_queue_head_t signalfd_wqh;
13218 };
13219
13220 struct pacct_struct {
13221 int ac_flag;
13222 long ac_exitcode;
13223 unsigned long ac_mem;
13224 cputime_t ac_utime, ac_stime;
13225 unsigned long ac_minflt, ac_majflt;
13226 };
13227 # 448 "include/linux/sched.h"
13228 struct task_cputime {
13229 cputime_t utime;
13230 cputime_t stime;
13231 unsigned long long sum_exec_runtime;
13232 };
13233 # 466 "include/linux/sched.h"
13234 struct thread_group_cputime {
13235 struct task_cputime *totals;
13236 };
13237 # 477 "include/linux/sched.h"
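/*
 * Per-thread-group signal bookkeeping: shared pending signals, interval
 * timers, accumulated child CPU times, and the per-process rlimits.
 */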
13238 struct signal_struct {
13239 atomic_t count;
13240 atomic_t live;
13241
13242 wait_queue_head_t wait_chldexit;
13243
13244
13245 struct task_struct *curr_target;
13246
13247
13248 struct sigpending shared_pending;
13249
13250
13251 int group_exit_code;
13252
13253
13254
13255
13256
13257 int notify_count;
13258 struct task_struct *group_exit_task;
13259
13260
13261 int group_stop_count;
13262 unsigned int flags;
13263
13264
13265 struct list_head posix_timers;
13266
13267
13268 struct hrtimer real_timer;
13269 struct pid *leader_pid;
13270 ktime_t it_real_incr;
13271
13272
13273 cputime_t it_prof_expires, it_virt_expires;
13274 cputime_t it_prof_incr, it_virt_incr;
13275
13276
13277
13278
13279
13280 struct thread_group_cputime cputime;
13281
13282
13283 struct task_cputime cputime_expires;
13284
13285 struct list_head cpu_timers[3];
13286 # 533 "include/linux/sched.h"
13287 union {
13288 pid_t pgrp __attribute__((deprecated));
13289 pid_t __pgrp;
13290 };
13291
13292 struct pid *tty_old_pgrp;
13293
13294 union {
13295 pid_t session __attribute__((deprecated));
13296 pid_t __session;
13297 };
13298
13299
13300 int leader;
13301
13302 struct tty_struct *tty;
13303
13304
13305
13306
13307
13308
13309
13310 cputime_t cutime, cstime;
13311 cputime_t gtime;
13312 cputime_t cgtime;
13313 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
13314 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
13315 unsigned long inblock, oublock, cinblock, coublock;
13316 struct task_io_accounting ioac;
13317 # 573 "include/linux/sched.h"
13318 struct rlimit rlim[16];
13319 # 582 "include/linux/sched.h"
13320 struct pacct_struct pacct;
13321 # 591 "include/linux/sched.h"
13322 };
13323 # 615 "include/linux/sched.h"
13324 static inline __attribute__((always_inline)) int signal_group_exit(const struct signal_struct *sig)
13325 {
13326 return (sig->flags & 0x00000008) ||
13327 (sig->group_exit_task != ((void *)0));
13328 }
13329
13330
13331
13332
13333 struct user_struct {
13334 atomic_t __count;
13335 atomic_t processes;
13336 atomic_t files;
13337 atomic_t sigpending;
13338
13339 atomic_t inotify_watches;
13340 atomic_t inotify_devs;
13341
13342
13343 atomic_t epoll_devs;
13344 atomic_t epoll_watches;
13345
13346
13347
13348
13349
13350 unsigned long locked_shm;
13351
13352
13353
13354
13355
13356
13357
13358 struct hlist_node uidhash_node;
13359 uid_t uid;
13360 # 659 "include/linux/sched.h"
13361 };
13362
13363 extern int uids_sysfs_init(void);
13364
13365 extern struct user_struct *find_user(uid_t);
13366
13367 extern struct user_struct root_user;
13368
13369
13370 struct backing_dev_info;
13371 struct reclaim_state;
13372
13373
13374 struct sched_info {
13375
13376 unsigned long pcount;
13377 unsigned long long cpu_time,
13378 run_delay;
13379
13380
13381 unsigned long long last_arrival,
13382 last_queued;
13383
13384
13385 unsigned int bkl_count;
13386
13387 };
13388 # 722 "include/linux/sched.h"
13389 static inline __attribute__((always_inline)) int sched_info_on(void)
13390 {
13391
13392 return 1;
13393
13394
13395
13396
13397
13398
13399 }
13400
13401 enum cpu_idle_type {
13402 CPU_IDLE,
13403 CPU_NOT_IDLE,
13404 CPU_NEWLY_IDLE,
13405 CPU_MAX_IDLE_TYPES
13406 };
13407 # 881 "include/linux/sched.h"
13408 struct sched_domain_attr;
13409
13410 static inline __attribute__((always_inline)) void
13411 partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
13412 struct sched_domain_attr *dattr_new)
13413 {
13414 }
13415
13416
13417 struct io_context;
13418
13419
13420 struct group_info {
13421 int ngroups;
13422 atomic_t usage;
13423 gid_t small_block[32];
13424 int nblocks;
13425 gid_t *blocks[0];
13426 };
13427 # 916 "include/linux/sched.h"
13428 extern struct group_info *groups_alloc(int gidsetsize);
13429 extern void groups_free(struct group_info *group_info);
13430 extern int set_current_groups(struct group_info *group_info);
13431 extern int groups_search(struct group_info *group_info, gid_t grp);
13432
13433
13434
13435
13436
13437
13438
13439 static inline __attribute__((always_inline)) void prefetch_stack(struct task_struct *t) { }
13440
13441
13442 struct audit_context;
13443 struct mempolicy;
13444 struct pipe_inode_info;
13445 struct uts_namespace;
13446
13447 struct rq;
13448 struct sched_domain;
13449
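/* Method table consumed by the modular scheduler core (CFS et al.). */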
13450 struct sched_class {
13451 const struct sched_class *next;
13452
13453 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
13454 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
13455 void (*yield_task) (struct rq *rq);
13456
13457 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
13458
13459 struct task_struct * (*pick_next_task) (struct rq *rq);
13460 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
13461 # 972 "include/linux/sched.h"
13462 void (*set_curr_task) (struct rq *rq);
13463 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
13464 void (*task_new) (struct rq *rq, struct task_struct *p);
13465
13466 void (*switched_from) (struct rq *this_rq, struct task_struct *task,
13467 int running);
13468 void (*switched_to) (struct rq *this_rq, struct task_struct *task,
13469 int running);
13470 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
13471 int oldprio, int running);
13472
13473
13474
13475
13476 };
13477
13478 struct load_weight {
13479 unsigned long weight, inv_weight;
13480 };
13481 # 1002 "include/linux/sched.h"
13482 struct sched_entity {
13483 struct load_weight load;
13484 struct rb_node run_node;
13485 struct list_head group_node;
13486 unsigned int on_rq;
13487
13488 u64 exec_start;
13489 u64 sum_exec_runtime;
13490 u64 vruntime;
13491 u64 prev_sum_exec_runtime;
13492
13493 u64 last_wakeup;
13494 u64 avg_overlap;
13495
13496
13497 u64 wait_start;
13498 u64 wait_max;
13499 u64 wait_count;
13500 u64 wait_sum;
13501
13502 u64 sleep_start;
13503 u64 sleep_max;
13504 s64 sum_sleep_runtime;
13505
13506 u64 block_start;
13507 u64 block_max;
13508 u64 exec_max;
13509 u64 slice_max;
13510
13511 u64 nr_migrations;
13512 u64 nr_migrations_cold;
13513 u64 nr_failed_migrations_affine;
13514 u64 nr_failed_migrations_running;
13515 u64 nr_failed_migrations_hot;
13516 u64 nr_forced_migrations;
13517 u64 nr_forced2_migrations;
13518
13519 u64 nr_wakeups;
13520 u64 nr_wakeups_sync;
13521 u64 nr_wakeups_migrate;
13522 u64 nr_wakeups_local;
13523 u64 nr_wakeups_remote;
13524 u64 nr_wakeups_affine;
13525 u64 nr_wakeups_affine_attempts;
13526 u64 nr_wakeups_passive;
13527 u64 nr_wakeups_idle;
13528 # 1057 "include/linux/sched.h"
13529 };
13530
13531 struct sched_rt_entity {
13532 struct list_head run_list;
13533 unsigned long timeout;
13534 unsigned int time_slice;
13535 int nr_cpus_allowed;
13536
13537 struct sched_rt_entity *back;
13538
13539
13540
13541
13542
13543
13544
13545 };
13546
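/* The core per-task descriptor (one instance per thread). */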
13547 struct task_struct {
13548 volatile long state;
13549 void *stack;
13550 atomic_t usage;
13551 unsigned int flags;
13552 unsigned int ptrace;
13553
13554 int lock_depth;
13555
13556
13557
13558
13559
13560
13561
13562 int prio, static_prio, normal_prio;
13563 unsigned int rt_priority;
13564 const struct sched_class *sched_class;
13565 struct sched_entity se;
13566 struct sched_rt_entity rt;
13567 # 1109 "include/linux/sched.h"
13568 unsigned char fpu_counter;
13569 s8 oomkilladj;
13570
13571
13572
13573
13574 unsigned int policy;
13575 cpumask_t cpus_allowed;
13576
13577
13578
13579
13580
13581
13582
13583 struct sched_info sched_info;
13584
13585
13586 struct list_head tasks;
13587
13588 struct mm_struct *mm, *active_mm;
13589
13590
13591 struct linux_binfmt *binfmt;
13592 int exit_state;
13593 int exit_code, exit_signal;
13594 int pdeath_signal;
13595
13596 unsigned int personality;
13597 unsigned did_exec:1;
13598 pid_t pid;
13599 pid_t tgid;
13600 # 1151 "include/linux/sched.h"
13601 struct task_struct *real_parent;
13602 struct task_struct *parent;
13603
13604
13605
13606 struct list_head children;
13607 struct list_head sibling;
13608 struct task_struct *group_leader;
13609
13610
13611
13612
13613
13614
13615 struct list_head ptraced;
13616 struct list_head ptrace_entry;
13617
13618
13619 struct pid_link pids[PIDTYPE_MAX];
13620 struct list_head thread_group;
13621
13622 struct completion *vfork_done;
13623 int *set_child_tid;
13624 int *clear_child_tid;
13625
13626 cputime_t utime, stime, utimescaled, stimescaled;
13627 cputime_t gtime;
13628 cputime_t prev_utime, prev_stime;
13629 unsigned long nvcsw, nivcsw;
13630 struct timespec start_time;
13631 struct timespec real_start_time;
13632
13633 unsigned long min_flt, maj_flt;
13634
13635 struct task_cputime cputime_expires;
13636 struct list_head cpu_timers[3];
13637
13638
13639 uid_t uid,euid,suid,fsuid;
13640 gid_t gid,egid,sgid,fsgid;
13641 struct group_info *group_info;
13642 kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
13643 struct user_struct *user;
13644 unsigned securebits;
13645
13646
13647
13648
13649
13650 char comm[16];
13651
13652
13653
13654
13655 int link_count, total_link_count;
13656
13657
13658 struct sysv_sem sysvsem;
13659
13660
13661
13662
13663
13664
13665
13666 struct thread_struct thread;
13667
13668 struct fs_struct *fs;
13669
13670 struct files_struct *files;
13671
13672 struct nsproxy *nsproxy;
13673
13674 struct signal_struct *signal;
13675 struct sighand_struct *sighand;
13676
13677 sigset_t blocked, real_blocked;
13678 sigset_t saved_sigmask;
13679 struct sigpending pending;
13680
13681 unsigned long sas_ss_sp;
13682 size_t sas_ss_size;
13683 int (*notifier)(void *priv);
13684 void *notifier_data;
13685 sigset_t *notifier_mask;
13686
13687
13688
13689 struct audit_context *audit_context;
13690
13691
13692
13693
13694 seccomp_t seccomp;
13695
13696
13697 u32 parent_exec_id;
13698 u32 self_exec_id;
13699
13700 spinlock_t alloc_lock;
13701
13702
13703 spinlock_t pi_lock;
13704
13705
13706
13707 struct plist_head pi_waiters;
13708
13709 struct rt_mutex_waiter *pi_blocked_on;
13710
13711
13712
13713
13714 struct mutex_waiter *blocked_on;
13715 # 1290 "include/linux/sched.h"
13716 void *journal_info;
13717
13718
13719 struct bio *bio_list, **bio_tail;
13720
13721
13722 struct reclaim_state *reclaim_state;
13723
13724 struct backing_dev_info *backing_dev_info;
13725
13726 struct io_context *io_context;
13727
13728 unsigned long ptrace_message;
13729 siginfo_t *last_siginfo;
13730 struct task_io_accounting ioac;
13731 # 1317 "include/linux/sched.h"
13732 struct css_set *cgroups;
13733
13734 struct list_head cg_list;
13735
13736
13737 struct robust_list_head *robust_list;
13738
13739
13740
13741 struct list_head pi_state_list;
13742 struct futex_pi_state *pi_state_cache;
13743
13744
13745
13746
13747
13748 atomic_t fs_excl;
13749 struct rcu_head rcu;
13750
13751
13752
13753
13754 struct pipe_inode_info *splice_pipe;
13755
13756
13757
13758
13759 int make_it_fail;
13760
13761 struct prop_local_single dirties;
13762 # 1355 "include/linux/sched.h"
13763 unsigned long timer_slack_ns;
13764 unsigned long default_timer_slack_ns;
13765
13766 struct list_head *scm_work_list;
13767 };
13768 # 1380 "include/linux/sched.h"
13769 static inline __attribute__((always_inline)) int rt_prio(int prio)
13770 {
13771 if (__builtin_expect(!!(prio < 100), 0))
13772 return 1;
13773 return 0;
13774 }
13775
13776 static inline __attribute__((always_inline)) int rt_task(struct task_struct *p)
13777 {
13778 return rt_prio(p->prio);
13779 }
13780
13781 static inline __attribute__((always_inline)) void set_task_session(struct task_struct *tsk, pid_t session)
13782 {
13783 tsk->signal->__session = session;
13784 }
13785
13786 static inline __attribute__((always_inline)) void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
13787 {
13788 tsk->signal->__pgrp = pgrp;
13789 }
13790
13791 static inline __attribute__((always_inline)) struct pid *task_pid(struct task_struct *task)
13792 {
13793 return task->pids[PIDTYPE_PID].pid;
13794 }
13795
13796 static inline __attribute__((always_inline)) struct pid *task_tgid(struct task_struct *task)
13797 {
13798 return task->group_leader->pids[PIDTYPE_PID].pid;
13799 }
13800
13801 static inline __attribute__((always_inline)) struct pid *task_pgrp(struct task_struct *task)
13802 {
13803 return task->group_leader->pids[PIDTYPE_PGID].pid;
13804 }
13805
13806 static inline __attribute__((always_inline)) struct pid *task_session(struct task_struct *task)
13807 {
13808 return task->group_leader->pids[PIDTYPE_SID].pid;
13809 }
13810
13811 struct pid_namespace;
13812 # 1438 "include/linux/sched.h"
13813 static inline __attribute__((always_inline)) pid_t task_pid_nr(struct task_struct *tsk)
13814 {
13815 return tsk->pid;
13816 }
13817
13818 pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
13819
13820 static inline __attribute__((always_inline)) pid_t task_pid_vnr(struct task_struct *tsk)
13821 {
13822 return pid_vnr(task_pid(tsk));
13823 }
13824
13825
13826 static inline __attribute__((always_inline)) pid_t task_tgid_nr(struct task_struct *tsk)
13827 {
13828 return tsk->tgid;
13829 }
13830
13831 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
13832
13833 static inline __attribute__((always_inline)) pid_t task_tgid_vnr(struct task_struct *tsk)
13834 {
13835 return pid_vnr(task_tgid(tsk));
13836 }
13837
13838
13839 static inline __attribute__((always_inline)) pid_t task_pgrp_nr(struct task_struct *tsk)
13840 {
13841 return tsk->signal->__pgrp;
13842 }
13843
13844 pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
13845
13846 static inline __attribute__((always_inline)) pid_t task_pgrp_vnr(struct task_struct *tsk)
13847 {
13848 return pid_vnr(task_pgrp(tsk));
13849 }
13850
13851
13852 static inline __attribute__((always_inline)) pid_t task_session_nr(struct task_struct *tsk)
13853 {
13854 return tsk->signal->__session;
13855 }
13856
13857 pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
13858
13859 static inline __attribute__((always_inline)) pid_t task_session_vnr(struct task_struct *tsk)
13860 {
13861 return pid_vnr(task_session(tsk));
13862 }
13863 # 1498 "include/linux/sched.h"
13864 static inline __attribute__((always_inline)) int pid_alive(struct task_struct *p)
13865 {
13866 return p->pids[PIDTYPE_PID].pid != ((void *)0);
13867 }
13868
13869
13870
13871
13872
13873
13874
13875 static inline __attribute__((always_inline)) int is_global_init(struct task_struct *tsk)
13876 {
13877 return tsk->pid == 1;
13878 }
13879
13880
13881
13882
13883
13884 extern int is_container_init(struct task_struct *tsk);
13885
13886 extern struct pid *cad_pid;
13887
13888 extern void free_task(struct task_struct *tsk);
13889
13890
13891 extern void __put_task_struct(struct task_struct *t);
13892
13893 static inline __attribute__((always_inline)) void put_task_struct(struct task_struct *t)
13894 {
13895 if ((atomic_sub_return(1, (&t->usage)) == 0))
13896 __put_task_struct(t);
13897 }
13898
13899 extern cputime_t task_utime(struct task_struct *p);
13900 extern cputime_t task_stime(struct task_struct *p);
13901 extern cputime_t task_gtime(struct task_struct *p);
13902 # 1599 "include/linux/sched.h"
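/*
 * Uniprocessor stub: the cpumask setters only accept masks containing CPU 0
 * and return -22 (-EINVAL) otherwise.
 */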
13903 static inline __attribute__((always_inline)) int set_cpus_allowed_ptr(struct task_struct *p,
13904 const cpumask_t *new_mask)
13905 {
13906 if (!test_bit((0), (*new_mask).bits))
13907 return -22;
13908 return 0;
13909 }
13910
13911 static inline __attribute__((always_inline)) int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
13912 {
13913 return set_cpus_allowed_ptr(p, &new_mask);
13914 }
13915
13916 extern unsigned long long sched_clock(void);
13917
13918 extern void sched_clock_init(void);
13919 extern u64 sched_clock_cpu(int cpu);
13920
13921
13922 static inline __attribute__((always_inline)) void sched_clock_tick(void)
13923 {
13924 }
13925
13926 static inline __attribute__((always_inline)) void sched_clock_idle_sleep_event(void)
13927 {
13928 }
13929
13930 static inline __attribute__((always_inline)) void sched_clock_idle_wakeup_event(u64 delta_ns)
13931 {
13932 }
13933 # 1639 "include/linux/sched.h"
13934 extern unsigned long long cpu_clock(int cpu);
13935
13936 extern unsigned long long
13937 task_sched_runtime(struct task_struct *task);
13938 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
13939 # 1652 "include/linux/sched.h"
13940 extern void sched_clock_idle_sleep_event(void);
13941 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
13942
13943
13944
13945
13946 static inline __attribute__((always_inline)) void idle_task_exit(void) {}
13947
13948
13949 extern void sched_idle_next(void);
13950
13951
13952
13953
13954 static inline __attribute__((always_inline)) void wake_up_idle_cpu(int cpu) { }
13955
13956
13957
13958 extern unsigned int sysctl_sched_latency;
13959 extern unsigned int sysctl_sched_min_granularity;
13960 extern unsigned int sysctl_sched_wakeup_granularity;
13961 extern unsigned int sysctl_sched_child_runs_first;
13962 extern unsigned int sysctl_sched_features;
13963 extern unsigned int sysctl_sched_migration_cost;
13964 extern unsigned int sysctl_sched_nr_migrate;
13965 extern unsigned int sysctl_sched_shares_ratelimit;
13966 extern unsigned int sysctl_sched_shares_thresh;
13967
13968 int sched_nr_latency_handler(struct ctl_table *table, int write,
13969 struct file *file, void *buffer, size_t *length,
13970 loff_t *ppos);
13971
13972 extern unsigned int sysctl_sched_rt_period;
13973 extern int sysctl_sched_rt_runtime;
13974
13975 int sched_rt_handler(struct ctl_table *table, int write,
13976 struct file *filp, void *buffer, size_t *lenp,
13977 loff_t *ppos);
13978
13979 extern unsigned int sysctl_sched_compat_yield;
13980
13981
13982 extern int rt_mutex_getprio(struct task_struct *p);
13983 extern void rt_mutex_setprio(struct task_struct *p, int prio);
13984 extern void rt_mutex_adjust_pi(struct task_struct *p);
13985 # 1705 "include/linux/sched.h"
13986 extern void set_user_nice(struct task_struct *p, long nice);
13987 extern int task_prio(const struct task_struct *p);
13988 extern int task_nice(const struct task_struct *p);
13989 extern int can_nice(const struct task_struct *p, const int nice);
13990 extern int task_curr(const struct task_struct *p);
13991 extern int idle_cpu(int cpu);
13992 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
13993 extern int sched_setscheduler_nocheck(struct task_struct *, int,
13994 struct sched_param *);
13995 extern struct task_struct *idle_task(int cpu);
13996 extern struct task_struct *curr_task(int cpu);
13997 extern void set_curr_task(int cpu, struct task_struct *p);
13998
13999 void yield(void);
14000
14001
14002
14003
14004 extern struct exec_domain default_exec_domain;
14005
14006 union thread_union {
14007 struct thread_info thread_info;
14008 unsigned long stack[8192/sizeof(long)];
14009 };
14010
14011
14012 static inline __attribute__((always_inline)) int kstack_end(void *addr)
14013 {
14014
14015
14016
14017 return !(((unsigned long)addr+sizeof(void*)-1) & (8192 -sizeof(void*)));
14018 }
14019
14020
14021 extern union thread_union init_thread_union;
14022 extern struct task_struct init_task;
14023
14024 extern struct mm_struct init_mm;
14025
14026 extern struct pid_namespace init_pid_ns;
14027 # 1761 "include/linux/sched.h"
14028 extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
14029 struct pid_namespace *ns);
14030
14031 extern struct task_struct *find_task_by_vpid(pid_t nr);
14032 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
14033 struct pid_namespace *ns);
14034
14035 extern void __set_special_pids(struct pid *pid);
14036
14037
14038 extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
14039 static inline __attribute__((always_inline)) struct user_struct *get_uid(struct user_struct *u)
14040 {
14041 atomic_inc(&u->__count);
14042 return u;
14043 }
14044 extern void free_uid(struct user_struct *);
14045 extern void switch_uid(struct user_struct *);
14046 extern void release_uids(struct user_namespace *ns);
14047
14048
14049
14050 extern void do_timer(unsigned long ticks);
14051
14052 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
14053 extern int wake_up_process(struct task_struct *tsk);
14054 extern void wake_up_new_task(struct task_struct *tsk,
14055 unsigned long clone_flags);
14056
14057
14058
14059 static inline __attribute__((always_inline)) void kick_process(struct task_struct *tsk) { }
14060
14061 extern void sched_fork(struct task_struct *p, int clone_flags);
14062 extern void sched_dead(struct task_struct *p);
14063
14064 extern int in_group_p(gid_t);
14065 extern int in_egroup_p(gid_t);
14066
14067 extern void proc_caches_init(void);
14068 extern void flush_signals(struct task_struct *);
14069 extern void ignore_signals(struct task_struct *);
14070 extern void flush_signal_handlers(struct task_struct *, int force_default);
14071 extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
14072
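/*
 * The do { ... } while (0) blocks below are the expanded
 * spin_lock_irqsave()/spin_unlock_irqrestore() pairs around dequeue_signal().
 */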
14073 static inline __attribute__((always_inline)) int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
14074 {
14075 unsigned long flags;
14076 int ret;
14077
14078 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _spin_lock_irqsave(&tsk->sighand->siglock); } while (0);
14079 ret = dequeue_signal(tsk, mask, info);
14080 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _spin_unlock_irqrestore(&tsk->sighand->siglock, flags); } while (0);
14081
14082 return ret;
14083 }
14084
14085 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
14086 sigset_t *mask);
14087 extern void unblock_all_signals(void);
14088 extern void release_task(struct task_struct * p);
14089 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
14090 extern int force_sigsegv(int, struct task_struct *);
14091 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
14092 extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
14093 extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
14094 extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
14095 extern int kill_pgrp(struct pid *pid, int sig, int priv);
14096 extern int kill_pid(struct pid *pid, int sig, int priv);
14097 extern int kill_proc_info(int, struct siginfo *, pid_t);
14098 extern int do_notify_parent(struct task_struct *, int);
14099 extern void force_sig(int, struct task_struct *);
14100 extern void force_sig_specific(int, struct task_struct *);
14101 extern int send_sig(int, struct task_struct *, int);
14102 extern void zap_other_threads(struct task_struct *p);
14103 extern struct sigqueue *sigqueue_alloc(void);
14104 extern void sigqueue_free(struct sigqueue *);
14105 extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
14106 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
14107 extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
14108
14109 static inline __attribute__((always_inline)) int kill_cad_pid(int sig, int priv)
14110 {
14111 return kill_pid(cad_pid, sig, priv);
14112 }
14113
14114
14115
14116
14117
14118
14119 static inline __attribute__((always_inline)) int is_si_special(const struct siginfo *info)
14120 {
14121 return info <= ((struct siginfo *) 2);
14122 }
14123
14124
14125
14126 static inline __attribute__((always_inline)) int on_sig_stack(unsigned long sp)
14127 {
14128 return (sp - (get_current())->sas_ss_sp < (get_current())->sas_ss_size);
14129 }
14130
14131 static inline __attribute__((always_inline)) int sas_ss_flags(unsigned long sp)
14132 {
14133 return ((get_current())->sas_ss_size == 0 ? 2
14134 : on_sig_stack(sp) ? 1 : 0);
14135 }
14136
14137
14138
14139
14140 extern struct mm_struct * mm_alloc(void);
14141
14142
14143 extern void __mmdrop(struct mm_struct *);
14144 static inline __attribute__((always_inline)) void mmdrop(struct mm_struct * mm)
14145 {
14146 if (__builtin_expect(!!((atomic_sub_return(1, (&mm->mm_count)) == 0)), 0))
14147 __mmdrop(mm);
14148 }
14149
14150
14151 extern void mmput(struct mm_struct *);
14152
14153 extern struct mm_struct *get_task_mm(struct task_struct *task);
14154
14155 extern void mm_release(struct task_struct *, struct mm_struct *);
14156
14157 extern struct mm_struct *dup_mm(struct task_struct *tsk);
14158
14159 extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
14160 extern void flush_thread(void);
14161 extern void exit_thread(void);
14162
14163 extern void exit_files(struct task_struct *);
14164 extern void __cleanup_signal(struct signal_struct *);
14165 extern void __cleanup_sighand(struct sighand_struct *);
14166
14167 extern void exit_itimers(struct signal_struct *);
14168 extern void flush_itimer_signals(void);
14169
14170 extern void do_group_exit(int);
14171
14172 extern void daemonize(const char *, ...);
14173 extern int allow_signal(int);
14174 extern int disallow_signal(int);
14175
14176 extern int do_execve(char *, char * *, char * *, struct pt_regs *);
14177 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int *, int *);
14178 struct task_struct *fork_idle(int);
14179
14180 extern void set_task_comm(struct task_struct *tsk, char *from);
14181 extern char *get_task_comm(char *to, struct task_struct *tsk);
14182
14183
14184
14185
14186 static inline __attribute__((always_inline)) unsigned long wait_task_inactive(struct task_struct *p,
14187 long match_state)
14188 {
14189 return 1;
14190 }
14191 # 1950 "include/linux/sched.h"
14192 static inline __attribute__((always_inline)) int has_group_leader_pid(struct task_struct *p)
14193 {
14194 return p->pid == p->tgid;
14195 }
14196
14197 static inline __attribute__((always_inline))
14198 int same_thread_group(struct task_struct *p1, struct task_struct *p2)
14199 {
14200 return p1->tgid == p2->tgid;
14201 }
14202
14203 static inline __attribute__((always_inline)) struct task_struct *next_thread(const struct task_struct *p)
14204 {
14205 return ({ const typeof( ((struct task_struct *)0)->thread_group ) *__mptr = (({ typeof(p->thread_group.next) _________p1 = (*(volatile typeof(p->thread_group.next) *)&(p->thread_group.next)); do { } while(0); (_________p1); })); (struct task_struct *)( (char *)__mptr - __builtin_offsetof(struct task_struct,thread_group) );});
14206
14207 }
14208
14209 static inline __attribute__((always_inline)) int thread_group_empty(struct task_struct *p)
14210 {
14211 return list_empty(&p->thread_group);
14212 }
14213 # 1985 "include/linux/sched.h"
14214 static inline __attribute__((always_inline)) void task_lock(struct task_struct *p)
14215 {
14216 _spin_lock(&p->alloc_lock);
14217 }
14218
14219 static inline __attribute__((always_inline)) void task_unlock(struct task_struct *p)
14220 {
14221 _spin_unlock(&p->alloc_lock);
14222 }
14223
14224 extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
14225 unsigned long *flags);
14226
14227 static inline __attribute__((always_inline)) void unlock_task_sighand(struct task_struct *tsk,
14228 unsigned long *flags)
14229 {
14230 do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); } while (0);
14231 }
14232
14233
14234
14235
14236
14237
14238 static inline __attribute__((always_inline)) void setup_thread_stack(struct task_struct *p, struct task_struct *org)
14239 {
14240 *((struct thread_info *)(p)->stack) = *((struct thread_info *)(org)->stack);
14241 ((struct thread_info *)(p)->stack)->task = p;
14242 }
14243
14244 static inline __attribute__((always_inline)) unsigned long *end_of_stack(struct task_struct *p)
14245 {
14246 return (unsigned long *)(((struct thread_info *)(p)->stack) + 1);
14247 }
14248
14249
14250
14251 static inline __attribute__((always_inline)) int object_is_on_stack(void *obj)
14252 {
14253 void *stack = (((get_current()))->stack);
14254
14255 return (obj >= stack) && (obj < (stack + 8192));
14256 }
14257
14258 extern void thread_info_cache_init(void);
14259
14260
14261
14262
14263 static inline __attribute__((always_inline)) void set_tsk_thread_flag(struct task_struct *tsk, int flag)
14264 {
14265 set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
14266 }
14267
14268 static inline __attribute__((always_inline)) void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
14269 {
14270 clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
14271 }
14272
14273 static inline __attribute__((always_inline)) int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
14274 {
14275 return test_and_set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
14276 }
14277
14278 static inline __attribute__((always_inline)) int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
14279 {
14280 return test_and_clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
14281 }
14282
14283 static inline __attribute__((always_inline)) int test_tsk_thread_flag(struct task_struct *tsk, int flag)
14284 {
14285 return test_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
14286 }
14287
14288 static inline __attribute__((always_inline)) void set_tsk_need_resched(struct task_struct *tsk)
14289 {
14290 set_tsk_thread_flag(tsk,2);
14291 }
14292
14293 static inline __attribute__((always_inline)) void clear_tsk_need_resched(struct task_struct *tsk)
14294 {
14295 clear_tsk_thread_flag(tsk,2);
14296 }
14297
14298 static inline __attribute__((always_inline)) int test_tsk_need_resched(struct task_struct *tsk)
14299 {
14300 return __builtin_expect(!!(test_tsk_thread_flag(tsk,2)), 0);
14301 }
14302
14303 static inline __attribute__((always_inline)) int signal_pending(struct task_struct *p)
14304 {
14305 return __builtin_expect(!!(test_tsk_thread_flag(p,1)), 0);
14306 }
14307
14308 extern int __fatal_signal_pending(struct task_struct *p);
14309
14310 static inline __attribute__((always_inline)) int fatal_signal_pending(struct task_struct *p)
14311 {
14312 return signal_pending(p) && __fatal_signal_pending(p);
14313 }
14314
14315 static inline __attribute__((always_inline)) int signal_pending_state(long state, struct task_struct *p)
14316 {
14317 if (!(state & (1 | 128)))
14318 return 0;
14319 if (!signal_pending(p))
14320 return 0;
14321
14322 return (state & 1) || __fatal_signal_pending(p);
14323 }
14324
14325 static inline __attribute__((always_inline)) int need_resched(void)
14326 {
14327 return __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 2)), 0);
14328 }
14329 # 2108 "include/linux/sched.h"
14330 extern int _cond_resched(void);
14331
14332
14333
14334
14335
14336
14337 static inline __attribute__((always_inline)) int cond_resched(void)
14338 {
14339 return _cond_resched();
14340 }
14341
14342 extern int cond_resched_lock(spinlock_t * lock);
14343 extern int cond_resched_softirq(void);
14344 static inline __attribute__((always_inline)) int cond_resched_bkl(void)
14345 {
14346 return _cond_resched();
14347 }
14348
14349
14350
14351
14352
14353
14354 static inline __attribute__((always_inline)) int spin_needbreak(spinlock_t *lock)
14355 {
14356
14357
14358
14359 return 0;
14360
14361 }
14362
14363
14364
14365
14366
14367 extern int thread_group_cputime_alloc(struct task_struct *);
14368 extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
14369
14370 static inline __attribute__((always_inline)) void thread_group_cputime_init(struct signal_struct *sig)
14371 {
14372 sig->cputime.totals = ((void *)0);
14373 }
14374
14375 static inline __attribute__((always_inline)) int thread_group_cputime_clone_thread(struct task_struct *curr)
14376 {
14377 if (curr->signal->cputime.totals)
14378 return 0;
14379 return thread_group_cputime_alloc(curr);
14380 }
14381
14382 static inline __attribute__((always_inline)) void thread_group_cputime_free(struct signal_struct *sig)
14383 {
14384 percpu_free((sig->cputime.totals));
14385 }
14386
14387
14388
14389
14390
14391
14392
14393 extern void recalc_sigpending_and_wake(struct task_struct *t);
14394 extern void recalc_sigpending(void);
14395
14396 extern void signal_wake_up(struct task_struct *t, int resume_stopped);
14397 # 2190 "include/linux/sched.h"
14398 static inline __attribute__((always_inline)) unsigned int task_cpu(const struct task_struct *p)
14399 {
14400 return 0;
14401 }
14402
14403 static inline __attribute__((always_inline)) void set_task_cpu(struct task_struct *p, unsigned int cpu)
14404 {
14405 }
14406
14407
14408
14409 extern void arch_pick_mmap_layout(struct mm_struct *mm);
14410
14411
14412 extern void
14413 __trace_special(void *__tr, void *__data,
14414 unsigned long arg1, unsigned long arg2, unsigned long arg3);
14415 # 2215 "include/linux/sched.h"
14416 extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
14417 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
14418
14419 extern int sched_mc_power_savings, sched_smt_power_savings;
14420
14421 extern void normalize_rt_tasks(void);
14422 # 2267 "include/linux/sched.h"
14423 static inline __attribute__((always_inline)) void add_rchar(struct task_struct *tsk, ssize_t amt)
14424 {
14425 }
14426
14427 static inline __attribute__((always_inline)) void add_wchar(struct task_struct *tsk, ssize_t amt)
14428 {
14429 }
14430
14431 static inline __attribute__((always_inline)) void inc_syscr(struct task_struct *tsk)
14432 {
14433 }
14434
14435 static inline __attribute__((always_inline)) void inc_syscw(struct task_struct *tsk)
14436 {
14437 }
14438 # 2292 "include/linux/sched.h"
14439 static inline __attribute__((always_inline)) void mm_update_next_owner(struct mm_struct *mm)
14440 {
14441 }
14442
14443 static inline __attribute__((always_inline)) void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
14444 {
14445 }
14446 # 13 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" 2
14447 # 24 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
14448 static inline __attribute__((always_inline)) void set_fs(mm_segment_t fs)
14449 {
14450 current_thread_info()->addr_limit = fs;
14451 }
14452 # 36 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
14453 static inline __attribute__((always_inline)) int is_in_rom(unsigned long addr)
14454 {
14455 # 46 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
14456 if ((addr < _ramstart) || (addr >= _ramend))
14457 return (1);
14458
14459
14460 return (0);
14461 }
14462 # 60 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
14463 static inline __attribute__((always_inline)) int _access_ok(unsigned long addr, unsigned long size) { return 1; }
14464 # 82 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
14465 struct exception_table_entry {
14466 unsigned long insn, fixup;
14467 };
14468
14469
14470 extern unsigned long search_exception_table(unsigned long);
14471 # 129 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
14472 static inline __attribute__((always_inline)) int bad_user_access_length(void)
14473 {
14474 panic("bad_user_access_length");
14475 return -1;
14476 }
14477 # 200 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
14478 static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
14479 copy_from_user(void *to, const void *from, unsigned long n)
14480 {
14481 if (_access_ok((unsigned long)(from), (n)))
14482 memcpy(to, from, n);
14483 else
14484 return n;
14485 return 0;
14486 }
14487
14488 static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
14489 copy_to_user(void *to, const void *from, unsigned long n)
14490 {
14491 if (_access_ok((unsigned long)(to), (n)))
14492 memcpy(to, from, n);
14493 else
14494 return n;
14495 return 0;
14496 }
14497
14498
14499
14500
14501
14502 static inline __attribute__((always_inline)) long __attribute__((warn_unused_result))
14503 strncpy_from_user(char *dst, const char *src, long count)
14504 {
14505 char *tmp;
14506 if (!_access_ok((unsigned long)(src), (1)))
14507 return -14;
14508 strncpy(dst, src, count);
14509 for (tmp = dst; *tmp && count > 0; tmp++, count--) ;
14510 return (tmp - dst);
14511 }
14512
14513
14514
14515
14516
14517
14518 static inline __attribute__((always_inline)) long strnlen_user(const char *src, long n)
14519 {
14520 return (strlen(src) + 1);
14521 }
14522
14523
14524
14525
14526
14527
14528
14529 static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
14530 __clear_user(void *to, unsigned long n)
14531 {
14532 memset(to, 0, n);
14533 return 0;
14534 }
14535 # 6 "include/linux/uaccess.h" 2
14536 # 16 "include/linux/uaccess.h"
14537 static inline __attribute__((always_inline)) void pagefault_disable(void)
14538 {
14539 do { (current_thread_info()->preempt_count) += (1); } while (0);
14540
14541
14542
14543
14544 __asm__ __volatile__("": : :"memory");
14545 }
14546
14547 static inline __attribute__((always_inline)) void pagefault_enable(void)
14548 {
14549
14550
14551
14552
14553 __asm__ __volatile__("": : :"memory");
14554 do { (current_thread_info()->preempt_count) -= (1); } while (0);
14555
14556
14557
14558 __asm__ __volatile__("": : :"memory");
14559 do { } while (0);
14560 }
14561
14562
14563
14564 static inline __attribute__((always_inline)) unsigned long __copy_from_user_inatomic_nocache(void *to,
14565 const void *from, unsigned long n)
14566 {
14567 return copy_from_user(to, from, n);
14568 }
14569
14570 static inline __attribute__((always_inline)) unsigned long __copy_from_user_nocache(void *to,
14571 const void *from, unsigned long n)
14572 {
14573 return copy_from_user(to, from, n);
14574 }
14575 # 96 "include/linux/uaccess.h"
14576 extern long probe_kernel_read(void *dst, void *src, size_t size);
14577 # 107 "include/linux/uaccess.h"
14578 extern long probe_kernel_write(void *dst, void *src, size_t size);
14579 # 7 "include/linux/highmem.h" 2
14580
14581 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h" 1
14582 # 33 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h"
14583 extern void blackfin_icache_dcache_flush_range(unsigned long start_address, unsigned long end_address);
14584 extern void blackfin_icache_flush_range(unsigned long start_address, unsigned long end_address);
14585 extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
14586 extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address);
14587 extern void blackfin_dflush_page(void *page);
14588 extern void blackfin_invalidate_entire_dcache(void);
14589 # 55 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h"
14590 static inline __attribute__((always_inline)) void flush_icache_range(unsigned start, unsigned end)
14591 {
14592
14593
14594
14595
14596
14597
14598 blackfin_icache_dcache_flush_range((start), (end));
14599 # 77 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h"
14600 }
14601 # 100 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h"
14602 extern unsigned long reserved_mem_dcache_on;
14603 extern unsigned long reserved_mem_icache_on;
14604
14605 static inline __attribute__((always_inline)) int bfin_addr_dcachable(unsigned long addr)
14606 {
14607
14608 if (addr < (_ramend - (1024 * 1024)))
14609 return 1;
14610
14611
14612 if (reserved_mem_dcache_on &&
14613 addr >= _ramend && addr < physical_mem_end)
14614 return 1;
14615
14616 return 0;
14617 }
14618 # 9 "include/linux/highmem.h" 2
14619
14620
14621 static inline __attribute__((always_inline)) void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
14622 {
14623 }
14624
14625
14626
14627 static inline __attribute__((always_inline)) void flush_kernel_dcache_page(struct page *page)
14628 {
14629 }
14630 # 34 "include/linux/highmem.h"
14631 static inline __attribute__((always_inline)) unsigned int nr_free_highpages(void) { return 0; }
14632
14633
14634
14635
14636 static inline __attribute__((always_inline)) void *kmap(struct page *page)
14637 {
14638 do { do { } while (0); } while (0);
14639 return lowmem_page_address(page);
14640 }
14641
14642
14643
14644 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/kmap_types.h" 1
14645
14646
14647
14648 enum km_type {
14649 KM_BOUNCE_READ,
14650 KM_SKB_SUNRPC_DATA,
14651 KM_SKB_DATA_SOFTIRQ,
14652 KM_USER0,
14653 KM_USER1,
14654 KM_BIO_SRC_IRQ,
14655 KM_BIO_DST_IRQ,
14656 KM_PTE0,
14657 KM_PTE1,
14658 KM_IRQ0,
14659 KM_IRQ1,
14660 KM_SOFTIRQ0,
14661 KM_SOFTIRQ1,
14662 KM_TYPE_NR
14663 };
14664 # 48 "include/linux/highmem.h" 2
14665
14666 static inline __attribute__((always_inline)) void *kmap_atomic(struct page *page, enum km_type idx)
14667 {
14668 pagefault_disable();
14669 return lowmem_page_address(page);
14670 }
14671 # 67 "include/linux/highmem.h"
14672 static inline __attribute__((always_inline)) void clear_user_highpage(struct page *page, unsigned long vaddr)
14673 {
14674 void *addr = kmap_atomic(page, KM_USER0);
14675 memset((addr), 0, (1UL << 12));
14676 do { pagefault_enable(); } while (0);
14677 }
14678 # 90 "include/linux/highmem.h"
14679 static inline __attribute__((always_inline)) struct page *
14680 __alloc_zeroed_user_highpage(gfp_t movableflags,
14681 struct vm_area_struct *vma,
14682 unsigned long vaddr)
14683 {
14684 struct page *page = alloc_pages_node((((void)(0),0)), ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( gfp_t)0x20000u) | (( gfp_t)0x02u)) | movableflags, 0);
14685
14686
14687 if (page)
14688 clear_user_highpage(page, vaddr);
14689
14690 return page;
14691 }
14692 # 113 "include/linux/highmem.h"
14693 static inline __attribute__((always_inline)) struct page *
14694 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
14695 unsigned long vaddr)
14696 {
14697 return __alloc_zeroed_user_highpage((( gfp_t)0x100000u), vma, vaddr);
14698 }
14699
14700 static inline __attribute__((always_inline)) void clear_highpage(struct page *page)
14701 {
14702 void *kaddr = kmap_atomic(page, KM_USER0);
14703 memset((kaddr), 0, (1UL << 12));
14704 do { pagefault_enable(); } while (0);
14705 }
14706
14707 static inline __attribute__((always_inline)) void zero_user_segments(struct page *page,
14708 unsigned start1, unsigned end1,
14709 unsigned start2, unsigned end2)
14710 {
14711 void *kaddr = kmap_atomic(page, KM_USER0);
14712
14713 do { if (__builtin_expect(!!(end1 > (1UL << 12) || end2 > (1UL << 12)), 0)) do { dump_bfin_trace_buffer(); printk("<0>" "BUG: failure at %s:%d/%s()!\n", "include/linux/highmem.h", 133, __func__); panic("BUG!"); } while (0); } while(0);
14714
14715 if (end1 > start1)
14716 memset(kaddr + start1, 0, end1 - start1);
14717
14718 if (end2 > start2)
14719 memset(kaddr + start2, 0, end2 - start2);
14720
14721 do { pagefault_enable(); } while (0);
14722 blackfin_dflush_page(lowmem_page_address(page));
14723 }
14724
14725 static inline __attribute__((always_inline)) void zero_user_segment(struct page *page,
14726 unsigned start, unsigned end)
14727 {
14728 zero_user_segments(page, start, end, 0, 0);
14729 }
14730
14731 static inline __attribute__((always_inline)) void zero_user(struct page *page,
14732 unsigned start, unsigned size)
14733 {
14734 zero_user_segments(page, start, start + size, 0, 0);
14735 }
14736
14737 static inline __attribute__((always_inline)) void __attribute__((deprecated)) memclear_highpage_flush(struct page *page,
14738 unsigned int offset, unsigned int size)
14739 {
14740 zero_user(page, offset, size);
14741 }
14742
14743
14744
14745 static inline __attribute__((always_inline)) void copy_user_highpage(struct page *to, struct page *from,
14746 unsigned long vaddr, struct vm_area_struct *vma)
14747 {
14748 char *vfrom, *vto;
14749
14750 vfrom = kmap_atomic(from, KM_USER0);
14751 vto = kmap_atomic(to, KM_USER1);
14752 memcpy((vto), (vfrom), (1UL << 12));
14753 do { pagefault_enable(); } while (0);
14754 do { pagefault_enable(); } while (0);
14755 }
14756
14757
14758
14759 static inline __attribute__((always_inline)) void copy_highpage(struct page *to, struct page *from)
14760 {
14761 char *vfrom, *vto;
14762
14763 vfrom = kmap_atomic(from, KM_USER0);
14764 vto = kmap_atomic(to, KM_USER1);
14765 memcpy((vto), (vfrom), (1UL << 12));
14766 do { pagefault_enable(); } while (0);
14767 do { pagefault_enable(); } while (0);
14768 }
14769 # 11 "include/linux/pagemap.h" 2
14770
14771
14772
14773
14774 # 1 "include/linux/hardirq.h" 1
14775
14776
14777
14778
14779 # 1 "include/linux/smp_lock.h" 1
14780 # 6 "include/linux/hardirq.h" 2
14781
14782 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/hardirq.h" 1
14783
14784
14785
14786
14787
14788
14789
14790 typedef struct {
14791 unsigned int __softirq_pending;
14792 unsigned int __syscall_count;
14793 struct task_struct *__ksoftirqd_task;
14794 } irq_cpustat_t;
14795
14796 # 1 "include/linux/irq_cpustat.h" 1
14797 # 20 "include/linux/irq_cpustat.h"
14798 extern irq_cpustat_t irq_stat[];
14799 # 15 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/hardirq.h" 2
14800 # 45 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/hardirq.h"
14801 extern void ack_bad_irq(unsigned int irq);
14802 # 8 "include/linux/hardirq.h" 2
14803 # 113 "include/linux/hardirq.h"
14804 struct task_struct;
14805
14806
14807 static inline __attribute__((always_inline)) void account_system_vtime(struct task_struct *tsk)
14808 {
14809 }
14810 # 146 "include/linux/hardirq.h"
14811 extern void irq_enter(void);
14812 # 162 "include/linux/hardirq.h"
14813 extern void irq_exit(void);
14814 # 16 "include/linux/pagemap.h" 2
14815 # 25 "include/linux/pagemap.h"
14816 static inline __attribute__((always_inline)) void mapping_set_error(struct address_space *mapping, int error)
14817 {
14818 if (__builtin_expect(!!(error), 0)) {
14819 if (error == -28)
14820 set_bit((21 + 1), &mapping->flags);
14821 else
14822 set_bit((21 + 0), &mapping->flags);
14823 }
14824 }
14825 # 55 "include/linux/pagemap.h"
14826 static inline __attribute__((always_inline)) void mapping_set_unevictable(struct address_space *mapping) { }
14827 static inline __attribute__((always_inline)) void mapping_clear_unevictable(struct address_space *mapping) { }
14828 static inline __attribute__((always_inline)) int mapping_unevictable(struct address_space *mapping)
14829 {
14830 return 0;
14831 }
14832
14833
14834 static inline __attribute__((always_inline)) gfp_t mapping_gfp_mask(struct address_space * mapping)
14835 {
14836 return ( gfp_t)mapping->flags & (( gfp_t)((1 << 21) - 1));
14837 }
14838
14839
14840
14841
14842
14843 static inline __attribute__((always_inline)) void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
14844 {
14845 m->flags = (m->flags & ~( unsigned long)(( gfp_t)((1 << 21) - 1))) |
14846 ( unsigned long)mask;
14847 }
14848 # 93 "include/linux/pagemap.h"
14849 void release_pages(struct page **pages, int nr, int cold);
14850 # 139 "include/linux/pagemap.h"
14851 static inline __attribute__((always_inline)) int page_cache_get_speculative(struct page *page)
14852 {
14853 do { } while (0);
14854 # 156 "include/linux/pagemap.h"
14855 do { } while (0);
14856 atomic_inc(&page->_count);
14857 # 169 "include/linux/pagemap.h"
14858 do { } while (0);
14859
14860 return 1;
14861 }
14862
14863
14864
14865
14866 static inline __attribute__((always_inline)) int page_cache_add_speculative(struct page *page, int count)
14867 {
14868 do { } while (0);
14869
14870
14871
14872
14873
14874 do { } while (0);
14875 atomic_add(count, &page->_count);
14876
14877
14878
14879
14880
14881 do { } while (0);
14882
14883 return 1;
14884 }
14885
14886 static inline __attribute__((always_inline)) int page_freeze_refs(struct page *page, int count)
14887 {
14888 return __builtin_expect(!!(((int)((__typeof__(*((&((&page->_count)->counter)))))__cmpxchg_local_generic(((&((&page->_count)->counter))), (unsigned long)(((count))), (unsigned long)(((0))), sizeof(*((&((&page->_count)->counter))))))) == count), 1);
14889 }
14890
14891 static inline __attribute__((always_inline)) void page_unfreeze_refs(struct page *page, int count)
14892 {
14893 do { } while (0);
14894 do { } while (0);
14895
14896 (((&page->_count)->counter) = count);
14897 }
14898
14899
14900
14901
14902 static inline __attribute__((always_inline)) struct page *__page_cache_alloc(gfp_t gfp)
14903 {
14904 return alloc_pages_node((((void)(0),0)), gfp, 0);
14905 }
14906
14907
14908 static inline __attribute__((always_inline)) struct page *page_cache_alloc(struct address_space *x)
14909 {
14910 return __page_cache_alloc(mapping_gfp_mask(x));
14911 }
14912
14913 static inline __attribute__((always_inline)) struct page *page_cache_alloc_cold(struct address_space *x)
14914 {
14915 return __page_cache_alloc(mapping_gfp_mask(x)|(( gfp_t)0x100u));
14916 }
14917
14918 typedef int filler_t(void *, struct page *);
14919
14920 extern struct page * find_get_page(struct address_space *mapping,
14921 unsigned long index);
14922 extern struct page * find_lock_page(struct address_space *mapping,
14923 unsigned long index);
14924 extern struct page * find_or_create_page(struct address_space *mapping,
14925 unsigned long index, gfp_t gfp_mask);
14926 unsigned find_get_pages(struct address_space *mapping, unsigned long start,
14927 unsigned int nr_pages, struct page **pages);
14928 unsigned find_get_pages_contig(struct address_space *mapping, unsigned long start,
14929 unsigned int nr_pages, struct page **pages);
14930 unsigned find_get_pages_tag(struct address_space *mapping, unsigned long *index,
14931 int tag, unsigned int nr_pages, struct page **pages);
14932
14933 struct page *__grab_cache_page(struct address_space *mapping, unsigned long index);
14934
14935
14936
14937
14938 static inline __attribute__((always_inline)) struct page *grab_cache_page(struct address_space *mapping,
14939 unsigned long index)
14940 {
14941 return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
14942 }
14943
14944 extern struct page * grab_cache_page_nowait(struct address_space *mapping,
14945 unsigned long index);
14946 extern struct page * read_cache_page_async(struct address_space *mapping,
14947 unsigned long index, filler_t *filler,
14948 void *data);
14949 extern struct page * read_cache_page(struct address_space *mapping,
14950 unsigned long index, filler_t *filler,
14951 void *data);
14952 extern int read_cache_pages(struct address_space *mapping,
14953 struct list_head *pages, filler_t *filler, void *data);
14954
14955 static inline __attribute__((always_inline)) struct page *read_mapping_page_async(
14956 struct address_space *mapping,
14957 unsigned long index, void *data)
14958 {
14959 filler_t *filler = (filler_t *)mapping->a_ops->readpage;
14960 return read_cache_page_async(mapping, index, filler, data);
14961 }
14962
14963 static inline __attribute__((always_inline)) struct page *read_mapping_page(struct address_space *mapping,
14964 unsigned long index, void *data)
14965 {
14966 filler_t *filler = (filler_t *)mapping->a_ops->readpage;
14967 return read_cache_page(mapping, index, filler, data);
14968 }
14969
14970
14971
14972
14973 static inline __attribute__((always_inline)) loff_t page_offset(struct page *page)
14974 {
14975 return ((loff_t)page->index) << 12;
14976 }
14977
14978 static inline __attribute__((always_inline)) unsigned long linear_page_index(struct vm_area_struct *vma,
14979 unsigned long address)
14980 {
14981 unsigned long pgoff = (address - vma->vm_start) >> 12;
14982 pgoff += vma->vm_pgoff;
14983 return pgoff >> (12 - 12);
14984 }
14985
14986 extern void __lock_page(struct page *page);
14987 extern int __lock_page_killable(struct page *page);
14988 extern void __lock_page_nosync(struct page *page);
14989 extern void unlock_page(struct page *page);
14990
14991 static inline __attribute__((always_inline)) void __set_page_locked(struct page *page)
14992 {
14993 __set_bit(PG_locked, &page->flags);
14994 }
14995
14996 static inline __attribute__((always_inline)) void __clear_page_locked(struct page *page)
14997 {
14998 __clear_bit(PG_locked, &page->flags);
14999 }
15000
15001 static inline __attribute__((always_inline)) int trylock_page(struct page *page)
15002 {
15003 return (__builtin_expect(!!(!test_and_set_bit(PG_locked, &page->flags)), 1));
15004 }
15005
15006
15007
15008
15009 static inline __attribute__((always_inline)) void lock_page(struct page *page)
15010 {
15011 do { do { } while (0); } while (0);
15012 if (!trylock_page(page))
15013 __lock_page(page);
15014 }
15015
15016
15017
15018
15019
15020
15021 static inline __attribute__((always_inline)) int lock_page_killable(struct page *page)
15022 {
15023 do { do { } while (0); } while (0);
15024 if (!trylock_page(page))
15025 return __lock_page_killable(page);
15026 return 0;
15027 }
15028
15029
15030
15031
15032
15033 static inline __attribute__((always_inline)) void lock_page_nosync(struct page *page)
15034 {
15035 do { do { } while (0); } while (0);
15036 if (!trylock_page(page))
15037 __lock_page_nosync(page);
15038 }
15039
15040
15041
15042
15043
15044 extern void wait_on_page_bit(struct page *page, int bit_nr);
15045 # 364 "include/linux/pagemap.h"
15046 static inline __attribute__((always_inline)) void wait_on_page_locked(struct page *page)
15047 {
15048 if (PageLocked(page))
15049 wait_on_page_bit(page, PG_locked);
15050 }
15051
15052
15053
15054
15055 static inline __attribute__((always_inline)) void wait_on_page_writeback(struct page *page)
15056 {
15057 if (PageWriteback(page))
15058 wait_on_page_bit(page, PG_writeback);
15059 }
15060
15061 extern void end_page_writeback(struct page *page);
15062
15063
15064
15065
15066
15067
15068
15069 static inline __attribute__((always_inline)) int fault_in_pages_writeable(char *uaddr, int size)
15070 {
15071 int ret;
15072
15073 if (__builtin_expect(!!(size == 0), 0))
15074 return 0;
15075
15076
15077
15078
15079
15080 ret = ({ int _err = 0; typeof(*(uaddr)) _x = (0); typeof(*(uaddr)) *_p = (uaddr); if (!_access_ok((unsigned long)(_p), (sizeof(*(_p))))) { _err = -14; } else { switch (sizeof (*(_p))) { case 1: __asm__ ("B""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 2: __asm__ ("W""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 4: __asm__ ("""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 8: { long _xl, _xh; _xl = ((long *)&_x)[0]; _xh = ((long *)&_x)[1]; __asm__ ("""[%1] = %0;\n\t" : :"d" (_xl),"a" (((unsigned long *)(((long *)_p)+0))) : "memory"); __asm__ ("""[%1] = %0;\n\t" : :"d" (_xh),"a" (((unsigned long *)(((long *)_p)+1))) : "memory"); } break; default: _err = (printk("<6>" "put_user_bad %s:%d %s\n", "include/linux/pagemap.h", 398, __func__), bad_user_access_length(), (-14)); break; } } _err; });
15081 if (ret == 0) {
15082 char *end = uaddr + size - 1;
15083
15084
15085
15086
15087
15088 if (((unsigned long)uaddr & (~((1UL << 12)-1))) !=
15089 ((unsigned long)end & (~((1UL << 12)-1))))
15090 ret = ({ int _err = 0; typeof(*(end)) _x = (0); typeof(*(end)) *_p = (end); if (!_access_ok((unsigned long)(_p), (sizeof(*(_p))))) { _err = -14; } else { switch (sizeof (*(_p))) { case 1: __asm__ ("B""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 2: __asm__ ("W""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 4: __asm__ ("""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 8: { long _xl, _xh; _xl = ((long *)&_x)[0]; _xh = ((long *)&_x)[1]; __asm__ ("""[%1] = %0;\n\t" : :"d" (_xl),"a" (((unsigned long *)(((long *)_p)+0))) : "memory"); __asm__ ("""[%1] = %0;\n\t" : :"d" (_xh),"a" (((unsigned long *)(((long *)_p)+1))) : "memory"); } break; default: _err = (printk("<6>" "put_user_bad %s:%d %s\n", "include/linux/pagemap.h", 408, __func__), bad_user_access_length(), (-14)); break; } } _err; });
15091 }
15092 return ret;
15093 }
15094
15095 static inline __attribute__((always_inline)) int fault_in_pages_readable(const char *uaddr, int size)
15096 {
15097 volatile char c;
15098 int ret;
15099
15100 if (__builtin_expect(!!(size == 0), 0))
15101 return 0;
15102
15103 ret = ({ int _err = 0; unsigned long _val = 0; const typeof(*(uaddr)) *_p = (uaddr); const size_t ptr_size = sizeof(*(_p)); if (__builtin_expect(!!(_access_ok((unsigned long)(_p), (ptr_size))), 1)) { ((void)sizeof(char[1 - 2*!!(ptr_size >= 8)])); switch (ptr_size) { case 1: ({ __asm__ __volatile__ ( "%0 =" "B" "[%1]" "(Z)" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; case 2: ({ __asm__ __volatile__ ( "%0 =" "W" "[%1]" "(Z)" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; case 4: ({ __asm__ __volatile__ ( "%0 =" "" "[%1]" "" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; } } else _err = -14; c = (typeof(*(uaddr)))_val; _err; });
15104 if (ret == 0) {
15105 const char *end = uaddr + size - 1;
15106
15107 if (((unsigned long)uaddr & (~((1UL << 12)-1))) !=
15108 ((unsigned long)end & (~((1UL << 12)-1))))
15109 ret = ({ int _err = 0; unsigned long _val = 0; const typeof(*(end)) *_p = (end); const size_t ptr_size = sizeof(*(_p)); if (__builtin_expect(!!(_access_ok((unsigned long)(_p), (ptr_size))), 1)) { ((void)sizeof(char[1 - 2*!!(ptr_size >= 8)])); switch (ptr_size) { case 1: ({ __asm__ __volatile__ ( "%0 =" "B" "[%1]" "(Z)" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; case 2: ({ __asm__ __volatile__ ( "%0 =" "W" "[%1]" "(Z)" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; case 4: ({ __asm__ __volatile__ ( "%0 =" "" "[%1]" "" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; } } else _err = -14; c = (typeof(*(end)))_val; _err; });
15110 }
15111 return ret;
15112 }
15113
15114 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
15115 unsigned long index, gfp_t gfp_mask);
15116 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
15117 unsigned long index, gfp_t gfp_mask);
15118 extern void remove_from_page_cache(struct page *page);
15119 extern void __remove_from_page_cache(struct page *page);
15120
15121
15122
15123
15124
15125 static inline __attribute__((always_inline)) int add_to_page_cache(struct page *page,
15126 struct address_space *mapping, unsigned long offset, gfp_t gfp_mask)
15127 {
15128 int error;
15129
15130 __set_page_locked(page);
15131 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
15132 if (__builtin_expect(!!(error), 0))
15133 __clear_page_locked(page);
15134 return error;
15135 }
15136 # 20 "kernel/trace/trace.c" 2
15137
15138
15139
15140 # 1 "include/linux/ftrace.h" 1
15141 # 9 "include/linux/ftrace.h"
15142 # 1 "include/linux/module.h" 1
15143 # 13 "include/linux/module.h"
15144 # 1 "include/linux/kmod.h" 1
15145 # 35 "include/linux/kmod.h"
15146 static inline __attribute__((always_inline)) int request_module(const char * name, ...) { return -38; }
15147
15148
15149
15150
15151 struct key;
15152 struct file;
15153 struct subprocess_info;
15154
15155
15156 struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
15157 char **envp, gfp_t gfp_mask);
15158
15159
15160 void call_usermodehelper_setkeys(struct subprocess_info *info,
15161 struct key *session_keyring);
15162 int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
15163 struct file **filp);
15164 void call_usermodehelper_setcleanup(struct subprocess_info *info,
15165 void (*cleanup)(char **argv, char **envp));
15166
15167 enum umh_wait {
15168 UMH_NO_WAIT = -1,
15169 UMH_WAIT_EXEC = 0,
15170 UMH_WAIT_PROC = 1,
15171 };
15172
15173
15174 int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
15175
15176
15177
15178 void call_usermodehelper_freeinfo(struct subprocess_info *info);
15179
15180 static inline __attribute__((always_inline)) int
15181 call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
15182 {
15183 struct subprocess_info *info;
15184 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? ((( gfp_t)0x20u)) : ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u));
15185
15186 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
15187 if (info == ((void *)0))
15188 return -12;
15189 return call_usermodehelper_exec(info, wait);
15190 }
15191
15192 static inline __attribute__((always_inline)) int
15193 call_usermodehelper_keys(char *path, char **argv, char **envp,
15194 struct key *session_keyring, enum umh_wait wait)
15195 {
15196 struct subprocess_info *info;
15197 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? ((( gfp_t)0x20u)) : ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u));
15198
15199 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
15200 if (info == ((void *)0))
15201 return -12;
15202
15203 call_usermodehelper_setkeys(info, session_keyring);
15204 return call_usermodehelper_exec(info, wait);
15205 }
15206
15207 extern void usermodehelper_init(void);
15208
15209 struct file;
15210 extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[],
15211 struct file **filp);
15212
15213 extern int usermodehelper_disable(void);
15214 extern void usermodehelper_enable(void);
15215 # 14 "include/linux/module.h" 2
15216 # 1 "include/linux/elf.h" 1
15217
15218
15219
15220
15221 # 1 "include/linux/elf-em.h" 1
15222 # 6 "include/linux/elf.h" 2
15223
15224 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/elf.h" 1
15225 # 21 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/elf.h"
15226 typedef unsigned long elf_greg_t;
15227
15228
15229 typedef elf_greg_t elf_gregset_t[(sizeof(struct user_regs_struct) / sizeof(elf_greg_t))];
15230
15231 typedef struct user_bfinfp_struct elf_fpregset_t;
15232 # 8 "include/linux/elf.h" 2
15233
15234
15235 struct file;
15236 # 20 "include/linux/elf.h"
15237 typedef __u32 Elf32_Addr;
15238 typedef __u16 Elf32_Half;
15239 typedef __u32 Elf32_Off;
15240 typedef __s32 Elf32_Sword;
15241 typedef __u32 Elf32_Word;
15242
15243
15244 typedef __u64 Elf64_Addr;
15245 typedef __u16 Elf64_Half;
15246 typedef __s16 Elf64_SHalf;
15247 typedef __u64 Elf64_Off;
15248 typedef __s32 Elf64_Sword;
15249 typedef __u32 Elf64_Word;
15250 typedef __u64 Elf64_Xword;
15251 typedef __s64 Elf64_Sxword;
15252 # 127 "include/linux/elf.h"
15253 typedef struct dynamic{
15254 Elf32_Sword d_tag;
15255 union{
15256 Elf32_Sword d_val;
15257 Elf32_Addr d_ptr;
15258 } d_un;
15259 } Elf32_Dyn;
15260
15261 typedef struct {
15262 Elf64_Sxword d_tag;
15263 union {
15264 Elf64_Xword d_val;
15265 Elf64_Addr d_ptr;
15266 } d_un;
15267 } Elf64_Dyn;
15268 # 150 "include/linux/elf.h"
15269 typedef struct elf32_rel {
15270 Elf32_Addr r_offset;
15271 Elf32_Word r_info;
15272 } Elf32_Rel;
15273
15274 typedef struct elf64_rel {
15275 Elf64_Addr r_offset;
15276 Elf64_Xword r_info;
15277 } Elf64_Rel;
15278
15279 typedef struct elf32_rela{
15280 Elf32_Addr r_offset;
15281 Elf32_Word r_info;
15282 Elf32_Sword r_addend;
15283 } Elf32_Rela;
15284
15285 typedef struct elf64_rela {
15286 Elf64_Addr r_offset;
15287 Elf64_Xword r_info;
15288 Elf64_Sxword r_addend;
15289 } Elf64_Rela;
15290
15291 typedef struct elf32_sym{
15292 Elf32_Word st_name;
15293 Elf32_Addr st_value;
15294 Elf32_Word st_size;
15295 unsigned char st_info;
15296 unsigned char st_other;
15297 Elf32_Half st_shndx;
15298 } Elf32_Sym;
15299
15300 typedef struct elf64_sym {
15301 Elf64_Word st_name;
15302 unsigned char st_info;
15303 unsigned char st_other;
15304 Elf64_Half st_shndx;
15305 Elf64_Addr st_value;
15306 Elf64_Xword st_size;
15307 } Elf64_Sym;
15308
15309
15310
15311
15312 typedef struct elf32_hdr{
15313 unsigned char e_ident[16];
15314 Elf32_Half e_type;
15315 Elf32_Half e_machine;
15316 Elf32_Word e_version;
15317 Elf32_Addr e_entry;
15318 Elf32_Off e_phoff;
15319 Elf32_Off e_shoff;
15320 Elf32_Word e_flags;
15321 Elf32_Half e_ehsize;
15322 Elf32_Half e_phentsize;
15323 Elf32_Half e_phnum;
15324 Elf32_Half e_shentsize;
15325 Elf32_Half e_shnum;
15326 Elf32_Half e_shstrndx;
15327 } Elf32_Ehdr;
15328
15329 typedef struct elf64_hdr {
15330 unsigned char e_ident[16];
15331 Elf64_Half e_type;
15332 Elf64_Half e_machine;
15333 Elf64_Word e_version;
15334 Elf64_Addr e_entry;
15335 Elf64_Off e_phoff;
15336 Elf64_Off e_shoff;
15337 Elf64_Word e_flags;
15338 Elf64_Half e_ehsize;
15339 Elf64_Half e_phentsize;
15340 Elf64_Half e_phnum;
15341 Elf64_Half e_shentsize;
15342 Elf64_Half e_shnum;
15343 Elf64_Half e_shstrndx;
15344 } Elf64_Ehdr;
15345
15346
15347
15348
15349
15350
15351
15352 typedef struct elf32_phdr{
15353 Elf32_Word p_type;
15354 Elf32_Off p_offset;
15355 Elf32_Addr p_vaddr;
15356 Elf32_Addr p_paddr;
15357 Elf32_Word p_filesz;
15358 Elf32_Word p_memsz;
15359 Elf32_Word p_flags;
15360 Elf32_Word p_align;
15361 } Elf32_Phdr;
15362
15363 typedef struct elf64_phdr {
15364 Elf64_Word p_type;
15365 Elf64_Word p_flags;
15366 Elf64_Off p_offset;
15367 Elf64_Addr p_vaddr;
15368 Elf64_Addr p_paddr;
15369 Elf64_Xword p_filesz;
15370 Elf64_Xword p_memsz;
15371 Elf64_Xword p_align;
15372 } Elf64_Phdr;
15373 # 289 "include/linux/elf.h"
15374 typedef struct {
15375 Elf32_Word sh_name;
15376 Elf32_Word sh_type;
15377 Elf32_Word sh_flags;
15378 Elf32_Addr sh_addr;
15379 Elf32_Off sh_offset;
15380 Elf32_Word sh_size;
15381 Elf32_Word sh_link;
15382 Elf32_Word sh_info;
15383 Elf32_Word sh_addralign;
15384 Elf32_Word sh_entsize;
15385 } Elf32_Shdr;
15386
15387 typedef struct elf64_shdr {
15388 Elf64_Word sh_name;
15389 Elf64_Word sh_type;
15390 Elf64_Xword sh_flags;
15391 Elf64_Addr sh_addr;
15392 Elf64_Off sh_offset;
15393 Elf64_Xword sh_size;
15394 Elf64_Word sh_link;
15395 Elf64_Word sh_info;
15396 Elf64_Xword sh_addralign;
15397 Elf64_Xword sh_entsize;
15398 } Elf64_Shdr;
15399 # 367 "include/linux/elf.h"
15400 typedef struct elf32_note {
15401 Elf32_Word n_namesz;
15402 Elf32_Word n_descsz;
15403 Elf32_Word n_type;
15404 } Elf32_Nhdr;
15405
15406
15407 typedef struct elf64_note {
15408 Elf64_Word n_namesz;
15409 Elf64_Word n_descsz;
15410 Elf64_Word n_type;
15411 } Elf64_Nhdr;
15412
15413
15414
15415 extern Elf32_Dyn _DYNAMIC [];
15416 # 400 "include/linux/elf.h"
15417 static inline __attribute__((always_inline)) int elf_coredump_extra_notes_size(void) { return 0; }
15418 static inline __attribute__((always_inline)) int elf_coredump_extra_notes_write(struct file *file,
15419 loff_t *foffset) { return 0; }
15420 # 15 "include/linux/module.h" 2
15421
15422
15423 # 1 "include/linux/moduleparam.h" 1
15424 # 32 "include/linux/moduleparam.h"
15425 struct kernel_param;
15426
15427
15428 typedef int (*param_set_fn)(const char *val, struct kernel_param *kp);
15429
15430 typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp);
15431
15432 struct kernel_param {
15433 const char *name;
15434 unsigned int perm;
15435 param_set_fn set;
15436 param_get_fn get;
15437 union {
15438 void *arg;
15439 const struct kparam_string *str;
15440 const struct kparam_array *arr;
15441 };
15442 };
15443
15444
15445 struct kparam_string {
15446 unsigned int maxlen;
15447 char *string;
15448 };
15449
15450
15451 struct kparam_array
15452 {
15453 unsigned int max;
15454 unsigned int *num;
15455 param_set_fn set;
15456 param_get_fn get;
15457 unsigned int elemsize;
15458 void *elem;
15459 };
15460 # 135 "include/linux/moduleparam.h"
15461 extern int parse_args(const char *name,
15462 char *args,
15463 struct kernel_param *params,
15464 unsigned num,
15465 int (*unknown)(char *param, char *val));
15466
15467
15468
15469
15470
15471
15472
15473 extern int param_set_byte(const char *val, struct kernel_param *kp);
15474 extern int param_get_byte(char *buffer, struct kernel_param *kp);
15475
15476
15477 extern int param_set_short(const char *val, struct kernel_param *kp);
15478 extern int param_get_short(char *buffer, struct kernel_param *kp);
15479
15480
15481 extern int param_set_ushort(const char *val, struct kernel_param *kp);
15482 extern int param_get_ushort(char *buffer, struct kernel_param *kp);
15483
15484
15485 extern int param_set_int(const char *val, struct kernel_param *kp);
15486 extern int param_get_int(char *buffer, struct kernel_param *kp);
15487
15488
15489 extern int param_set_uint(const char *val, struct kernel_param *kp);
15490 extern int param_get_uint(char *buffer, struct kernel_param *kp);
15491
15492
15493 extern int param_set_long(const char *val, struct kernel_param *kp);
15494 extern int param_get_long(char *buffer, struct kernel_param *kp);
15495
15496
15497 extern int param_set_ulong(const char *val, struct kernel_param *kp);
15498 extern int param_get_ulong(char *buffer, struct kernel_param *kp);
15499
15500
15501 extern int param_set_charp(const char *val, struct kernel_param *kp);
15502 extern int param_get_charp(char *buffer, struct kernel_param *kp);
15503
15504
15505 extern int param_set_bool(const char *val, struct kernel_param *kp);
15506 extern int param_get_bool(char *buffer, struct kernel_param *kp);
15507
15508
15509 extern int param_set_invbool(const char *val, struct kernel_param *kp);
15510 extern int param_get_invbool(char *buffer, struct kernel_param *kp);
15511 # 199 "include/linux/moduleparam.h"
15512 extern int param_array_set(const char *val, struct kernel_param *kp);
15513 extern int param_array_get(char *buffer, struct kernel_param *kp);
15514
15515 extern int param_set_copystring(const char *val, struct kernel_param *kp);
15516 extern int param_get_string(char *buffer, struct kernel_param *kp);
15517
15518
15519
15520 struct module;
15521 # 216 "include/linux/moduleparam.h"
15522 static inline __attribute__((always_inline)) int module_param_sysfs_setup(struct module *mod,
15523 struct kernel_param *kparam,
15524 unsigned int num_params)
15525 {
15526 return 0;
15527 }
15528
15529 static inline __attribute__((always_inline)) void module_param_sysfs_remove(struct module *mod)
15530 { }
15531 # 18 "include/linux/module.h" 2
15532 # 1 "include/linux/marker.h" 1
15533 # 17 "include/linux/marker.h"
15534 struct module;
15535 struct marker;
15536 # 32 "include/linux/marker.h"
15537 typedef void marker_probe_func(void *probe_private, void *call_private,
15538 const char *fmt, va_list *args);
15539
15540 struct marker_probe_closure {
15541 marker_probe_func *func;
15542 void *probe_private;
15543 };
15544
15545 struct marker {
15546 const char *name;
15547 const char *format;
15548
15549
15550 char state;
15551 char ptype;
15552
15553 void (*call)(const struct marker *mdata, void *call_private, ...);
15554 struct marker_probe_closure single;
15555 struct marker_probe_closure *multi;
15556 } __attribute__((aligned(8)));
15557 # 83 "include/linux/marker.h"
15558 extern void marker_update_probe_range(struct marker *begin,
15559 struct marker *end);
15560 # 125 "include/linux/marker.h"
15561 static inline __attribute__((always_inline)) void __attribute__((format(printf,1,2))) ___mark_check_format(const char *fmt, ...)
15562 {
15563 }
15564
15565
15566
15567
15568
15569
15570
15571 extern marker_probe_func __mark_empty_function;
15572
15573 extern void marker_probe_cb(const struct marker *mdata,
15574 void *call_private, ...);
15575 extern void marker_probe_cb_noarg(const struct marker *mdata,
15576 void *call_private, ...);
15577
15578
15579
15580
15581
15582 extern int marker_probe_register(const char *name, const char *format,
15583 marker_probe_func *probe, void *probe_private);
15584
15585
15586
15587
15588 extern int marker_probe_unregister(const char *name,
15589 marker_probe_func *probe, void *probe_private);
15590
15591
15592
15593 extern int marker_probe_unregister_private_data(marker_probe_func *probe,
15594 void *probe_private);
15595
15596 extern void *marker_get_private_data(const char *name, marker_probe_func *probe,
15597 int num);
15598 # 19 "include/linux/module.h" 2
15599 # 1 "include/linux/tracepoint.h" 1
15600 # 20 "include/linux/tracepoint.h"
15601 struct module;
15602 struct tracepoint;
15603
15604 struct tracepoint {
15605 const char *name;
15606 int state;
15607 void **funcs;
15608 } __attribute__((aligned(8)));
15609 # 82 "include/linux/tracepoint.h"
15610 extern void tracepoint_update_probe_range(struct tracepoint *begin,
15611 struct tracepoint *end);
15612 # 107 "include/linux/tracepoint.h"
15613 extern int tracepoint_probe_register(const char *name, void *probe);
15614
15615
15616
15617
15618
15619 extern int tracepoint_probe_unregister(const char *name, void *probe);
15620
15621 struct tracepoint_iter {
15622 struct module *module;
15623 struct tracepoint *tracepoint;
15624 };
15625
15626 extern void tracepoint_iter_start(struct tracepoint_iter *iter);
15627 extern void tracepoint_iter_next(struct tracepoint_iter *iter);
15628 extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
15629 extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
15630 extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
15631 struct tracepoint *begin, struct tracepoint *end);
15632
15633
15634
15635
15636
15637
15638 static inline __attribute__((always_inline)) void tracepoint_synchronize_unregister(void)
15639 {
15640 synchronize_rcu();
15641 }
15642 # 20 "include/linux/module.h" 2
15643 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/local.h" 1
15644
15645
15646
15647 # 1 "include/asm-generic/local.h" 1
15648 # 22 "include/asm-generic/local.h"
15649 typedef struct
15650 {
15651 atomic_long_t a;
15652 } local_t;
15653 # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/local.h" 2
15654 # 21 "include/linux/module.h" 2
15655
15656 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/module.h" 1
15657 # 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/module.h"
15658 struct mod_arch_specific {
15659 Elf32_Shdr *text_l1;
15660 Elf32_Shdr *data_a_l1;
15661 Elf32_Shdr *bss_a_l1;
15662 Elf32_Shdr *data_b_l1;
15663 Elf32_Shdr *bss_b_l1;
15664 Elf32_Shdr *text_l2;
15665 Elf32_Shdr *data_l2;
15666 Elf32_Shdr *bss_l2;
15667 };
15668 # 23 "include/linux/module.h" 2
15669 # 34 "include/linux/module.h"
15670 struct kernel_symbol
15671 {
15672 unsigned long value;
15673 const char *name;
15674 };
15675
15676 struct modversion_info
15677 {
15678 unsigned long crc;
15679 char name[(64 - sizeof(unsigned long))];
15680 };
15681
15682 struct module;
15683
15684 struct module_attribute {
15685 struct attribute attr;
15686 ssize_t (*show)(struct module_attribute *, struct module *, char *);
15687 ssize_t (*store)(struct module_attribute *, struct module *,
15688 const char *, size_t count);
15689 void (*setup)(struct module *, const char *);
15690 int (*test)(struct module *);
15691 void (*free)(struct module *);
15692 };
15693
15694 struct module_kobject
15695 {
15696 struct kobject kobj;
15697 struct module *mod;
15698 struct kobject *drivers_dir;
15699 struct module_param_attrs *mp;
15700 };
15701
15702
15703 extern int init_module(void);
15704 extern void cleanup_module(void);
15705
15706
15707 struct exception_table_entry;
15708
15709 const struct exception_table_entry *
15710 search_extable(const struct exception_table_entry *first,
15711 const struct exception_table_entry *last,
15712 unsigned long value);
15713 void sort_extable(struct exception_table_entry *start,
15714 struct exception_table_entry *finish);
15715 void sort_main_extable(void);
15716 # 165 "include/linux/module.h"
15717 const struct exception_table_entry *search_exception_tables(unsigned long add);
15718
15719 struct notifier_block;
15720 # 472 "include/linux/module.h"
15721 static inline __attribute__((always_inline)) const struct exception_table_entry *
15722 search_module_extables(unsigned long addr)
15723 {
15724 return ((void *)0);
15725 }
15726
15727
15728 static inline __attribute__((always_inline)) struct module *module_text_address(unsigned long addr)
15729 {
15730 return ((void *)0);
15731 }
15732
15733
15734 static inline __attribute__((always_inline)) struct module *__module_text_address(unsigned long addr)
15735 {
15736 return ((void *)0);
15737 }
15738
15739 static inline __attribute__((always_inline)) int is_module_address(unsigned long addr)
15740 {
15741 return 0;
15742 }
15743
15744
15745
15746
15747
15748
15749 static inline __attribute__((always_inline)) void __module_get(struct module *module)
15750 {
15751 }
15752
15753 static inline __attribute__((always_inline)) int try_module_get(struct module *module)
15754 {
15755 return 1;
15756 }
15757
15758 static inline __attribute__((always_inline)) void module_put(struct module *module)
15759 {
15760 }
15761
15762
15763
15764
15765 static inline __attribute__((always_inline)) const char *module_address_lookup(unsigned long addr,
15766 unsigned long *symbolsize,
15767 unsigned long *offset,
15768 char **modname,
15769 char *namebuf)
15770 {
15771 return ((void *)0);
15772 }
15773
15774 static inline __attribute__((always_inline)) int lookup_module_symbol_name(unsigned long addr, char *symname)
15775 {
15776 return -34;
15777 }
15778
15779 static inline __attribute__((always_inline)) int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
15780 {
15781 return -34;
15782 }
15783
15784 static inline __attribute__((always_inline)) int module_get_kallsym(unsigned int symnum, unsigned long *value,
15785 char *type, char *name,
15786 char *module_name, int *exported)
15787 {
15788 return -34;
15789 }
15790
15791 static inline __attribute__((always_inline)) unsigned long module_kallsyms_lookup_name(const char *name)
15792 {
15793 return 0;
15794 }
15795
15796 static inline __attribute__((always_inline)) int register_module_notifier(struct notifier_block * nb)
15797 {
15798
15799 return 0;
15800 }
15801
15802 static inline __attribute__((always_inline)) int unregister_module_notifier(struct notifier_block * nb)
15803 {
15804 return 0;
15805 }
15806
15807
15808
15809 static inline __attribute__((always_inline)) void print_modules(void)
15810 {
15811 }
15812
15813 static inline __attribute__((always_inline)) void module_update_markers(void)
15814 {
15815 }
15816
15817 static inline __attribute__((always_inline)) void module_update_tracepoints(void)
15818 {
15819 }
15820
15821 static inline __attribute__((always_inline)) int module_get_iter_tracepoints(struct tracepoint_iter *iter)
15822 {
15823 return 0;
15824 }
15825
15826
15827
15828 struct device_driver;
15829 # 596 "include/linux/module.h"
15830 static inline __attribute__((always_inline)) int mod_sysfs_init(struct module *mod)
15831 {
15832 return 0;
15833 }
15834
15835 static inline __attribute__((always_inline)) int mod_sysfs_setup(struct module *mod,
15836 struct kernel_param *kparam,
15837 unsigned int num_params)
15838 {
15839 return 0;
15840 }
15841
15842 static inline __attribute__((always_inline)) int module_add_modinfo_attrs(struct module *mod)
15843 {
15844 return 0;
15845 }
15846
15847 static inline __attribute__((always_inline)) void module_remove_modinfo_attrs(struct module *mod)
15848 { }
15849 # 10 "include/linux/ftrace.h" 2
15850 # 44 "include/linux/ftrace.h"
15851 static inline __attribute__((always_inline)) void ftrace_kill(void) { }
15852 # 113 "include/linux/ftrace.h"
15853 static inline __attribute__((always_inline)) void ftrace_release(void *start, unsigned long size) { }
15854
15855
15856
15857 void ftrace_kill(void);
15858
15859 static inline __attribute__((always_inline)) void tracer_disable(void)
15860 {
15861
15862
15863
15864 }
15865
15866
15867
15868
15869
15870
15871 static inline __attribute__((always_inline)) int __ftrace_enabled_save(void)
15872 {
15873
15874
15875
15876
15877
15878 return 0;
15879
15880 }
15881
15882 static inline __attribute__((always_inline)) void __ftrace_enabled_restore(int enabled)
15883 {
15884
15885
15886
15887 }
15888 # 185 "include/linux/ftrace.h"
15889 extern void
15890 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
15891 # 205 "include/linux/ftrace.h"
15892 extern int
15893 __ftrace_printk(unsigned long ip, const char *fmt, ...)
15894 __attribute__ ((format (printf, 2, 3)));
15895 extern void ftrace_dump(void);
15896 # 227 "include/linux/ftrace.h"
15897 static inline __attribute__((always_inline)) void ftrace_init(void) { }
15898 static inline __attribute__((always_inline)) void
15899 ftrace_init_module(unsigned long *start, unsigned long *end) { }
15900
15901
15902
15903 struct boot_trace {
15904 pid_t caller;
15905 char func[(sizeof("%s+%#lx/%#lx [%s]") + (128 - 1) + 2*(32*3/10) + ((64 - sizeof(unsigned long)) - 1) + 1)];
15906 int result;
15907 unsigned long long duration;
15908 ktime_t calltime;
15909 ktime_t rettime;
15910 };
15911
15912
15913 extern void trace_boot(struct boot_trace *it, initcall_t fn);
15914 extern void start_boot_trace(void);
15915 extern void stop_boot_trace(void);
15916 # 24 "kernel/trace/trace.c" 2
15917
15918
15919 # 1 "include/linux/kdebug.h" 1
15920
15921
15922
15923 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/kdebug.h" 1
15924 # 1 "include/asm-generic/kdebug.h" 1
15925
15926
15927
15928 enum die_val {
15929 DIE_UNUSED,
15930 DIE_OOPS=1
15931 };
15932 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/kdebug.h" 2
15933 # 5 "include/linux/kdebug.h" 2
15934
15935 struct notifier_block;
15936
15937 struct die_args {
15938 struct pt_regs *regs;
15939 const char *str;
15940 long err;
15941 int trapnr;
15942 int signr;
15943 };
15944
15945 int register_die_notifier(struct notifier_block *nb);
15946 int unregister_die_notifier(struct notifier_block *nb);
15947
15948 int notify_die(enum die_val val, const char *str,
15949 struct pt_regs *regs, long err, int trap, int sig);
15950 # 27 "kernel/trace/trace.c" 2
15951 # 1 "include/linux/ctype.h" 1
15952 # 18 "include/linux/ctype.h"
15953 extern unsigned char _ctype[];
15954 # 37 "include/linux/ctype.h"
15955 static inline __attribute__((always_inline)) unsigned char __tolower(unsigned char c)
15956 {
15957 if ((((_ctype[(int)(unsigned char)(c)])&(0x01)) != 0))
15958 c -= 'A'-'a';
15959 return c;
15960 }
15961
15962 static inline __attribute__((always_inline)) unsigned char __toupper(unsigned char c)
15963 {
15964 if ((((_ctype[(int)(unsigned char)(c)])&(0x02)) != 0))
15965 c -= 'a'-'A';
15966 return c;
15967 }
15968 # 28 "kernel/trace/trace.c" 2
15969
15970 # 1 "include/linux/poll.h" 1
15971
15972
15973
15974 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/poll.h" 1
15975 # 18 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/poll.h"
15976 struct pollfd {
15977 int fd;
15978 short events;
15979 short revents;
15980 };
15981 # 5 "include/linux/poll.h" 2
15982 # 26 "include/linux/poll.h"
15983 struct poll_table_struct;
15984
15985
15986
15987
15988 typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
15989
15990 typedef struct poll_table_struct {
15991 poll_queue_proc qproc;
15992 } poll_table;
15993
15994 static inline __attribute__((always_inline)) void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
15995 {
15996 if (p && wait_address)
15997 p->qproc(filp, wait_address, p);
15998 }
15999
16000 static inline __attribute__((always_inline)) void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
16001 {
16002 pt->qproc = qproc;
16003 }
16004
16005 struct poll_table_entry {
16006 struct file * filp;
16007 wait_queue_t wait;
16008 wait_queue_head_t * wait_address;
16009 };
16010
16011
16012
16013
16014 struct poll_wqueues {
16015 poll_table pt;
16016 struct poll_table_page * table;
16017 int error;
16018 int inline_index;
16019 struct poll_table_entry inline_entries[((832 - 256) / sizeof(struct poll_table_entry))];
16020 };
16021
16022 extern void poll_initwait(struct poll_wqueues *pwq);
16023 extern void poll_freewait(struct poll_wqueues *pwq);
16024
16025
16026
16027
16028
16029 typedef struct {
16030 unsigned long *in, *out, *ex;
16031 unsigned long *res_in, *res_out, *res_ex;
16032 } fd_set_bits;
16033 # 90 "include/linux/poll.h"
16034 static inline __attribute__((always_inline))
16035 int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
16036 {
16037 nr = ((((nr)+(8*sizeof(long))-1)/(8*sizeof(long)))*sizeof(long));
16038 if (ufdset)
16039 return copy_from_user(fdset, ufdset, nr) ? -14 : 0;
16040
16041 memset(fdset, 0, nr);
16042 return 0;
16043 }
16044
16045 static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
16046 set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
16047 {
16048 if (ufdset)
16049 return copy_to_user(ufdset, fdset, ((((nr)+(8*sizeof(long))-1)/(8*sizeof(long)))*sizeof(long)));
16050 return 0;
16051 }
16052
16053 static inline __attribute__((always_inline))
16054 void zero_fd_set(unsigned long nr, unsigned long *fdset)
16055 {
16056 memset(fdset, 0, ((((nr)+(8*sizeof(long))-1)/(8*sizeof(long)))*sizeof(long)));
16057 }
16058
16059
16060
16061 extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
16062 extern int do_sys_poll(struct pollfd * ufds, unsigned int nfds,
16063 struct timespec *end_time);
16064 extern int core_sys_select(int n, fd_set *inp, fd_set *outp,
16065 fd_set *exp, struct timespec *end_time);
16066
16067 extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);
16068 # 30 "kernel/trace/trace.c" 2
16069
16070
16071 # 1 "include/linux/kprobes.h" 1
16072 # 261 "include/linux/kprobes.h"
16073 struct jprobe;
16074 struct kretprobe;
16075
16076 static inline __attribute__((always_inline)) struct kprobe *get_kprobe(void *addr)
16077 {
16078 return ((void *)0);
16079 }
16080 static inline __attribute__((always_inline)) struct kprobe *kprobe_running(void)
16081 {
16082 return ((void *)0);
16083 }
16084 static inline __attribute__((always_inline)) int register_kprobe(struct kprobe *p)
16085 {
16086 return -38;
16087 }
16088 static inline __attribute__((always_inline)) int register_kprobes(struct kprobe **kps, int num)
16089 {
16090 return -38;
16091 }
16092 static inline __attribute__((always_inline)) void unregister_kprobe(struct kprobe *p)
16093 {
16094 }
16095 static inline __attribute__((always_inline)) void unregister_kprobes(struct kprobe **kps, int num)
16096 {
16097 }
16098 static inline __attribute__((always_inline)) int register_jprobe(struct jprobe *p)
16099 {
16100 return -38;
16101 }
16102 static inline __attribute__((always_inline)) int register_jprobes(struct jprobe **jps, int num)
16103 {
16104 return -38;
16105 }
16106 static inline __attribute__((always_inline)) void unregister_jprobe(struct jprobe *p)
16107 {
16108 }
16109 static inline __attribute__((always_inline)) void unregister_jprobes(struct jprobe **jps, int num)
16110 {
16111 }
16112 static inline __attribute__((always_inline)) void jprobe_return(void)
16113 {
16114 }
16115 static inline __attribute__((always_inline)) int register_kretprobe(struct kretprobe *rp)
16116 {
16117 return -38;
16118 }
16119 static inline __attribute__((always_inline)) int register_kretprobes(struct kretprobe **rps, int num)
16120 {
16121 return -38;
16122 }
16123 static inline __attribute__((always_inline)) void unregister_kretprobe(struct kretprobe *rp)
16124 {
16125 }
16126 static inline __attribute__((always_inline)) void unregister_kretprobes(struct kretprobe **rps, int num)
16127 {
16128 }
16129 static inline __attribute__((always_inline)) void kprobe_flush_task(struct task_struct *tk)
16130 {
16131 }
16132 # 33 "kernel/trace/trace.c" 2
16133 # 1 "include/linux/writeback.h" 1
16134 # 10 "include/linux/writeback.h"
16135 struct backing_dev_info;
16136
16137 extern spinlock_t inode_lock;
16138 extern struct list_head inode_in_use;
16139 extern struct list_head inode_unused;
16140
16141
16142
16143
16144
16145 static inline __attribute__((always_inline)) int task_is_pdflush(struct task_struct *task)
16146 {
16147 return task->flags & 0x00001000;
16148 }
16149
16150
16151
16152
16153
16154
16155 enum writeback_sync_modes {
16156 WB_SYNC_NONE,
16157 WB_SYNC_ALL,
16158 WB_SYNC_HOLD,
16159 };
16160
16161
16162
16163
16164
16165
16166 struct writeback_control {
16167 struct backing_dev_info *bdi;
16168
16169 enum writeback_sync_modes sync_mode;
16170 unsigned long *older_than_this;
16171
16172 long nr_to_write;
16173
16174 long pages_skipped;
16175
16176
16177
16178
16179
16180
16181 loff_t range_start;
16182 loff_t range_end;
16183
16184 unsigned nonblocking:1;
16185 unsigned encountered_congestion:1;
16186 unsigned for_kupdate:1;
16187 unsigned for_reclaim:1;
16188 unsigned for_writepages:1;
16189 unsigned range_cyclic:1;
16190 unsigned more_io:1;
16191 # 74 "include/linux/writeback.h"
16192 unsigned no_nrwrite_index_update:1;
16193 };
16194
16195
16196
16197
16198 void writeback_inodes(struct writeback_control *wbc);
16199 int inode_wait(void *);
16200 void sync_inodes_sb(struct super_block *, int wait);
16201 void sync_inodes(int wait);
16202
16203
16204 static inline __attribute__((always_inline)) void wait_on_inode(struct inode *inode)
16205 {
16206 do { do { } while (0); } while (0);
16207 wait_on_bit(&inode->i_state, 7, inode_wait,
16208 2);
16209 }
16210 static inline __attribute__((always_inline)) void inode_sync_wait(struct inode *inode)
16211 {
16212 do { do { } while (0); } while (0);
16213 wait_on_bit(&inode->i_state, 8, inode_wait,
16214 2);
16215 }
16216
16217
16218
16219
16220
16221 int wakeup_pdflush(long nr_pages);
16222 void laptop_io_completion(void);
16223 void laptop_sync_completion(void);
16224 void throttle_vm_writeout(gfp_t gfp_mask);
16225
16226
16227 extern int dirty_background_ratio;
16228 extern int vm_dirty_ratio;
16229 extern int dirty_writeback_interval;
16230 extern int dirty_expire_interval;
16231 extern int vm_highmem_is_dirtyable;
16232 extern int block_dump;
16233 extern int laptop_mode;
16234
16235 extern unsigned long determine_dirtyable_memory(void);
16236
16237 extern int dirty_ratio_handler(struct ctl_table *table, int write,
16238 struct file *filp, void *buffer, size_t *lenp,
16239 loff_t *ppos);
16240
16241 struct ctl_table;
16242 struct file;
16243 int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
16244 void *, size_t *, loff_t *);
16245
16246 void get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
16247 struct backing_dev_info *bdi);
16248
16249 void page_writeback_init(void);
16250 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
16251 unsigned long nr_pages_dirtied);
16252
16253 static inline __attribute__((always_inline)) void
16254 balance_dirty_pages_ratelimited(struct address_space *mapping)
16255 {
16256 balance_dirty_pages_ratelimited_nr(mapping, 1);
16257 }
16258
16259 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
16260 void *data);
16261
16262 int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
16263 int generic_writepages(struct address_space *mapping,
16264 struct writeback_control *wbc);
16265 int write_cache_pages(struct address_space *mapping,
16266 struct writeback_control *wbc, writepage_t writepage,
16267 void *data);
16268 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
16269 int sync_page_range(struct inode *inode, struct address_space *mapping,
16270 loff_t pos, loff_t count);
16271 int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
16272 loff_t pos, loff_t count);
16273 void set_page_dirty_balance(struct page *page, int page_mkwrite);
16274 void writeback_set_ratelimit(void);
16275
16276
16277 extern int nr_pdflush_threads;
16278 # 34 "kernel/trace/trace.c" 2
16279
16280 # 1 "include/linux/stacktrace.h" 1
16281
16282
16283
16284 struct task_struct;
16285 # 36 "kernel/trace/trace.c" 2
16286 # 1 "include/linux/ring_buffer.h" 1
16287
16288
16289
16290
16291
16292
16293 struct ring_buffer;
16294 struct ring_buffer_iter;
16295
16296
16297
16298
16299 struct ring_buffer_event {
16300 u32 type:2, len:3, time_delta:27;
16301 u32 array[];
16302 };
16303 # 43 "include/linux/ring_buffer.h"
16304 enum ring_buffer_type {
16305 RINGBUF_TYPE_PADDING,
16306 RINGBUF_TYPE_TIME_EXTEND,
16307
16308 RINGBUF_TYPE_TIME_STAMP,
16309 RINGBUF_TYPE_DATA,
16310 };
16311
16312 unsigned ring_buffer_event_length(struct ring_buffer_event *event);
16313 void *ring_buffer_event_data(struct ring_buffer_event *event);
16314
16315
16316
16317
16318
16319
16320
16321 static inline __attribute__((always_inline)) unsigned
16322 ring_buffer_event_time_delta(struct ring_buffer_event *event)
16323 {
16324 return event->time_delta;
16325 }
16326
16327
16328
16329
16330 struct ring_buffer *
16331 ring_buffer_alloc(unsigned long size, unsigned flags);
16332 void ring_buffer_free(struct ring_buffer *buffer);
16333
16334 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
16335
16336 struct ring_buffer_event *
16337 ring_buffer_lock_reserve(struct ring_buffer *buffer,
16338 unsigned long length,
16339 unsigned long *flags);
16340 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
16341 struct ring_buffer_event *event,
16342 unsigned long flags);
16343 int ring_buffer_write(struct ring_buffer *buffer,
16344 unsigned long length, void *data);
16345
16346 struct ring_buffer_event *
16347 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts);
16348 struct ring_buffer_event *
16349 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts);
16350
16351 struct ring_buffer_iter *
16352 ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
16353 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
16354
16355 struct ring_buffer_event *
16356 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
16357 struct ring_buffer_event *
16358 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
16359 void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
16360 int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
16361
16362 unsigned long ring_buffer_size(struct ring_buffer *buffer);
16363
16364 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
16365 void ring_buffer_reset(struct ring_buffer *buffer);
16366
16367 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
16368 struct ring_buffer *buffer_b, int cpu);
16369
16370 int ring_buffer_empty(struct ring_buffer *buffer);
16371 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
16372
16373 void ring_buffer_record_disable(struct ring_buffer *buffer);
16374 void ring_buffer_record_enable(struct ring_buffer *buffer);
16375 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
16376 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
16377
16378 unsigned long ring_buffer_entries(struct ring_buffer *buffer);
16379 unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
16380
16381 u64 ring_buffer_time_stamp(int cpu);
16382 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
16383
16384 void tracing_on(void);
16385 void tracing_off(void);
16386
16387 enum ring_buffer_flags {
16388 RB_FL_OVERWRITE = 1 << 0,
16389 };
16390 # 37 "kernel/trace/trace.c" 2
16391 # 1 "include/linux/irqflags.h" 1
16392 # 38 "kernel/trace/trace.c" 2
16393
16394 # 1 "kernel/trace/trace.h" 1
16395
16396
16397
16398
16399
16400
16401 # 1 "include/linux/clocksource.h" 1
16402 # 17 "include/linux/clocksource.h"
16403 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/div64.h" 1
16404 # 18 "include/linux/clocksource.h" 2
16405
16406
16407
16408 typedef u64 cycle_t;
16409 struct clocksource;
16410 # 57 "include/linux/clocksource.h"
16411 struct clocksource {
16412
16413
16414
16415 char *name;
16416 struct list_head list;
16417 int rating;
16418 cycle_t (*read)(void);
16419 cycle_t mask;
16420 u32 mult;
16421 u32 mult_orig;
16422 u32 shift;
16423 unsigned long flags;
16424 cycle_t (*vread)(void);
16425 void (*resume)(void);
16426 # 80 "include/linux/clocksource.h"
16427 cycle_t cycle_interval;
16428 u64 xtime_interval;
16429 u32 raw_interval;
16430
16431
16432
16433
16434
16435 cycle_t cycle_last ;
16436 u64 xtime_nsec;
16437 s64 error;
16438 struct timespec raw_time;
16439
16440
16441
16442
16443
16444
16445 };
16446
16447 extern struct clocksource *clock;
16448 # 122 "include/linux/clocksource.h"
16449 static inline __attribute__((always_inline)) u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
16450 {
16451
16452
16453
16454
16455
16456
16457
16458 u64 tmp = ((u64)1000000) << shift_constant;
16459
16460 tmp += khz/2;
16461 ({ uint32_t __base = (khz); uint32_t __rem; (void)(((typeof((tmp)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((tmp) >> 32) == 0), 1)) { __rem = (uint32_t)(tmp) % __base; (tmp) = (uint32_t)(tmp) / __base; } else __rem = __div64_32(&(tmp), __base); __rem; });
16462
16463 return (u32)tmp;
16464 }
16465 # 148 "include/linux/clocksource.h"
16466 static inline __attribute__((always_inline)) u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
16467 {
16468
16469
16470
16471
16472
16473
16474
16475 u64 tmp = ((u64)1000000000) << shift_constant;
16476
16477 tmp += hz/2;
16478 ({ uint32_t __base = (hz); uint32_t __rem; (void)(((typeof((tmp)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((tmp) >> 32) == 0), 1)) { __rem = (uint32_t)(tmp) % __base; (tmp) = (uint32_t)(tmp) / __base; } else __rem = __div64_32(&(tmp), __base); __rem; });
16479
16480 return (u32)tmp;
16481 }
16482
16483
16484
16485
16486
16487
16488
16489 static inline __attribute__((always_inline)) cycle_t clocksource_read(struct clocksource *cs)
16490 {
16491 return cs->read();
16492 }
16493 # 185 "include/linux/clocksource.h"
16494 static inline __attribute__((always_inline)) s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
16495 {
16496 u64 ret = (u64)cycles;
16497 ret = (ret * cs->mult) >> cs->shift;
16498 return ret;
16499 }
16500 # 203 "include/linux/clocksource.h"
16501 static inline __attribute__((always_inline)) void clocksource_calculate_interval(struct clocksource *c,
16502 unsigned long length_nsec)
16503 {
16504 u64 tmp;
16505
16506
16507 tmp = length_nsec;
16508 tmp <<= c->shift;
16509 tmp += c->mult_orig/2;
16510 ({ uint32_t __base = (c->mult_orig); uint32_t __rem; (void)(((typeof((tmp)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((tmp) >> 32) == 0), 1)) { __rem = (uint32_t)(tmp) % __base; (tmp) = (uint32_t)(tmp) / __base; } else __rem = __div64_32(&(tmp), __base); __rem; });
16511
16512 c->cycle_interval = (cycle_t)tmp;
16513 if (c->cycle_interval == 0)
16514 c->cycle_interval = 1;
16515
16516
16517 c->xtime_interval = (u64)c->cycle_interval * c->mult;
16518 c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
16519 }
16520
16521
16522
16523 extern int clocksource_register(struct clocksource*);
16524 extern void clocksource_unregister(struct clocksource*);
16525 extern void clocksource_touch_watchdog(void);
16526 extern struct clocksource* clocksource_get_next(void);
16527 extern void clocksource_change_rating(struct clocksource *cs, int rating);
16528 extern void clocksource_resume(void);
16529
16530
16531
16532
16533
16534 static inline __attribute__((always_inline)) void update_vsyscall(struct timespec *ts, struct clocksource *c)
16535 {
16536 }
16537
16538 static inline __attribute__((always_inline)) void update_vsyscall_tz(void)
16539 {
16540 }
16541 # 8 "kernel/trace/trace.h" 2
16542
16543 # 1 "include/linux/mmiotrace.h" 1
16544
16545
16546
16547
16548
16549
16550 struct kmmio_probe;
16551 struct pt_regs;
16552
16553 typedef void (*kmmio_pre_handler_t)(struct kmmio_probe *,
16554 struct pt_regs *, unsigned long addr);
16555 typedef void (*kmmio_post_handler_t)(struct kmmio_probe *,
16556 unsigned long condition, struct pt_regs *);
16557
16558 struct kmmio_probe {
16559 struct list_head list;
16560 unsigned long addr;
16561 unsigned long len;
16562 kmmio_pre_handler_t pre_handler;
16563 kmmio_post_handler_t post_handler;
16564 void *private;
16565 };
16566
16567
16568 static inline __attribute__((always_inline)) int is_kmmio_active(void)
16569 {
16570 extern unsigned int kmmio_count;
16571 return kmmio_count;
16572 }
16573
16574 extern int register_kmmio_probe(struct kmmio_probe *p);
16575 extern void unregister_kmmio_probe(struct kmmio_probe *p);
16576
16577
16578 extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
16579 # 47 "include/linux/mmiotrace.h"
16580 static inline __attribute__((always_inline)) void mmiotrace_ioremap(resource_size_t offset,
16581 unsigned long size, void *addr)
16582 {
16583 }
16584
16585 static inline __attribute__((always_inline)) void mmiotrace_iounmap(volatile void *addr)
16586 {
16587 }
16588
16589 static inline __attribute__((always_inline)) int mmiotrace_printk(const char *fmt, ...)
16590 __attribute__ ((format (printf, 1, 0)));
16591
16592 static inline __attribute__((always_inline)) int mmiotrace_printk(const char *fmt, ...)
16593 {
16594 return 0;
16595 }
16596
16597
16598 enum mm_io_opcode {
16599 MMIO_READ = 0x1,
16600 MMIO_WRITE = 0x2,
16601 MMIO_PROBE = 0x3,
16602 MMIO_UNPROBE = 0x4,
16603 MMIO_UNKNOWN_OP = 0x5,
16604 };
16605
16606 struct mmiotrace_rw {
16607 resource_size_t phys;
16608 unsigned long value;
16609 unsigned long pc;
16610 int map_id;
16611 unsigned char opcode;
16612 unsigned char width;
16613 };
16614
16615 struct mmiotrace_map {
16616 resource_size_t phys;
16617 unsigned long virt;
16618 unsigned long len;
16619 int map_id;
16620 unsigned char opcode;
16621 };
16622
16623
16624 extern void enable_mmiotrace(void);
16625 extern void disable_mmiotrace(void);
16626 extern void mmio_trace_rw(struct mmiotrace_rw *rw);
16627 extern void mmio_trace_mapping(struct mmiotrace_map *map);
16628 extern int mmio_trace_printk(const char *fmt, va_list args);
16629 # 10 "kernel/trace/trace.h" 2
16630
16631
16632 enum trace_type {
16633 __TRACE_FIRST_TYPE = 0,
16634
16635 TRACE_FN,
16636 TRACE_CTX,
16637 TRACE_WAKE,
16638 TRACE_CONT,
16639 TRACE_STACK,
16640 TRACE_PRINT,
16641 TRACE_SPECIAL,
16642 TRACE_MMIO_RW,
16643 TRACE_MMIO_MAP,
16644 TRACE_BOOT,
16645
16646 __TRACE_LAST_TYPE
16647 };
16648
16649
16650
16651
16652
16653
16654
16655 struct trace_entry {
16656 unsigned char type;
16657 unsigned char cpu;
16658 unsigned char flags;
16659 unsigned char preempt_count;
16660 int pid;
16661 };
16662
16663
16664
16665
16666 struct ftrace_entry {
16667 struct trace_entry ent;
16668 unsigned long ip;
16669 unsigned long parent_ip;
16670 };
16671 extern struct tracer boot_tracer;
16672
16673
16674
16675
16676 struct ctx_switch_entry {
16677 struct trace_entry ent;
16678 unsigned int prev_pid;
16679 unsigned char prev_prio;
16680 unsigned char prev_state;
16681 unsigned int next_pid;
16682 unsigned char next_prio;
16683 unsigned char next_state;
16684 unsigned int next_cpu;
16685 };
16686
16687
16688
16689
16690 struct special_entry {
16691 struct trace_entry ent;
16692 unsigned long arg1;
16693 unsigned long arg2;
16694 unsigned long arg3;
16695 };
16696
16697
16698
16699
16700
16701
16702
16703 struct stack_entry {
16704 struct trace_entry ent;
16705 unsigned long caller[8];
16706 };
16707
16708
16709
16710
16711 struct print_entry {
16712 struct trace_entry ent;
16713 unsigned long ip;
16714 char buf[];
16715 };
16716
16717
16718
16719 struct trace_field_cont {
16720 unsigned char type;
16721
16722 char buf[88 - 1];
16723 };
16724
16725 struct trace_mmiotrace_rw {
16726 struct trace_entry ent;
16727 struct mmiotrace_rw rw;
16728 };
16729
16730 struct trace_mmiotrace_map {
16731 struct trace_entry ent;
16732 struct mmiotrace_map map;
16733 };
16734
16735 struct trace_boot {
16736 struct trace_entry ent;
16737 struct boot_trace initcall;
16738 };
16739 # 130 "kernel/trace/trace.h"
16740 enum trace_flag_type {
16741 TRACE_FLAG_IRQS_OFF = 0x01,
16742 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
16743 TRACE_FLAG_NEED_RESCHED = 0x04,
16744 TRACE_FLAG_HARDIRQ = 0x08,
16745 TRACE_FLAG_SOFTIRQ = 0x10,
16746 TRACE_FLAG_CONT = 0x20,
16747 };
16748 # 146 "kernel/trace/trace.h"
16749 struct trace_array_cpu {
16750 atomic_t disabled;
16751
16752
16753 unsigned long trace_idx;
16754 unsigned long overrun;
16755 unsigned long saved_latency;
16756 unsigned long critical_start;
16757 unsigned long critical_end;
16758 unsigned long critical_sequence;
16759 unsigned long nice;
16760 unsigned long policy;
16761 unsigned long rt_priority;
16762 cycle_t preempt_timestamp;
16763 pid_t pid;
16764 uid_t uid;
16765 char comm[16];
16766 };
16767
16768 struct trace_iterator;
16769
16770
16771
16772
16773
16774
16775 struct trace_array {
16776 struct ring_buffer *buffer;
16777 unsigned long entries;
16778 long ctrl;
16779 int cpu;
16780 cycle_t time_start;
16781 struct task_struct *waiter;
16782 struct trace_array_cpu *data[1];
16783 };
16784 # 194 "kernel/trace/trace.h"
16785 extern void __ftrace_bad_type(void);
16786 # 226 "kernel/trace/trace.h"
16787 enum print_line_t {
16788 TRACE_TYPE_PARTIAL_LINE = 0,
16789 TRACE_TYPE_HANDLED = 1,
16790 TRACE_TYPE_UNHANDLED = 2
16791 };
16792
16793
16794
16795
16796 struct tracer {
16797 const char *name;
16798 void (*init)(struct trace_array *tr);
16799 void (*reset)(struct trace_array *tr);
16800 void (*open)(struct trace_iterator *iter);
16801 void (*pipe_open)(struct trace_iterator *iter);
16802 void (*close)(struct trace_iterator *iter);
16803 void (*start)(struct trace_iterator *iter);
16804 void (*stop)(struct trace_iterator *iter);
16805 ssize_t (*read)(struct trace_iterator *iter,
16806 struct file *filp, char *ubuf,
16807 size_t cnt, loff_t *ppos);
16808 void (*ctrl_update)(struct trace_array *tr);
16809
16810
16811
16812
16813 enum print_line_t (*print_line)(struct trace_iterator *iter);
16814 struct tracer *next;
16815 int print_max;
16816 };
16817
16818 struct trace_seq {
16819 unsigned char buffer[(1UL << 12)];
16820 unsigned int len;
16821 unsigned int readpos;
16822 };
16823
16824
16825
16826
16827
16828 struct trace_iterator {
16829 struct trace_array *tr;
16830 struct tracer *trace;
16831 void *private;
16832 struct ring_buffer_iter *buffer_iter[1];
16833
16834
16835 struct trace_seq seq;
16836 struct trace_entry *ent;
16837 int cpu;
16838 u64 ts;
16839
16840 unsigned long iter_flags;
16841 loff_t pos;
16842 long idx;
16843 };
16844
16845 void trace_wake_up(void);
16846 void tracing_reset(struct trace_array *tr, int cpu);
16847 int tracing_open_generic(struct inode *inode, struct file *filp);
16848 struct dentry *tracing_init_dentry(void);
16849 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
16850
16851 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
16852 struct trace_array_cpu *data);
16853 void tracing_generic_entry_update(struct trace_entry *entry,
16854 unsigned long flags,
16855 int pc);
16856
16857 void ftrace(struct trace_array *tr,
16858 struct trace_array_cpu *data,
16859 unsigned long ip,
16860 unsigned long parent_ip,
16861 unsigned long flags, int pc);
16862 void tracing_sched_switch_trace(struct trace_array *tr,
16863 struct trace_array_cpu *data,
16864 struct task_struct *prev,
16865 struct task_struct *next,
16866 unsigned long flags, int pc);
16867 void tracing_record_cmdline(struct task_struct *tsk);
16868
16869 void tracing_sched_wakeup_trace(struct trace_array *tr,
16870 struct trace_array_cpu *data,
16871 struct task_struct *wakee,
16872 struct task_struct *cur,
16873 unsigned long flags, int pc);
16874 void trace_special(struct trace_array *tr,
16875 struct trace_array_cpu *data,
16876 unsigned long arg1,
16877 unsigned long arg2,
16878 unsigned long arg3, int pc);
16879 void trace_function(struct trace_array *tr,
16880 struct trace_array_cpu *data,
16881 unsigned long ip,
16882 unsigned long parent_ip,
16883 unsigned long flags, int pc);
16884
16885 void tracing_start_cmdline_record(void);
16886 void tracing_stop_cmdline_record(void);
16887 int register_tracer(struct tracer *type);
16888 void unregister_tracer(struct tracer *type);
16889
16890 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
16891
16892 extern unsigned long tracing_max_latency;
16893 extern unsigned long tracing_thresh;
16894
16895 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
16896 void update_max_tr_single(struct trace_array *tr,
16897 struct task_struct *tsk, int cpu);
16898
16899 extern cycle_t ftrace_now(int cpu);
16900 # 349 "kernel/trace/trace.h"
16901 typedef void
16902 (*tracer_switch_func_t)(void *private,
16903 void *__rq,
16904 struct task_struct *prev,
16905 struct task_struct *next);
16906
16907 struct tracer_switch_ops {
16908 tracer_switch_func_t func;
16909 void *private;
16910 struct tracer_switch_ops *next;
16911 };
16912 # 388 "kernel/trace/trace.h"
16913 extern void *head_page(struct trace_array_cpu *data);
16914 extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
16915 extern void trace_seq_print_cont(struct trace_seq *s,
16916 struct trace_iterator *iter);
16917 extern ssize_t trace_seq_to_user(struct trace_seq *s, char *ubuf,
16918 size_t cnt);
16919 extern long ns2usecs(cycle_t nsec);
16920 extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
16921
16922 extern unsigned long trace_flags;
16923 # 406 "kernel/trace/trace.h"
16924 enum trace_iterator_flags {
16925 TRACE_ITER_PRINT_PARENT = 0x01,
16926 TRACE_ITER_SYM_OFFSET = 0x02,
16927 TRACE_ITER_SYM_ADDR = 0x04,
16928 TRACE_ITER_VERBOSE = 0x08,
16929 TRACE_ITER_RAW = 0x10,
16930 TRACE_ITER_HEX = 0x20,
16931 TRACE_ITER_BIN = 0x40,
16932 TRACE_ITER_BLOCK = 0x80,
16933 TRACE_ITER_STACKTRACE = 0x100,
16934 TRACE_ITER_SCHED_TREE = 0x200,
16935 TRACE_ITER_PRINTK = 0x400,
16936 };
16937
16938 extern struct tracer nop_trace;
16939 # 40 "kernel/trace/trace.c" 2
16940
16941
16942
16943 unsigned long tracing_max_latency = (cycle_t)(~0UL);
16944 unsigned long tracing_thresh;
16945
16946 static __typeof__(local_t) per_cpu__ftrace_cpu_disabled;
16947
16948 static inline __attribute__((always_inline)) void ftrace_disable_cpu(void)
16949 {
16950 do { } while (0);
16951 atomic_long_inc(&(&per_cpu__ftrace_cpu_disabled)->a);
16952 }
16953
16954 static inline __attribute__((always_inline)) void ftrace_enable_cpu(void)
16955 {
16956 atomic_long_dec(&(&per_cpu__ftrace_cpu_disabled)->a);
16957 do { } while (0);
16958 }
16959
16960 static cpumask_t tracing_buffer_mask;
16961
16962
16963
16964
16965 static int tracing_disabled = 1;
16966
16967 long
16968 ns2usecs(cycle_t nsec)
16969 {
16970 nsec += 500;
16971 ({ uint32_t __base = (1000); uint32_t __rem; (void)(((typeof((nsec)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((nsec) >> 32) == 0), 1)) { __rem = (uint32_t)(nsec) % __base; (nsec) = (uint32_t)(nsec) / __base; } else __rem = __div64_32(&(nsec), __base); __rem; });
16972 return nsec;
16973 }
16974
16975 cycle_t ftrace_now(int cpu)
16976 {
16977 u64 ts = ring_buffer_time_stamp(cpu);
16978 ring_buffer_normalize_time_stamp(cpu, &ts);
16979 return ts;
16980 }
16981 # 94 "kernel/trace/trace.c"
16982 static struct trace_array global_trace;
16983
16984 static __typeof__(struct trace_array_cpu) per_cpu__global_trace_cpu;
16985 # 108 "kernel/trace/trace.c"
16986 static struct trace_array max_tr;
16987
16988 static __typeof__(struct trace_array_cpu) per_cpu__max_data;
16989
16990
16991 static int tracer_enabled = 1;
16992
16993
16994 int ftrace_function_enabled;
16995 # 130 "kernel/trace/trace.c"
16996 static unsigned long trace_buf_size = 1441792UL;
16997
16998
16999 static struct tracer *trace_types ;
17000
17001
17002 static struct tracer *current_trace ;
17003
17004
17005
17006
17007
17008
17009 static int max_tracer_type_len;
17010
17011
17012
17013
17014
17015
17016
17017 static struct mutex trace_types_lock = { .count = { (1) } , .wait_lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, } , .wait_list = { &(trace_types_lock.wait_list), &(trace_types_lock.wait_list) } , .magic = &trace_types_lock };
17018
17019
17020 static wait_queue_head_t trace_wait = { .lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, }, .task_list = { &(trace_wait).task_list, &(trace_wait).task_list } };
17021
17022
17023 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
17024
17025
17026
17027
17028
17029
17030
17031 void trace_wake_up(void)
17032 {
17033
17034
17035
17036
17037 if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
17038 __wake_up(&trace_wait, (1 | 2), 1, ((void *)0));
17039 }
17040
17041 static int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) set_buf_size(char *str)
17042 {
17043 unsigned long buf_size;
17044 int ret;
17045
17046 if (!str)
17047 return 0;
17048 ret = strict_strtoul(str, 0, &buf_size);
17049
17050 if (ret < 0 || buf_size == 0)
17051 return 0;
17052 trace_buf_size = buf_size;
17053 return 1;
17054 }
17055 static char __setup_str_set_buf_size[] __attribute__ ((__section__(".init.data"))) __attribute__((aligned(1))) = "trace_buf_size="; static struct obs_kernel_param __setup_set_buf_size __attribute__((__used__)) __attribute__ ((__section__(".init.setup"))) __attribute__((aligned((sizeof(long))))) = { __setup_str_set_buf_size, set_buf_size, 0 };
17056
17057 unsigned long nsecs_to_usecs(unsigned long nsecs)
17058 {
17059 return nsecs / 1000;
17060 }
17061 # 204 "kernel/trace/trace.c"
17062 static const char *trace_options[] = {
17063 "print-parent",
17064 "sym-offset",
17065 "sym-addr",
17066 "verbose",
17067 "raw",
17068 "hex",
17069 "bin",
17070 "block",
17071 "stacktrace",
17072 "sched-tree",
17073 "ftrace_printk",
17074 ((void *)0)
17075 };
17076 # 228 "kernel/trace/trace.c"
17077 static raw_spinlock_t ftrace_max_lock =
17078 (raw_spinlock_t){ 1 };
17079
17080
17081
17082
17083
17084
17085 static void
17086 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
17087 {
17088 struct trace_array_cpu *data = tr->data[cpu];
17089
17090 max_tr.cpu = cpu;
17091 max_tr.time_start = data->preempt_timestamp;
17092
17093 data = max_tr.data[cpu];
17094 data->saved_latency = tracing_max_latency;
17095
17096 memcpy(data->comm, tsk->comm, 16);
17097 data->pid = tsk->pid;
17098 data->uid = tsk->uid;
17099 data->nice = tsk->static_prio - 20 - 100;
17100 data->policy = tsk->policy;
17101 data->rt_priority = tsk->rt_priority;
17102
17103
17104 tracing_record_cmdline((get_current()));
17105 }
17106 # 269 "kernel/trace/trace.c"
17107 int
17108 trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
17109 {
17110 int len = ((1UL << 12) - 1) - s->len;
17111 va_list ap;
17112 int ret;
17113
17114 if (!len)
17115 return 0;
17116
17117 __builtin_va_start(ap,fmt);
17118 ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
17119 __builtin_va_end(ap);
17120
17121
17122 if (ret >= len)
17123 return 0;
17124
17125 s->len += ret;
17126
17127 return len;
17128 }
17129 # 302 "kernel/trace/trace.c"
17130 static int
17131 trace_seq_puts(struct trace_seq *s, const char *str)
17132 {
17133 int len = strlen(str);
17134
17135 if (len > (((1UL << 12) - 1) - s->len))
17136 return 0;
17137
17138 memcpy(s->buffer + s->len, str, len);
17139 s->len += len;
17140
17141 return len;
17142 }
17143
17144 static int
17145 trace_seq_putc(struct trace_seq *s, unsigned char c)
17146 {
17147 if (s->len >= ((1UL << 12) - 1))
17148 return 0;
17149
17150 s->buffer[s->len++] = c;
17151
17152 return 1;
17153 }
17154
17155 static int
17156 trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
17157 {
17158 if (len > (((1UL << 12) - 1) - s->len))
17159 return 0;
17160
17161 memcpy(s->buffer + s->len, mem, len);
17162 s->len += len;
17163
17164 return len;
17165 }
17166
17167
17168
17169
17170 static int
17171 trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
17172 {
17173 unsigned char hex[(8*2 + 1)];
17174 unsigned char *data = mem;
17175 int i, j;
17176
17177
17178
17179
17180 for (i = len-1, j = 0; i >= 0; i--) {
17181
17182 hex[j++] = hex_asc[((data[i]) & 0xf0) >> 4];
17183 hex[j++] = hex_asc[((data[i]) & 0x0f)];
17184 }
17185 hex[j++] = ' ';
17186
17187 return trace_seq_putmem(s, hex, j);
17188 }
17189
17190 static void
17191 trace_seq_reset(struct trace_seq *s)
17192 {
17193 s->len = 0;
17194 s->readpos = 0;
17195 }
17196
17197 ssize_t trace_seq_to_user(struct trace_seq *s, char *ubuf, size_t cnt)
17198 {
17199 int len;
17200 int ret;
17201
17202 if (s->len <= s->readpos)
17203 return -16;
17204
17205 len = s->len - s->readpos;
17206 if (cnt > len)
17207 cnt = len;
17208 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
17209 if (ret)
17210 return -14;
17211
17212 s->readpos += len;
17213 return cnt;
17214 }
17215
17216 static void
17217 trace_print_seq(struct seq_file *m, struct trace_seq *s)
17218 {
17219 int len = s->len >= (1UL << 12) ? (1UL << 12) - 1 : s->len;
17220
17221 s->buffer[len] = 0;
17222 seq_puts(m, s->buffer);
17223
17224 trace_seq_reset(s);
17225 }
17226 # 408 "kernel/trace/trace.c"
17227 void
17228 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
17229 {
17230 struct ring_buffer *buf = tr->buffer;
17231
17232 ({ static int __warned; int __ret_warn_once = !!(!({ unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %0;" : "=d" (flags) ); !(((flags) & ~0x3f) != 0); })); if (__builtin_expect(!!(__ret_warn_once), 0)) if (({ int __ret_warn_on = !!(!__warned); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 413); __builtin_expect(!!(__ret_warn_on), 0); })) __warned = 1; __builtin_expect(!!(__ret_warn_once), 0); });
17233 __raw_spin_lock(&ftrace_max_lock);
17234
17235 tr->buffer = max_tr.buffer;
17236 max_tr.buffer = buf;
17237
17238 ftrace_disable_cpu();
17239 ring_buffer_reset(tr->buffer);
17240 ftrace_enable_cpu();
17241
17242 __update_max_tr(tr, tsk, cpu);
17243 __raw_spin_unlock(&ftrace_max_lock);
17244 }
17245 # 435 "kernel/trace/trace.c"
17246 void
17247 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
17248 {
17249 int ret;
17250
17251 ({ static int __warned; int __ret_warn_once = !!(!({ unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %0;" : "=d" (flags) ); !(((flags) & ~0x3f) != 0); })); if (__builtin_expect(!!(__ret_warn_once), 0)) if (({ int __ret_warn_on = !!(!__warned); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 440); __builtin_expect(!!(__ret_warn_on), 0); })) __warned = 1; __builtin_expect(!!(__ret_warn_once), 0); });
17252 __raw_spin_lock(&ftrace_max_lock);
17253
17254 ftrace_disable_cpu();
17255
17256 ring_buffer_reset(max_tr.buffer);
17257 ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
17258
17259 ftrace_enable_cpu();
17260
17261 ({ static int __warned; int __ret_warn_once = !!(ret); if (__builtin_expect(!!(__ret_warn_once), 0)) if (({ int __ret_warn_on = !!(!__warned); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 450); __builtin_expect(!!(__ret_warn_on), 0); })) __warned = 1; __builtin_expect(!!(__ret_warn_once), 0); });
17262
17263 __update_max_tr(tr, tsk, cpu);
17264 __raw_spin_unlock(&ftrace_max_lock);
17265 }
17266
17267
17268
17269
17270
17271
17272
17273 int register_tracer(struct tracer *type)
17274 {
17275 struct tracer *t;
17276 int len;
17277 int ret = 0;
17278
17279 if (!type->name) {
17280 printk("<6>" "Tracer must have a name\n");
17281 return -1;
17282 }
17283
17284 mutex_lock(&trace_types_lock);
17285 for (t = trace_types; t; t = t->next) {
17286 if (strcmp(type->name, t->name) == 0) {
17287
17288 printk("<6>" "Trace %s already registered\n", type->name);
17289
17290 ret = -1;
17291 goto out;
17292 }
17293 }
17294 # 520 "kernel/trace/trace.c"
17295 type->next = trace_types;
17296 trace_types = type;
17297 len = strlen(type->name);
17298 if (len > max_tracer_type_len)
17299 max_tracer_type_len = len;
17300
17301 out:
17302 mutex_unlock(&trace_types_lock);
17303
17304 return ret;
17305 }
17306
17307 void unregister_tracer(struct tracer *type)
17308 {
17309 struct tracer **t;
17310 int len;
17311
17312 mutex_lock(&trace_types_lock);
17313 for (t = &trace_types; *t; t = &(*t)->next) {
17314 if (*t == type)
17315 goto found;
17316 }
17317 printk("<6>" "Trace %s not registered\n", type->name);
17318 goto out;
17319
17320 found:
17321 *t = (*t)->next;
17322 if (strlen(type->name) != max_tracer_type_len)
17323 goto out;
17324
17325 max_tracer_type_len = 0;
17326 for (t = &trace_types; *t; t = &(*t)->next) {
17327 len = strlen((*t)->name);
17328 if (len > max_tracer_type_len)
17329 max_tracer_type_len = len;
17330 }
17331 out:
17332 mutex_unlock(&trace_types_lock);
17333 }
17334
17335 void tracing_reset(struct trace_array *tr, int cpu)
17336 {
17337 ftrace_disable_cpu();
17338 ring_buffer_reset_cpu(tr->buffer, cpu);
17339 ftrace_enable_cpu();
17340 }
17341
17342
17343 static unsigned map_pid_to_cmdline[(1 ? 0x1000 : 0x8000)+1];
17344 static unsigned map_cmdline_to_pid[128];
17345 static char saved_cmdlines[128][16];
17346 static int cmdline_idx;
17347 static spinlock_t trace_cmdline_lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, };
17348
17349
17350 atomic_t trace_record_cmdline_disabled ;
17351
17352 static void trace_init_cmdlines(void)
17353 {
17354 memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
17355 memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
17356 cmdline_idx = 0;
17357 }
17358
17359 void trace_stop_cmdline_recording(void);
17360
17361 static void trace_save_cmdline(struct task_struct *tsk)
17362 {
17363 unsigned map;
17364 unsigned idx;
17365
17366 if (!tsk->pid || __builtin_expect(!!(tsk->pid > (1 ? 0x1000 : 0x8000)), 0))
17367 return;
17368
17369
17370
17371
17372
17373
17374
17375 if (!(_spin_trylock(&trace_cmdline_lock)))
17376 return;
17377
17378 idx = map_pid_to_cmdline[tsk->pid];
17379 if (idx >= 128) {
17380 idx = (cmdline_idx + 1) % 128;
17381
17382 map = map_cmdline_to_pid[idx];
17383 if (map <= (1 ? 0x1000 : 0x8000))
17384 map_pid_to_cmdline[map] = (unsigned)-1;
17385
17386 map_pid_to_cmdline[tsk->pid] = idx;
17387
17388 cmdline_idx = idx;
17389 }
17390
17391 memcpy(&saved_cmdlines[idx], tsk->comm, 16);
17392
17393 _spin_unlock(&trace_cmdline_lock);
17394 }
17395
17396 static char *trace_find_cmdline(int pid)
17397 {
17398 char *cmdline = "<...>";
17399 unsigned map;
17400
17401 if (!pid)
17402 return "<idle>";
17403
17404 if (pid > (1 ? 0x1000 : 0x8000))
17405 goto out;
17406
17407 map = map_pid_to_cmdline[pid];
17408 if (map >= 128)
17409 goto out;
17410
17411 cmdline = saved_cmdlines[map];
17412
17413 out:
17414 return cmdline;
17415 }
17416
17417 void tracing_record_cmdline(struct task_struct *tsk)
17418 {
17419 if (((&trace_record_cmdline_disabled)->counter))
17420 return;
17421
17422 trace_save_cmdline(tsk);
17423 }
17424
17425 void
17426 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
17427 int pc)
17428 {
17429 struct task_struct *tsk = (get_current());
17430
17431 entry->preempt_count = pc & 0xff;
17432 entry->pid = (tsk) ? tsk->pid : 0;
17433 entry->flags =
17434
17435
17436
17437 TRACE_FLAG_IRQS_NOSUPPORT |
17438
17439 ((pc & (((1UL << (8))-1) << ((0 + 8) + 8))) ? TRACE_FLAG_HARDIRQ : 0) |
17440 ((pc & (((1UL << (8))-1) << (0 + 8))) ? TRACE_FLAG_SOFTIRQ : 0) |
17441 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
17442 }
17443
17444 void
17445 trace_function(struct trace_array *tr, struct trace_array_cpu *data,
17446 unsigned long ip, unsigned long parent_ip, unsigned long flags,
17447 int pc)
17448 {
17449 struct ring_buffer_event *event;
17450 struct ftrace_entry *entry;
17451 unsigned long irq_flags;
17452
17453
17454 if (__builtin_expect(!!(atomic_long_read(&(&per_cpu__ftrace_cpu_disabled)->a)), 0))
17455 return;
17456
17457 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
17458 &irq_flags);
17459 if (!event)
17460 return;
17461 entry = ring_buffer_event_data(event);
17462 tracing_generic_entry_update(&entry->ent, flags, pc);
17463 entry->ent.type = TRACE_FN;
17464 entry->ip = ip;
17465 entry->parent_ip = parent_ip;
17466 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
17467 }
17468
17469 void
17470 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
17471 unsigned long ip, unsigned long parent_ip, unsigned long flags,
17472 int pc)
17473 {
17474 if (__builtin_expect(!!(!((&data->disabled)->counter)), 1))
17475 trace_function(tr, data, ip, parent_ip, flags, pc);
17476 }
17477
17478 static void ftrace_trace_stack(struct trace_array *tr,
17479 struct trace_array_cpu *data,
17480 unsigned long flags,
17481 int skip, int pc)
17482 {
17483 # 735 "kernel/trace/trace.c"
17484 }
17485
17486 void __trace_stack(struct trace_array *tr,
17487 struct trace_array_cpu *data,
17488 unsigned long flags,
17489 int skip)
17490 {
17491 ftrace_trace_stack(tr, data, flags, skip, (current_thread_info()->preempt_count));
17492 }
17493
17494 static void
17495 ftrace_trace_special(void *__tr, void *__data,
17496 unsigned long arg1, unsigned long arg2, unsigned long arg3,
17497 int pc)
17498 {
17499 struct ring_buffer_event *event;
17500 struct trace_array_cpu *data = __data;
17501 struct trace_array *tr = __tr;
17502 struct special_entry *entry;
17503 unsigned long irq_flags;
17504
17505 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
17506 &irq_flags);
17507 if (!event)
17508 return;
17509 entry = ring_buffer_event_data(event);
17510 tracing_generic_entry_update(&entry->ent, 0, pc);
17511 entry->ent.type = TRACE_SPECIAL;
17512 entry->arg1 = arg1;
17513 entry->arg2 = arg2;
17514 entry->arg3 = arg3;
17515 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
17516 ftrace_trace_stack(tr, data, irq_flags, 4, pc);
17517
17518 trace_wake_up();
17519 }
17520
17521 void
17522 __trace_special(void *__tr, void *__data,
17523 unsigned long arg1, unsigned long arg2, unsigned long arg3)
17524 {
17525 ftrace_trace_special(__tr, __data, arg1, arg2, arg3, (current_thread_info()->preempt_count));
17526 }
17527
17528 void
17529 tracing_sched_switch_trace(struct trace_array *tr,
17530 struct trace_array_cpu *data,
17531 struct task_struct *prev,
17532 struct task_struct *next,
17533 unsigned long flags, int pc)
17534 {
17535 struct ring_buffer_event *event;
17536 struct ctx_switch_entry *entry;
17537 unsigned long irq_flags;
17538
17539 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
17540 &irq_flags);
17541 if (!event)
17542 return;
17543 entry = ring_buffer_event_data(event);
17544 tracing_generic_entry_update(&entry->ent, flags, pc);
17545 entry->ent.type = TRACE_CTX;
17546 entry->prev_pid = prev->pid;
17547 entry->prev_prio = prev->prio;
17548 entry->prev_state = prev->state;
17549 entry->next_pid = next->pid;
17550 entry->next_prio = next->prio;
17551 entry->next_state = next->state;
17552 entry->next_cpu = task_cpu(next);
17553 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
17554 ftrace_trace_stack(tr, data, flags, 5, pc);
17555 }
17556
17557 void
17558 tracing_sched_wakeup_trace(struct trace_array *tr,
17559 struct trace_array_cpu *data,
17560 struct task_struct *wakee,
17561 struct task_struct *curr,
17562 unsigned long flags, int pc)
17563 {
17564 struct ring_buffer_event *event;
17565 struct ctx_switch_entry *entry;
17566 unsigned long irq_flags;
17567
17568 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
17569 &irq_flags);
17570 if (!event)
17571 return;
17572 entry = ring_buffer_event_data(event);
17573 tracing_generic_entry_update(&entry->ent, flags, pc);
17574 entry->ent.type = TRACE_WAKE;
17575 entry->prev_pid = curr->pid;
17576 entry->prev_prio = curr->prio;
17577 entry->prev_state = curr->state;
17578 entry->next_pid = wakee->pid;
17579 entry->next_prio = wakee->prio;
17580 entry->next_state = wakee->state;
17581 entry->next_cpu = task_cpu(wakee);
17582 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
17583 ftrace_trace_stack(tr, data, flags, 6, pc);
17584
17585 trace_wake_up();
17586 }
17587
17588 void
17589 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
17590 {
17591 struct trace_array *tr = &global_trace;
17592 struct trace_array_cpu *data;
17593 int cpu;
17594 int pc;
17595
17596 if (tracing_disabled || !tr->ctrl)
17597 return;
17598
17599 pc = (current_thread_info()->preempt_count);
17600 do { } while (0);
17601 cpu = 0;
17602 data = tr->data[cpu];
17603
17604 if (__builtin_expect(!!(!((&data->disabled)->counter)), 1))
17605 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
17606
17607 do { } while (0);
17608 }
17609 # 913 "kernel/trace/trace.c"
17610 enum trace_file_type {
17611 TRACE_FILE_LAT_FMT = 1,
17612 };
17613
17614 static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
17615 {
17616
17617 ftrace_disable_cpu();
17618
17619 iter->idx++;
17620 if (iter->buffer_iter[iter->cpu])
17621 ring_buffer_read(iter->buffer_iter[iter->cpu], ((void *)0));
17622
17623 ftrace_enable_cpu();
17624 }
17625
17626 static struct trace_entry *
17627 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
17628 {
17629 struct ring_buffer_event *event;
17630 struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
17631
17632
17633 ftrace_disable_cpu();
17634
17635 if (buf_iter)
17636 event = ring_buffer_iter_peek(buf_iter, ts);
17637 else
17638 event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
17639
17640 ftrace_enable_cpu();
17641
17642 return event ? ring_buffer_event_data(event) : ((void *)0);
17643 }
17644
17645 static struct trace_entry *
17646 __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
17647 {
17648 struct ring_buffer *buffer = iter->tr->buffer;
17649 struct trace_entry *ent, *next = ((void *)0);
17650 u64 next_ts = 0, ts;
17651 int next_cpu = -1;
17652 int cpu;
17653
17654 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) {
17655
17656 if (ring_buffer_empty_cpu(buffer, cpu))
17657 continue;
17658
17659 ent = peek_next_entry(iter, cpu, &ts);
17660
17661
17662
17663
17664 if (ent && (!next || ts < next_ts)) {
17665 next = ent;
17666 next_cpu = cpu;
17667 next_ts = ts;
17668 }
17669 }
17670
17671 if (ent_cpu)
17672 *ent_cpu = next_cpu;
17673
17674 if (ent_ts)
17675 *ent_ts = next_ts;
17676
17677 return next;
17678 }
17679
17680
17681 static struct trace_entry *
17682 find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
17683 {
17684 return __find_next_entry(iter, ent_cpu, ent_ts);
17685 }
17686
17687
17688 static void *find_next_entry_inc(struct trace_iterator *iter)
17689 {
17690 iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
17691
17692 if (iter->ent)
17693 trace_iterator_increment(iter, iter->cpu);
17694
17695 return iter->ent ? iter : ((void *)0);
17696 }
17697
17698 static void trace_consume(struct trace_iterator *iter)
17699 {
17700
17701 ftrace_disable_cpu();
17702 ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
17703 ftrace_enable_cpu();
17704 }
17705
17706 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
17707 {
17708 struct trace_iterator *iter = m->private;
17709 int i = (int)*pos;
17710 void *ent;
17711
17712 (*pos)++;
17713
17714
17715 if (iter->idx > i)
17716 return ((void *)0);
17717
17718 if (iter->idx < 0)
17719 ent = find_next_entry_inc(iter);
17720 else
17721 ent = iter;
17722
17723 while (ent && iter->idx < i)
17724 ent = find_next_entry_inc(iter);
17725
17726 iter->pos = *pos;
17727
17728 return ent;
17729 }
17730
17731 static void *s_start(struct seq_file *m, loff_t *pos)
17732 {
17733 struct trace_iterator *iter = m->private;
17734 void *p = ((void *)0);
17735 loff_t l = 0;
17736 int cpu;
17737
17738 mutex_lock(&trace_types_lock);
17739
17740 if (!current_trace || current_trace != iter->trace) {
17741 mutex_unlock(&trace_types_lock);
17742 return ((void *)0);
17743 }
17744
17745 atomic_inc(&trace_record_cmdline_disabled);
17746
17747
17748 if (current_trace->start)
17749 current_trace->start(iter);
17750
17751 if (*pos != iter->pos) {
17752 iter->ent = ((void *)0);
17753 iter->cpu = 0;
17754 iter->idx = -1;
17755
17756 ftrace_disable_cpu();
17757
17758 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) {
17759 ring_buffer_iter_reset(iter->buffer_iter[cpu]);
17760 }
17761
17762 ftrace_enable_cpu();
17763
17764 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
17765 ;
17766
17767 } else {
17768 l = *pos - 1;
17769 p = s_next(m, p, &l);
17770 }
17771
17772 return p;
17773 }
17774
17775 static void s_stop(struct seq_file *m, void *p)
17776 {
17777 struct trace_iterator *iter = m->private;
17778
17779 atomic_dec(&trace_record_cmdline_disabled);
17780
17781
17782 if (current_trace && current_trace == iter->trace && iter->trace->stop)
17783 iter->trace->stop(iter);
17784
17785 mutex_unlock(&trace_types_lock);
17786 }
17787 # 1102 "kernel/trace/trace.c"
17788 static inline __attribute__((always_inline)) const char *kretprobed(const char *name)
17789 {
17790 return name;
17791 }
17792
17793
17794 static int
17795 seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
17796 {
17797 # 1121 "kernel/trace/trace.c"
17798 return 1;
17799 }
17800
17801 static int
17802 seq_print_sym_offset(struct trace_seq *s, const char *fmt,
17803 unsigned long address)
17804 {
17805 # 1137 "kernel/trace/trace.c"
17806 return 1;
17807 }
17808
17809
17810
17811
17812
17813
17814
17815 static int
17816 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
17817 {
17818 int ret;
17819
17820 if (!ip)
17821 return trace_seq_printf(s, "0");
17822
17823 if (sym_flags & TRACE_ITER_SYM_OFFSET)
17824 ret = seq_print_sym_offset(s, "%s", ip);
17825 else
17826 ret = seq_print_sym_short(s, "%s", ip);
17827
17828 if (!ret)
17829 return 0;
17830
17831 if (sym_flags & TRACE_ITER_SYM_ADDR)
17832 ret = trace_seq_printf(s, " <" "%08lx" ">", ip);
17833 return ret;
17834 }
17835
17836 static void print_lat_help_header(struct seq_file *m)
17837 {
17838 seq_puts(m, "# _------=> CPU# \n");
17839 seq_puts(m, "# / _-----=> irqs-off \n");
17840 seq_puts(m, "# | / _----=> need-resched \n");
17841 seq_puts(m, "# || / _---=> hardirq/softirq \n");
17842 seq_puts(m, "# ||| / _--=> preempt-depth \n");
17843 seq_puts(m, "# |||| / \n");
17844 seq_puts(m, "# ||||| delay \n");
17845 seq_puts(m, "# cmd pid ||||| time | caller \n");
17846 seq_puts(m, "# \\ / ||||| \\ | / \n");
17847 }
17848
17849 static void print_func_help_header(struct seq_file *m)
17850 {
17851 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
17852 seq_puts(m, "# | | | | |\n");
17853 }
17854
17855
17856 static void
17857 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
17858 {
17859 unsigned long sym_flags = (trace_flags & (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR));
17860 struct trace_array *tr = iter->tr;
17861 struct trace_array_cpu *data = tr->data[tr->cpu];
17862 struct tracer *type = current_trace;
17863 unsigned long total;
17864 unsigned long entries;
17865 const char *name = "preemption";
17866
17867 if (type)
17868 name = type->name;
17869
17870 entries = ring_buffer_entries(iter->tr->buffer);
17871 total = entries +
17872 ring_buffer_overruns(iter->tr->buffer);
17873
17874 seq_printf(m, "%s latency trace v1.1.5 on %s\n",
17875 name, "2.6.28-ADI-2009R1-pre-g0033e75-dirty");
17876 seq_puts(m, "-----------------------------------"
17877 "---------------------------------\n");
17878 seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
17879 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
17880 nsecs_to_usecs(data->saved_latency),
17881 entries,
17882 total,
17883 tr->cpu,
17884
17885 "server",
17886 # 1225 "kernel/trace/trace.c"
17887 0, 0, 0, 0);
17888
17889
17890
17891 seq_puts(m, ")\n");
17892
17893 seq_puts(m, " -----------------\n");
17894 seq_printf(m, " | task: %.16s-%d "
17895 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
17896 data->comm, data->pid, data->uid, data->nice,
17897 data->policy, data->rt_priority);
17898 seq_puts(m, " -----------------\n");
17899
17900 if (data->critical_start) {
17901 seq_puts(m, " => started at: ");
17902 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
17903 trace_print_seq(m, &iter->seq);
17904 seq_puts(m, "\n => ended at: ");
17905 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
17906 trace_print_seq(m, &iter->seq);
17907 seq_puts(m, "\n");
17908 }
17909
17910 seq_puts(m, "\n");
17911 }
17912
17913 static void
17914 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
17915 {
17916 int hardirq, softirq;
17917 char *comm;
17918
17919 comm = trace_find_cmdline(entry->pid);
17920
17921 trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
17922 trace_seq_printf(s, "%3d", cpu);
17923 trace_seq_printf(s, "%c%c",
17924 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
17925 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
17926 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
17927
17928 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
17929 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
17930 if (hardirq && softirq) {
17931 trace_seq_putc(s, 'H');
17932 } else {
17933 if (hardirq) {
17934 trace_seq_putc(s, 'h');
17935 } else {
17936 if (softirq)
17937 trace_seq_putc(s, 's');
17938 else
17939 trace_seq_putc(s, '.');
17940 }
17941 }
17942
17943 if (entry->preempt_count)
17944 trace_seq_printf(s, "%x", entry->preempt_count);
17945 else
17946 trace_seq_puts(s, ".");
17947 }
17948
17949 unsigned long preempt_mark_thresh = 100;
17950
17951 static void
17952 lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
17953 unsigned long rel_usecs)
17954 {
17955 trace_seq_printf(s, " %4lldus", abs_usecs);
17956 if (rel_usecs > preempt_mark_thresh)
17957 trace_seq_puts(s, "!: ");
17958 else if (rel_usecs > 1)
17959 trace_seq_puts(s, "+: ");
17960 else
17961 trace_seq_puts(s, " : ");
17962 }
17963
17964 static const char state_to_char[] = "RSDTtZX";
17965
17966
17967
17968
17969
17970 void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
17971 {
17972 struct trace_entry *ent;
17973 struct trace_field_cont *cont;
17974 bool ok = true;
17975
17976 ent = peek_next_entry(iter, iter->cpu, ((void *)0));
17977 if (!ent || ent->type != TRACE_CONT) {
17978 trace_seq_putc(s, '\n');
17979 return;
17980 }
17981
17982 do {
17983 cont = (struct trace_field_cont *)ent;
17984 if (ok)
17985 ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
17986
17987 ftrace_disable_cpu();
17988
17989 if (iter->buffer_iter[iter->cpu])
17990 ring_buffer_read(iter->buffer_iter[iter->cpu], ((void *)0));
17991 else
17992 ring_buffer_consume(iter->tr->buffer, iter->cpu, ((void *)0));
17993
17994 ftrace_enable_cpu();
17995
17996 ent = peek_next_entry(iter, iter->cpu, ((void *)0));
17997 } while (ent && ent->type == TRACE_CONT);
17998
17999 if (!ok)
18000 trace_seq_putc(s, '\n');
18001 }
18002
18003 static enum print_line_t
18004 print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
18005 {
18006 struct trace_seq *s = &iter->seq;
18007 unsigned long sym_flags = (trace_flags & (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR));
18008 struct trace_entry *next_entry;
18009 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
18010 struct trace_entry *entry = iter->ent;
18011 unsigned long abs_usecs;
18012 unsigned long rel_usecs;
18013 u64 next_ts;
18014 char *comm;
18015 int S, T;
18016 int i;
18017 unsigned state;
18018
18019 if (entry->type == TRACE_CONT)
18020 return TRACE_TYPE_HANDLED;
18021
18022 next_entry = find_next_entry(iter, ((void *)0), &next_ts);
18023 if (!next_entry)
18024 next_ts = iter->ts;
18025 rel_usecs = ns2usecs(next_ts - iter->ts);
18026 abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
18027
18028 if (verbose) {
18029 comm = trace_find_cmdline(entry->pid);
18030 trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
18031 " %ld.%03ldms (+%ld.%03ldms): ",
18032 comm,
18033 entry->pid, cpu, entry->flags,
18034 entry->preempt_count, trace_idx,
18035 ns2usecs(iter->ts),
18036 abs_usecs/1000,
18037 abs_usecs % 1000, rel_usecs/1000,
18038 rel_usecs % 1000);
18039 } else {
18040 lat_print_generic(s, entry, cpu);
18041 lat_print_timestamp(s, abs_usecs, rel_usecs);
18042 }
18043 switch (entry->type) {
18044 case TRACE_FN: {
18045 struct ftrace_entry *field;
18046
18047 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18048
18049 seq_print_ip_sym(s, field->ip, sym_flags);
18050 trace_seq_puts(s, " (");
18051 seq_print_ip_sym(s, field->parent_ip, sym_flags);
18052 trace_seq_puts(s, ")\n");
18053 break;
18054 }
18055 case TRACE_CTX:
18056 case TRACE_WAKE: {
18057 struct ctx_switch_entry *field;
18058
18059 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18060
18061 T = field->next_state < sizeof(state_to_char) ?
18062 state_to_char[field->next_state] : 'X';
18063
18064 state = field->prev_state ?
18065 __ffs(field->prev_state) + 1 : 0;
18066 S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
18067 comm = trace_find_cmdline(field->next_pid);
18068 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
18069 field->prev_pid,
18070 field->prev_prio,
18071 S, entry->type == TRACE_CTX ? "==>" : " +",
18072 field->next_cpu,
18073 field->next_pid,
18074 field->next_prio,
18075 T, comm);
18076 break;
18077 }
18078 case TRACE_SPECIAL: {
18079 struct special_entry *field;
18080
18081 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18082
18083 trace_seq_printf(s, "# %ld %ld %ld\n",
18084 field->arg1,
18085 field->arg2,
18086 field->arg3);
18087 break;
18088 }
18089 case TRACE_STACK: {
18090 struct stack_entry *field;
18091
18092 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18093
18094 for (i = 0; i < 8; i++) {
18095 if (i)
18096 trace_seq_puts(s, " <= ");
18097 seq_print_ip_sym(s, field->caller[i], sym_flags);
18098 }
18099 trace_seq_puts(s, "\n");
18100 break;
18101 }
18102 case TRACE_PRINT: {
18103 struct print_entry *field;
18104
18105 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18106
18107 seq_print_ip_sym(s, field->ip, sym_flags);
18108 trace_seq_printf(s, ": %s", field->buf);
18109 if (entry->flags & TRACE_FLAG_CONT)
18110 trace_seq_print_cont(s, iter);
18111 break;
18112 }
18113 default:
18114 trace_seq_printf(s, "Unknown type %d\n", entry->type);
18115 }
18116 return TRACE_TYPE_HANDLED;
18117 }
18118
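/*
 * Default (non-latency) line formatter: prints "comm-pid [cpu] secs.usecs: "
 * and then a per-type body, checking every trace_seq_printf()/puts() return
 * value and bailing out with TRACE_TYPE_PARTIAL_LINE as soon as the seq
 * buffer fills up.
 */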
18119 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
18120 {
18121 struct trace_seq *s = &iter->seq;
18122 unsigned long sym_flags = (trace_flags & (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR));
18123 struct trace_entry *entry;
18124 unsigned long usec_rem;
18125 unsigned long long t;
18126 unsigned long secs;
18127 char *comm;
18128 int ret;
18129 int S, T;
18130 int i;
18131
18132 entry = iter->ent;
18133
18134 if (entry->type == TRACE_CONT)
18135 return TRACE_TYPE_HANDLED;
18136
18137 comm = trace_find_cmdline(iter->ent->pid);
18138
18139 t = ns2usecs(iter->ts);
18140 usec_rem = ({ uint32_t __base = (1000000ULL); uint32_t __rem; (void)(((typeof((t)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((t) >> 32) == 0), 1)) { __rem = (uint32_t)(t) % __base; (t) = (uint32_t)(t) / __base; } else __rem = __div64_32(&(t), __base); __rem; });
18141 secs = (unsigned long)t;
18142
18143 ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
18144 if (!ret)
18145 return TRACE_TYPE_PARTIAL_LINE;
18146 ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
18147 if (!ret)
18148 return TRACE_TYPE_PARTIAL_LINE;
18149 ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
18150 if (!ret)
18151 return TRACE_TYPE_PARTIAL_LINE;
18152
18153 switch (entry->type) {
18154 case TRACE_FN: {
18155 struct ftrace_entry *field;
18156
18157 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18158
18159 ret = seq_print_ip_sym(s, field->ip, sym_flags);
18160 if (!ret)
18161 return TRACE_TYPE_PARTIAL_LINE;
18162 if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
18163 field->parent_ip) {
18164 ret = trace_seq_printf(s, " <-");
18165 if (!ret)
18166 return TRACE_TYPE_PARTIAL_LINE;
18167 ret = seq_print_ip_sym(s,
18168 field->parent_ip,
18169 sym_flags);
18170 if (!ret)
18171 return TRACE_TYPE_PARTIAL_LINE;
18172 }
18173 ret = trace_seq_printf(s, "\n");
18174 if (!ret)
18175 return TRACE_TYPE_PARTIAL_LINE;
18176 break;
18177 }
18178 case TRACE_CTX:
18179 case TRACE_WAKE: {
18180 struct ctx_switch_entry *field;
18181
18182 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18183
18184 S = field->prev_state < sizeof(state_to_char) ?
18185 state_to_char[field->prev_state] : 'X';
18186 T = field->next_state < sizeof(state_to_char) ?
18187 state_to_char[field->next_state] : 'X';
18188 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
18189 field->prev_pid,
18190 field->prev_prio,
18191 S,
18192 entry->type == TRACE_CTX ? "==>" : " +",
18193 field->next_cpu,
18194 field->next_pid,
18195 field->next_prio,
18196 T);
18197 if (!ret)
18198 return TRACE_TYPE_PARTIAL_LINE;
18199 break;
18200 }
18201 case TRACE_SPECIAL: {
18202 struct special_entry *field;
18203
18204 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18205
18206 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
18207 field->arg1,
18208 field->arg2,
18209 field->arg3);
18210 if (!ret)
18211 return TRACE_TYPE_PARTIAL_LINE;
18212 break;
18213 }
18214 case TRACE_STACK: {
18215 struct stack_entry *field;
18216
18217 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18218
18219 for (i = 0; i < 8; i++) {
18220 if (i) {
18221 ret = trace_seq_puts(s, " <= ");
18222 if (!ret)
18223 return TRACE_TYPE_PARTIAL_LINE;
18224 }
18225 ret = seq_print_ip_sym(s, field->caller[i],
18226 sym_flags);
18227 if (!ret)
18228 return TRACE_TYPE_PARTIAL_LINE;
18229 }
18230 ret = trace_seq_puts(s, "\n");
18231 if (!ret)
18232 return TRACE_TYPE_PARTIAL_LINE;
18233 break;
18234 }
18235 case TRACE_PRINT: {
18236 struct print_entry *field;
18237
18238 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18239
18240 seq_print_ip_sym(s, field->ip, sym_flags);
18241 trace_seq_printf(s, ": %s", field->buf);
18242 if (entry->flags & TRACE_FLAG_CONT)
18243 trace_seq_print_cont(s, iter);
18244 break;
18245 }
18246 }
18247 return TRACE_TYPE_HANDLED;
18248 }
18249
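/*
 * Raw formatter: "pid cpu timestamp" followed by the entry's fields as plain
 * numbers, with no symbol resolution or column alignment.
 */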
18250 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
18251 {
18252 struct trace_seq *s = &iter->seq;
18253 struct trace_entry *entry;
18254 int ret;
18255 int S, T;
18256
18257 entry = iter->ent;
18258
18259 if (entry->type == TRACE_CONT)
18260 return TRACE_TYPE_HANDLED;
18261
18262 ret = trace_seq_printf(s, "%d %d %llu ",
18263 entry->pid, iter->cpu, iter->ts);
18264 if (!ret)
18265 return TRACE_TYPE_PARTIAL_LINE;
18266
18267 switch (entry->type) {
18268 case TRACE_FN: {
18269 struct ftrace_entry *field;
18270
18271 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18272
18273 		ret = trace_seq_printf(s, "%lx %lx\n",
18274 field->ip,
18275 field->parent_ip);
18276 if (!ret)
18277 return TRACE_TYPE_PARTIAL_LINE;
18278 break;
18279 }
18280 case TRACE_CTX:
18281 case TRACE_WAKE: {
18282 struct ctx_switch_entry *field;
18283
18284 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18285
18286 S = field->prev_state < sizeof(state_to_char) ?
18287 state_to_char[field->prev_state] : 'X';
18288 T = field->next_state < sizeof(state_to_char) ?
18289 state_to_char[field->next_state] : 'X';
18290 if (entry->type == TRACE_WAKE)
18291 S = '+';
18292 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
18293 field->prev_pid,
18294 field->prev_prio,
18295 S,
18296 field->next_cpu,
18297 field->next_pid,
18298 field->next_prio,
18299 T);
18300 if (!ret)
18301 return TRACE_TYPE_PARTIAL_LINE;
18302 break;
18303 }
18304 case TRACE_SPECIAL:
18305 case TRACE_STACK: {
18306 struct special_entry *field;
18307
18308 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18309
18310 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
18311 field->arg1,
18312 field->arg2,
18313 field->arg3);
18314 if (!ret)
18315 return TRACE_TYPE_PARTIAL_LINE;
18316 break;
18317 }
18318 case TRACE_PRINT: {
18319 struct print_entry *field;
18320
18321 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18322
18323 trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
18324 if (entry->flags & TRACE_FLAG_CONT)
18325 trace_seq_print_cont(s, iter);
18326 break;
18327 }
18328 }
18329 return TRACE_TYPE_HANDLED;
18330 }
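/*
 * Hex formatter: every field is emitted with trace_seq_putmem_hex().  The
 * sizeof(char[1 - 2*!!(sizeof(x) > 8)]) expression inside each expanded
 * do { } while (0) is a compile-time assertion that the field is at most
 * 8 bytes wide; a full seq buffer makes the helper return 0 (presumably
 * TRACE_TYPE_PARTIAL_LINE).
 */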
18331 # 1683 "kernel/trace/trace.c"
18332 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
18333 {
18334 struct trace_seq *s = &iter->seq;
18335 unsigned char newline = '\n';
18336 struct trace_entry *entry;
18337 int S, T;
18338
18339 entry = iter->ent;
18340
18341 if (entry->type == TRACE_CONT)
18342 return TRACE_TYPE_HANDLED;
18343
18344 do { ((void)sizeof(char[1 - 2*!!(sizeof(entry->pid) > 8)])); if (!trace_seq_putmem_hex(s, &(entry->pid), sizeof(entry->pid))) return 0; } while (0);
18345 do { ((void)sizeof(char[1 - 2*!!(sizeof(iter->cpu) > 8)])); if (!trace_seq_putmem_hex(s, &(iter->cpu), sizeof(iter->cpu))) return 0; } while (0);
18346 do { ((void)sizeof(char[1 - 2*!!(sizeof(iter->ts) > 8)])); if (!trace_seq_putmem_hex(s, &(iter->ts), sizeof(iter->ts))) return 0; } while (0);
18347
18348 switch (entry->type) {
18349 case TRACE_FN: {
18350 struct ftrace_entry *field;
18351
18352 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18353
18354 do { ((void)sizeof(char[1 - 2*!!(sizeof(field->ip) > 8)])); if (!trace_seq_putmem_hex(s, &(field->ip), sizeof(field->ip))) return 0; } while (0);
18355 do { ((void)sizeof(char[1 - 2*!!(sizeof(field->parent_ip) > 8)])); if (!trace_seq_putmem_hex(s, &(field->parent_ip), sizeof(field->parent_ip))) return 0; } while (0);
18356 break;
18357 }
18358 case TRACE_CTX:
18359 case TRACE_WAKE: {
18360 struct ctx_switch_entry *field;
18361
18362 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18363
18364 S = field->prev_state < sizeof(state_to_char) ?
18365 state_to_char[field->prev_state] : 'X';
18366 T = field->next_state < sizeof(state_to_char) ?
18367 state_to_char[field->next_state] : 'X';
18368 if (entry->type == TRACE_WAKE)
18369 S = '+';
18370 do { ((void)sizeof(char[1 - 2*!!(sizeof(field->prev_pid) > 8)])); if (!trace_seq_putmem_hex(s, &(field->prev_pid), sizeof(field->prev_pid))) return 0; } while (0);
18371 do { ((void)sizeof(char[1 - 2*!!(sizeof(field->prev_prio) > 8)])); if (!trace_seq_putmem_hex(s, &(field->prev_prio), sizeof(field->prev_prio))) return 0; } while (0);
18372 do { ((void)sizeof(char[1 - 2*!!(sizeof(S) > 8)])); if (!trace_seq_putmem_hex(s, &(S), sizeof(S))) return 0; } while (0);
18373 do { ((void)sizeof(char[1 - 2*!!(sizeof(field->next_cpu) > 8)])); if (!trace_seq_putmem_hex(s, &(field->next_cpu), sizeof(field->next_cpu))) return 0; } while (0);
18374 do { ((void)sizeof(char[1 - 2*!!(sizeof(field->next_pid) > 8)])); if (!trace_seq_putmem_hex(s, &(field->next_pid), sizeof(field->next_pid))) return 0; } while (0);
18375 do { ((void)sizeof(char[1 - 2*!!(sizeof(field->next_prio) > 8)])); if (!trace_seq_putmem_hex(s, &(field->next_prio), sizeof(field->next_prio))) return 0; } while (0);
18376 do { ((void)sizeof(char[1 - 2*!!(sizeof(T) > 8)])); if (!trace_seq_putmem_hex(s, &(T), sizeof(T))) return 0; } while (0);
18377 break;
18378 }
18379 case TRACE_SPECIAL:
18380 case TRACE_STACK: {
18381 struct special_entry *field;
18382
18383 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18384
18385 do { ((void)sizeof(char[1 - 2*!!(sizeof(field->arg1) > 8)])); if (!trace_seq_putmem_hex(s, &(field->arg1), sizeof(field->arg1))) return 0; } while (0);
18386 do { ((void)sizeof(char[1 - 2*!!(sizeof(field->arg2) > 8)])); if (!trace_seq_putmem_hex(s, &(field->arg2), sizeof(field->arg2))) return 0; } while (0);
18387 do { ((void)sizeof(char[1 - 2*!!(sizeof(field->arg3) > 8)])); if (!trace_seq_putmem_hex(s, &(field->arg3), sizeof(field->arg3))) return 0; } while (0);
18388 break;
18389 }
18390 }
18391 do { if (!trace_seq_putmem(s, &(newline), sizeof(newline))) return 0; } while (0);
18392
18393 return TRACE_TYPE_HANDLED;
18394 }
18395
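/*
 * End of the hex formatter above: the sizeof(char[1 - 2*!!(...)]) guards are
 * what a BUILD_BUG_ON()-style compile-time size check expands to, and the
 * trace_seq_putmem_hex() blocks look like expansions of the kernel's
 * SEQ_PUT_HEX_FIELD_RET() helper.  print_bin_fmt() below emits the same data
 * as raw binary: pid, cpu and timestamp first, then the per-type fields, via
 * what appears to be SEQ_PUT_FIELD_RET() and the trace_assign_type()
 * type-dispatch macro from kernel/trace/trace.h.
 */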
18396 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
18397 {
18398 struct trace_seq *s = &iter->seq;
18399 struct trace_entry *entry;
18400
18401 entry = iter->ent;
18402
18403 if (entry->type == TRACE_CONT)
18404 return TRACE_TYPE_HANDLED;
18405
18406 do { if (!trace_seq_putmem(s, &(entry->pid), sizeof(entry->pid))) return 0; } while (0);
18407 do { if (!trace_seq_putmem(s, &(entry->cpu), sizeof(entry->cpu))) return 0; } while (0);
18408 do { if (!trace_seq_putmem(s, &(iter->ts), sizeof(iter->ts))) return 0; } while (0);
18409
18410 switch (entry->type) {
18411 case TRACE_FN: {
18412 struct ftrace_entry *field;
18413
18414 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18415
18416 do { if (!trace_seq_putmem(s, &(field->ip), sizeof(field->ip))) return 0; } while (0);
18417 do { if (!trace_seq_putmem(s, &(field->parent_ip), sizeof(field->parent_ip))) return 0; } while (0);
18418 break;
18419 }
18420 case TRACE_CTX: {
18421 struct ctx_switch_entry *field;
18422
18423 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18424
18425 do { if (!trace_seq_putmem(s, &(field->prev_pid), sizeof(field->prev_pid))) return 0; } while (0);
18426 do { if (!trace_seq_putmem(s, &(field->prev_prio), sizeof(field->prev_prio))) return 0; } while (0);
18427 do { if (!trace_seq_putmem(s, &(field->prev_state), sizeof(field->prev_state))) return 0; } while (0);
18428 do { if (!trace_seq_putmem(s, &(field->next_pid), sizeof(field->next_pid))) return 0; } while (0);
18429 do { if (!trace_seq_putmem(s, &(field->next_prio), sizeof(field->next_prio))) return 0; } while (0);
18430 do { if (!trace_seq_putmem(s, &(field->next_state), sizeof(field->next_state))) return 0; } while (0);
18431 break;
18432 }
18433 case TRACE_SPECIAL:
18434 case TRACE_STACK: {
18435 struct special_entry *field;
18436
18437 do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0);
18438
18439 do { if (!trace_seq_putmem(s, &(field->arg1), sizeof(field->arg1))) return 0; } while (0);
18440 do { if (!trace_seq_putmem(s, &(field->arg2), sizeof(field->arg2))) return 0; } while (0);
18441 do { if (!trace_seq_putmem(s, &(field->arg3), sizeof(field->arg3))) return 0; } while (0);
18442 break;
18443 }
18444 }
18445 return 1;
18446 }
18447
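/*
 * trace_empty(): returns 1 when every per-cpu ring buffer (or buffer
 * iterator) has no entries left.  The odd loop header
 *   for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask)
 * is what for_each_tracing_cpu() appears to reduce to on this uniprocessor
 * (NR_CPUS == 1) Blackfin configuration.
 */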
18448 static int trace_empty(struct trace_iterator *iter)
18449 {
18450 int cpu;
18451
18452 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) {
18453 if (iter->buffer_iter[cpu]) {
18454 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
18455 return 0;
18456 } else {
18457 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
18458 return 0;
18459 }
18460 }
18461
18462 return 1;
18463 }
18464
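/*
 * print_trace_line(): gives the current tracer's own print_line() hook the
 * first chance at the entry, then falls back to the binary, hex, raw,
 * latency or default text formatter depending on trace_flags and the
 * iterator's TRACE_FILE_LAT_FMT flag.
 */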
18465 static enum print_line_t print_trace_line(struct trace_iterator *iter)
18466 {
18467 enum print_line_t ret;
18468
18469 if (iter->trace && iter->trace->print_line) {
18470 ret = iter->trace->print_line(iter);
18471 if (ret != TRACE_TYPE_UNHANDLED)
18472 return ret;
18473 }
18474
18475 if (trace_flags & TRACE_ITER_BIN)
18476 return print_bin_fmt(iter);
18477
18478 if (trace_flags & TRACE_ITER_HEX)
18479 return print_hex_fmt(iter);
18480
18481 if (trace_flags & TRACE_ITER_RAW)
18482 return print_raw_fmt(iter);
18483
18484 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
18485 return print_lat_fmt(iter, iter->idx, iter->cpu);
18486
18487 return print_trace_fmt(iter);
18488 }
18489
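/*
 * s_show(): the seq_file ->show callback behind the "trace" and
 * "latency_trace" files.  ((void *)0) is simply NULL after preprocessing;
 * when no entry is pending it prints the header block, otherwise it formats
 * one trace line into iter->seq and copies it into the seq_file.
 */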
18490 static int s_show(struct seq_file *m, void *v)
18491 {
18492 struct trace_iterator *iter = v;
18493
18494 if (iter->ent == ((void *)0)) {
18495 if (iter->tr) {
18496 seq_printf(m, "# tracer: %s\n", iter->trace->name);
18497 seq_puts(m, "#\n");
18498 }
18499 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
18500
18501 if (trace_empty(iter))
18502 return 0;
18503 print_trace_header(m, iter);
18504 if (!(trace_flags & TRACE_ITER_VERBOSE))
18505 print_lat_help_header(m);
18506 } else {
18507 if (!(trace_flags & TRACE_ITER_VERBOSE))
18508 print_func_help_header(m);
18509 }
18510 } else {
18511 print_trace_line(iter);
18512 trace_print_seq(m, &iter->seq);
18513 }
18514
18515 return 0;
18516 }
18517
18518 static struct seq_operations tracer_seq_ops = {
18519 .start = s_start,
18520 .next = s_next,
18521 .stop = s_stop,
18522 .show = s_show,
18523 };
18524
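/*
 * __tracing_open(): allocates the iterator (the gfp_t mask 0x10|0x40|0x80 is
 * what GFP_KERNEL appears to expand to here), starts a ring-buffer reader
 * for each tracing cpu, wires the iterator into the seq_file, and pauses
 * tracing while the file is open.  The bare -19 and -12 are the expanded
 * ENODEV and ENOMEM values; tracing_release() below undoes all of this.
 */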
18525 static struct trace_iterator *
18526 __tracing_open(struct inode *inode, struct file *file, int *ret)
18527 {
18528 struct trace_iterator *iter;
18529 struct seq_file *m;
18530 int cpu;
18531
18532 if (tracing_disabled) {
18533 *ret = -19;
18534 return ((void *)0);
18535 }
18536
18537 iter = kzalloc(sizeof(*iter), ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
18538 if (!iter) {
18539 *ret = -12;
18540 goto out;
18541 }
18542
18543 mutex_lock(&trace_types_lock);
18544 if (current_trace && current_trace->print_max)
18545 iter->tr = &max_tr;
18546 else
18547 iter->tr = inode->i_private;
18548 iter->trace = current_trace;
18549 iter->pos = -1;
18550
18551 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) {
18552
18553 iter->buffer_iter[cpu] =
18554 ring_buffer_read_start(iter->tr->buffer, cpu);
18555
18556 if (!iter->buffer_iter[cpu])
18557 goto fail_buffer;
18558 }
18559
18560
18561 *ret = seq_open(file, &tracer_seq_ops);
18562 if (*ret)
18563 goto fail_buffer;
18564
18565 m = file->private_data;
18566 m->private = iter;
18567
18568
18569 if (iter->tr->ctrl) {
18570 tracer_enabled = 0;
18571 ftrace_function_enabled = 0;
18572 }
18573
18574 if (iter->trace && iter->trace->open)
18575 iter->trace->open(iter);
18576
18577 mutex_unlock(&trace_types_lock);
18578
18579 out:
18580 return iter;
18581
18582 fail_buffer:
18583 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) {
18584 if (iter->buffer_iter[cpu])
18585 ring_buffer_read_finish(iter->buffer_iter[cpu]);
18586 }
18587 mutex_unlock(&trace_types_lock);
18588 kfree(iter);
18589
18590 return ERR_PTR(-12);
18591 }
18592
18593 int tracing_open_generic(struct inode *inode, struct file *filp)
18594 {
18595 if (tracing_disabled)
18596 return -19;
18597
18598 filp->private_data = inode->i_private;
18599 return 0;
18600 }
18601
18602 int tracing_release(struct inode *inode, struct file *file)
18603 {
18604 struct seq_file *m = (struct seq_file *)file->private_data;
18605 struct trace_iterator *iter = m->private;
18606 int cpu;
18607
18608 mutex_lock(&trace_types_lock);
18609 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) {
18610 if (iter->buffer_iter[cpu])
18611 ring_buffer_read_finish(iter->buffer_iter[cpu]);
18612 }
18613
18614 if (iter->trace && iter->trace->close)
18615 iter->trace->close(iter);
18616
18617
18618 if (iter->tr->ctrl) {
18619 tracer_enabled = 1;
18620
18621
18622
18623
18624 ftrace_function_enabled = 1;
18625 }
18626 mutex_unlock(&trace_types_lock);
18627
18628 seq_release(inode, file);
18629 kfree(iter);
18630 return 0;
18631 }
18632
18633 static int tracing_open(struct inode *inode, struct file *file)
18634 {
18635 int ret;
18636
18637 __tracing_open(inode, file, &ret);
18638
18639 return ret;
18640 }
18641
18642 static int tracing_lt_open(struct inode *inode, struct file *file)
18643 {
18644 struct trace_iterator *iter;
18645 int ret;
18646
18647 iter = __tracing_open(inode, file, &ret);
18648
18649 if (!ret)
18650 iter->iter_flags |= TRACE_FILE_LAT_FMT;
18651
18652 return ret;
18653 }
18654
18655
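/*
 * t_start()/t_next()/t_stop()/t_show(): a seq_file walk over the list of
 * registered tracers (trace_types), used by show_traces_open() to implement
 * the "available_tracers" debugfs file.  Names are separated by spaces and
 * the last one is followed by a newline.
 */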
18656 static void *
18657 t_next(struct seq_file *m, void *v, loff_t *pos)
18658 {
18659 struct tracer *t = m->private;
18660
18661 (*pos)++;
18662
18663 if (t)
18664 t = t->next;
18665
18666 m->private = t;
18667
18668 return t;
18669 }
18670
18671 static void *t_start(struct seq_file *m, loff_t *pos)
18672 {
18673 struct tracer *t = m->private;
18674 loff_t l = 0;
18675
18676 mutex_lock(&trace_types_lock);
18677 for (; t && l < *pos; t = t_next(m, t, &l))
18678 ;
18679
18680 return t;
18681 }
18682
18683 static void t_stop(struct seq_file *m, void *p)
18684 {
18685 mutex_unlock(&trace_types_lock);
18686 }
18687
18688 static int t_show(struct seq_file *m, void *v)
18689 {
18690 struct tracer *t = v;
18691
18692 if (!t)
18693 return 0;
18694
18695 seq_printf(m, "%s", t->name);
18696 if (t->next)
18697 seq_putc(m, ' ');
18698 else
18699 seq_putc(m, '\n');
18700
18701 return 0;
18702 }
18703
18704 static struct seq_operations show_traces_seq_ops = {
18705 .start = t_start,
18706 .next = t_next,
18707 .stop = t_stop,
18708 .show = t_show,
18709 };
18710
18711 static int show_traces_open(struct inode *inode, struct file *file)
18712 {
18713 int ret;
18714
18715 if (tracing_disabled)
18716 return -19;
18717
18718 ret = seq_open(file, &show_traces_seq_ops);
18719 if (!ret) {
18720 struct seq_file *m = file->private_data;
18721 m->private = trace_types;
18722 }
18723
18724 return ret;
18725 }
18726
18727 static struct file_operations tracing_fops = {
18728 .open = tracing_open,
18729 .read = seq_read,
18730 .llseek = seq_lseek,
18731 .release = tracing_release,
18732 };
18733
18734 static struct file_operations tracing_lt_fops = {
18735 .open = tracing_lt_open,
18736 .read = seq_read,
18737 .llseek = seq_lseek,
18738 .release = tracing_release,
18739 };
18740
18741 static struct file_operations show_traces_fops = {
18742 .open = show_traces_open,
18743 .read = seq_read,
18744 .release = seq_release,
18745 };
18746
18747
18748
18749
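/*
 * tracing_cpumask handling.  The initializer below appears to be what
 * CPU_MASK_ALL expands to with a single cpu, and tracing_cpumask_update_lock
 * looks like a statically initialized mutex (a DEFINE_MUTEX()-style
 * expansion).  The write handler later in this block brackets the per-cpu
 * disable/enable updates with raw Blackfin "cli"/"sti" interrupt toggling
 * around ftrace_max_lock; -22 is the expanded EINVAL.
 */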
18750 static cpumask_t tracing_cpumask = (cpumask_t) { { [(((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))-1] = ( ((1) % 32) ? (1UL<<((1) % 32))-1 : ~0UL ) } };
18751
18752
18753
18754
18755
18756 static cpumask_t tracing_cpumask_new;
18757
18758
18759
18760
18761
18762 static struct mutex tracing_cpumask_update_lock = { .count = { (1) } , .wait_lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, } , .wait_list = { &(tracing_cpumask_update_lock.wait_list), &(tracing_cpumask_update_lock.wait_list) } , .magic = &tracing_cpumask_update_lock };
18763
18764
18765
18766
18767
18768 static char mask_str[1 + 1];
18769
18770 static ssize_t
18771 tracing_cpumask_read(struct file *filp, char *ubuf,
18772 size_t count, loff_t *ppos)
18773 {
18774 int len;
18775
18776 mutex_lock(&tracing_cpumask_update_lock);
18777
18778 len = __cpumask_scnprintf((mask_str), (count), &(tracing_cpumask), 1);
18779 if (count - len < 2) {
18780 count = -22;
18781 goto out_err;
18782 }
18783 len += sprintf(mask_str + len, "\n");
18784 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, 1 +1);
18785
18786 out_err:
18787 mutex_unlock(&tracing_cpumask_update_lock);
18788
18789 return count;
18790 }
18791
18792 static ssize_t
18793 tracing_cpumask_write(struct file *filp, const char *ubuf,
18794 size_t count, loff_t *ppos)
18795 {
18796 int err, cpu;
18797
18798 mutex_lock(&tracing_cpumask_update_lock);
18799 err = __cpumask_parse_user((ubuf), (count), &(tracing_cpumask_new), 1);
18800 if (err)
18801 goto err_unlock;
18802
18803 do { int __tmp_dummy; __asm__ __volatile__( "cli %0;" : "=d" (__tmp_dummy) ); } while (0);
18804 __raw_spin_lock(&ftrace_max_lock);
18805 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) {
18806
18807
18808
18809
18810 if (test_bit((cpu), (tracing_cpumask).bits) &&
18811 !test_bit((cpu), (tracing_cpumask_new).bits)) {
18812 atomic_inc(&global_trace.data[cpu]->disabled);
18813 }
18814 if (!test_bit((cpu), (tracing_cpumask).bits) &&
18815 test_bit((cpu), (tracing_cpumask_new).bits)) {
18816 atomic_dec(&global_trace.data[cpu]->disabled);
18817 }
18818 }
18819 __raw_spin_unlock(&ftrace_max_lock);
18820 __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) );
18821
18822 tracing_cpumask = tracing_cpumask_new;
18823
18824 mutex_unlock(&tracing_cpumask_update_lock);
18825
18826 return count;
18827
18828 err_unlock:
18829 mutex_unlock(&tracing_cpumask_update_lock);
18830
18831 return err;
18832 }
18833
18834 static struct file_operations tracing_cpumask_fops = {
18835 .open = tracing_open_generic,
18836 .read = tracing_cpumask_read,
18837 .write = tracing_cpumask_write,
18838 };
18839
18840 static ssize_t
18841 tracing_iter_ctrl_read(struct file *filp, char *ubuf,
18842 size_t cnt, loff_t *ppos)
18843 {
18844 char *buf;
18845 int r = 0;
18846 int len = 0;
18847 int i;
18848
18849
18850 for (i = 0; trace_options[i]; i++) {
18851 len += strlen(trace_options[i]);
18852 len += 3;
18853 }
18854
18855
18856 buf = kmalloc(len + 2, ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
18857 if (!buf)
18858 return -12;
18859
18860 for (i = 0; trace_options[i]; i++) {
18861 if (trace_flags & (1 << i))
18862 r += sprintf(buf + r, "%s ", trace_options[i]);
18863 else
18864 r += sprintf(buf + r, "no%s ", trace_options[i]);
18865 }
18866
18867 r += sprintf(buf + r, "\n");
18868 ({ int __ret_warn_on = !!(r >= len + 2); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 2219); __builtin_expect(!!(__ret_warn_on), 0); });
18869
18870 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
18871
18872 kfree(buf);
18873
18874 return r;
18875 }
18876
18877 static ssize_t
18878 tracing_iter_ctrl_write(struct file *filp, const char *ubuf,
18879 size_t cnt, loff_t *ppos)
18880 {
18881 char buf[64];
18882 char *cmp = buf;
18883 int neg = 0;
18884 int i;
18885
18886 if (cnt >= sizeof(buf))
18887 return -22;
18888
18889 if (copy_from_user(&buf, ubuf, cnt))
18890 return -14;
18891
18892 buf[cnt] = 0;
18893
18894 if (strncmp(buf, "no", 2) == 0) {
18895 neg = 1;
18896 cmp += 2;
18897 }
18898
18899 for (i = 0; trace_options[i]; i++) {
18900 int len = strlen(trace_options[i]);
18901
18902 if (strncmp(cmp, trace_options[i], len) == 0) {
18903 if (neg)
18904 trace_flags &= ~(1 << i);
18905 else
18906 trace_flags |= (1 << i);
18907 break;
18908 }
18909 }
18910
18911
18912
18913 if (!trace_options[i])
18914 return -22;
18915
18916 filp->f_pos += cnt;
18917
18918 return cnt;
18919 }
18920
18921 static struct file_operations tracing_iter_fops = {
18922 .open = tracing_open_generic,
18923 .read = tracing_iter_ctrl_read,
18924 .write = tracing_iter_ctrl_write,
18925 };
18926
18927 static const char readme_msg[] =
18928 "tracing mini-HOWTO:\n\n"
18929 "# mkdir /debug\n"
18930 "# mount -t debugfs nodev /debug\n\n"
18931 "# cat /debug/tracing/available_tracers\n"
18932 "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
18933 "# cat /debug/tracing/current_tracer\n"
18934 "none\n"
18935 "# echo sched_switch > /debug/tracing/current_tracer\n"
18936 "# cat /debug/tracing/current_tracer\n"
18937 "sched_switch\n"
18938 "# cat /debug/tracing/iter_ctrl\n"
18939 "noprint-parent nosym-offset nosym-addr noverbose\n"
18940 "# echo print-parent > /debug/tracing/iter_ctrl\n"
18941 "# echo 1 > /debug/tracing/tracing_enabled\n"
18942 "# cat /debug/tracing/trace > /tmp/trace.txt\n"
18943 "echo 0 > /debug/tracing/tracing_enabled\n"
18944 ;
18945
18946 static ssize_t
18947 tracing_readme_read(struct file *filp, char *ubuf,
18948 size_t cnt, loff_t *ppos)
18949 {
18950 return simple_read_from_buffer(ubuf, cnt, ppos,
18951 readme_msg, strlen(readme_msg));
18952 }
18953
18954 static struct file_operations tracing_readme_fops = {
18955 .open = tracing_open_generic,
18956 .read = tracing_readme_read,
18957 };
18958
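/*
 * tracing_ctrl_read()/tracing_ctrl_write(): the "tracing_enabled" file.
 * Writes are parsed with strict_strtoul(), reduced to 0 or 1, and toggle
 * tr->ctrl plus the current tracer's ctrl_update() hook.  -22 and -14 are
 * the expanded EINVAL and EFAULT return values.
 */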
18959 static ssize_t
18960 tracing_ctrl_read(struct file *filp, char *ubuf,
18961 size_t cnt, loff_t *ppos)
18962 {
18963 struct trace_array *tr = filp->private_data;
18964 char buf[64];
18965 int r;
18966
18967 r = sprintf(buf, "%ld\n", tr->ctrl);
18968 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
18969 }
18970
18971 static ssize_t
18972 tracing_ctrl_write(struct file *filp, const char *ubuf,
18973 size_t cnt, loff_t *ppos)
18974 {
18975 struct trace_array *tr = filp->private_data;
18976 char buf[64];
18977 long val;
18978 int ret;
18979
18980 if (cnt >= sizeof(buf))
18981 return -22;
18982
18983 if (copy_from_user(&buf, ubuf, cnt))
18984 return -14;
18985
18986 buf[cnt] = 0;
18987
18988 ret = strict_strtoul(buf, 10, &val);
18989 if (ret < 0)
18990 return ret;
18991
18992 val = !!val;
18993
18994 mutex_lock(&trace_types_lock);
18995 if (tr->ctrl ^ val) {
18996 if (val)
18997 tracer_enabled = 1;
18998 else
18999 tracer_enabled = 0;
19000
19001 tr->ctrl = val;
19002
19003 if (current_trace && current_trace->ctrl_update)
19004 current_trace->ctrl_update(tr);
19005 }
19006 mutex_unlock(&trace_types_lock);
19007
19008 filp->f_pos += cnt;
19009
19010 return cnt;
19011 }
19012
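/*
 * tracing_set_trace_read()/tracing_set_trace_write(): the "current_tracer"
 * file.  In the write path the (_ctype[...] & 0x20) test looks like the
 * expansion of isspace(), used to strip trailing whitespace from the user
 * string before matching it against the registered tracer names.
 */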
19013 static ssize_t
19014 tracing_set_trace_read(struct file *filp, char *ubuf,
19015 size_t cnt, loff_t *ppos)
19016 {
19017 char buf[max_tracer_type_len+2];
19018 int r;
19019
19020 mutex_lock(&trace_types_lock);
19021 if (current_trace)
19022 r = sprintf(buf, "%s\n", current_trace->name);
19023 else
19024 r = sprintf(buf, "\n");
19025 mutex_unlock(&trace_types_lock);
19026
19027 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
19028 }
19029
19030 static ssize_t
19031 tracing_set_trace_write(struct file *filp, const char *ubuf,
19032 size_t cnt, loff_t *ppos)
19033 {
19034 struct trace_array *tr = &global_trace;
19035 struct tracer *t;
19036 char buf[max_tracer_type_len+1];
19037 int i;
19038 size_t ret;
19039
19040 ret = cnt;
19041
19042 if (cnt > max_tracer_type_len)
19043 cnt = max_tracer_type_len;
19044
19045 if (copy_from_user(&buf, ubuf, cnt))
19046 return -14;
19047
19048 buf[cnt] = 0;
19049
19050
19051 for (i = cnt - 1; i > 0 && (((_ctype[(int)(unsigned char)(buf[i])])&(0x20)) != 0); i--)
19052 buf[i] = 0;
19053
19054 mutex_lock(&trace_types_lock);
19055 for (t = trace_types; t; t = t->next) {
19056 if (strcmp(t->name, buf) == 0)
19057 break;
19058 }
19059 if (!t) {
19060 ret = -22;
19061 goto out;
19062 }
19063 if (t == current_trace)
19064 goto out;
19065
19066 if (current_trace && current_trace->reset)
19067 current_trace->reset(tr);
19068
19069 current_trace = t;
19070 if (t->init)
19071 t->init(tr);
19072
19073 out:
19074 mutex_unlock(&trace_types_lock);
19075
19076 if (ret > 0)
19077 filp->f_pos += ret;
19078
19079 return ret;
19080 }
19081
19082 static ssize_t
19083 tracing_max_lat_read(struct file *filp, char *ubuf,
19084 size_t cnt, loff_t *ppos)
19085 {
19086 unsigned long *ptr = filp->private_data;
19087 char buf[64];
19088 int r;
19089
19090 r = snprintf(buf, sizeof(buf), "%ld\n",
19091 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
19092 if (r > sizeof(buf))
19093 r = sizeof(buf);
19094 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
19095 }
19096
19097 static ssize_t
19098 tracing_max_lat_write(struct file *filp, const char *ubuf,
19099 size_t cnt, loff_t *ppos)
19100 {
19101 long *ptr = filp->private_data;
19102 char buf[64];
19103 long val;
19104 int ret;
19105
19106 if (cnt >= sizeof(buf))
19107 return -22;
19108
19109 if (copy_from_user(&buf, ubuf, cnt))
19110 return -14;
19111
19112 buf[cnt] = 0;
19113
19114 ret = strict_strtoul(buf, 10, &val);
19115 if (ret < 0)
19116 return ret;
19117
19118 *ptr = val * 1000;
19119
19120 return cnt;
19121 }
19122
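/*
 * trace_pipe open/poll/release.  Only one reader is allowed at a time
 * (tracked by the atomic tracing_reader counter; -16 is the expanded EBUSY).
 * tracing_poll_pipe() reports readability with 1 | 64, which matches
 * POLLIN | POLLRDNORM after preprocessing.
 */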
19123 static atomic_t tracing_reader;
19124
19125 static int tracing_open_pipe(struct inode *inode, struct file *filp)
19126 {
19127 struct trace_iterator *iter;
19128
19129 if (tracing_disabled)
19130 return -19;
19131
19132
19133 if (atomic_add_return(1,(&tracing_reader)) != 1) {
19134 atomic_dec(&tracing_reader);
19135 return -16;
19136 }
19137
19138
19139 iter = kzalloc(sizeof(*iter), ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
19140 if (!iter)
19141 return -12;
19142
19143 mutex_lock(&trace_types_lock);
19144 iter->tr = &global_trace;
19145 iter->trace = current_trace;
19146 filp->private_data = iter;
19147
19148 if (iter->trace->pipe_open)
19149 iter->trace->pipe_open(iter);
19150 mutex_unlock(&trace_types_lock);
19151
19152 return 0;
19153 }
19154
19155 static int tracing_release_pipe(struct inode *inode, struct file *file)
19156 {
19157 struct trace_iterator *iter = file->private_data;
19158
19159 kfree(iter);
19160 atomic_dec(&tracing_reader);
19161
19162 return 0;
19163 }
19164
19165 static unsigned int
19166 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
19167 {
19168 struct trace_iterator *iter = filp->private_data;
19169
19170 if (trace_flags & TRACE_ITER_BLOCK) {
19171
19172
19173
19174 return 1 | 64;
19175 } else {
19176 if (!trace_empty(iter))
19177 return 1 | 64;
19178 poll_wait(filp, &trace_wait, poll_table);
19179 if (!trace_empty(iter))
19180 return 1 | 64;
19181
19182 return 0;
19183 }
19184 }
19185
19186
19187
19188
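/*
 * tracing_read_pipe(): the consuming read behind "trace_pipe".  -11 and -4
 * are the expanded EAGAIN and EINTR, (1UL << 12) is presumably PAGE_SIZE,
 * and the __xchg() construct below looks like what
 * set_current_state(TASK_INTERRUPTIBLE) expands to on Blackfin.  While the
 * buffer is empty the reader sleeps in schedule_timeout() (250/10 looks like
 * HZ/10) and retries.
 */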
19189 static ssize_t
19190 tracing_read_pipe(struct file *filp, char *ubuf,
19191 size_t cnt, loff_t *ppos)
19192 {
19193 struct trace_iterator *iter = filp->private_data;
19194 ssize_t sret;
19195
19196
19197 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
19198 if (sret != -16)
19199 return sret;
19200
19201 trace_seq_reset(&iter->seq);
19202
19203 mutex_lock(&trace_types_lock);
19204 if (iter->trace->read) {
19205 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
19206 if (sret)
19207 goto out;
19208 }
19209
19210 waitagain:
19211 sret = 0;
19212 while (trace_empty(iter)) {
19213
19214 if ((filp->f_flags & 00004000)) {
19215 sret = -11;
19216 goto out;
19217 }
19218 # 2579 "kernel/trace/trace.c"
19219 do { (void) ((__typeof__(*(&(get_current())->state)))__xchg((unsigned long)((1)), (&(get_current())->state), sizeof(*(&(get_current())->state)))); } while (0);
19220 iter->tr->waiter = (get_current());
19221
19222 mutex_unlock(&trace_types_lock);
19223
19224
19225 schedule_timeout(250/10);
19226
19227 mutex_lock(&trace_types_lock);
19228
19229 iter->tr->waiter = ((void *)0);
19230
19231 if (signal_pending((get_current()))) {
19232 sret = -4;
19233 goto out;
19234 }
19235
19236 if (iter->trace != current_trace)
19237 goto out;
19238 # 2608 "kernel/trace/trace.c"
19239 if (!tracer_enabled && iter->pos)
19240 break;
19241
19242 continue;
19243 }
19244
19245
19246 if (trace_empty(iter))
19247 goto out;
19248
19249 if (cnt >= (1UL << 12))
19250 cnt = (1UL << 12) - 1;
19251
19252
19253 memset(&iter->seq, 0,
19254 sizeof(struct trace_iterator) -
19255 __builtin_offsetof(struct trace_iterator,seq));
19256 iter->pos = -1;
19257
19258 while (find_next_entry_inc(iter) != ((void *)0)) {
19259 enum print_line_t ret;
19260 int len = iter->seq.len;
19261
19262 ret = print_trace_line(iter);
19263 if (ret == TRACE_TYPE_PARTIAL_LINE) {
19264
19265 iter->seq.len = len;
19266 break;
19267 }
19268
19269 trace_consume(iter);
19270
19271 if (iter->seq.len >= cnt)
19272 break;
19273 }
19274
19275
19276 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
19277 if (iter->seq.readpos >= iter->seq.len)
19278 trace_seq_reset(&iter->seq);
19279
19280
19281
19282
19283
19284 if (sret == -16)
19285 goto waitagain;
19286
19287 out:
19288 mutex_unlock(&trace_types_lock);
19289
19290 return sret;
19291 }
19292
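/*
 * tracing_entries_read()/tracing_entries_write(): the "trace_entries" file.
 * A write is refused with -16 (EBUSY) while tracing is enabled; otherwise it
 * bumps the per-cpu disabled counters, resizes both the global and the max
 * ring buffer to the requested entry count, and drops the counters again.
 * -22, -14 and -12 are the expanded EINVAL, EFAULT and ENOMEM values.
 */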
19293 static ssize_t
19294 tracing_entries_read(struct file *filp, char *ubuf,
19295 size_t cnt, loff_t *ppos)
19296 {
19297 struct trace_array *tr = filp->private_data;
19298 char buf[64];
19299 int r;
19300
19301 r = sprintf(buf, "%lu\n", tr->entries);
19302 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
19303 }
19304
19305 static ssize_t
19306 tracing_entries_write(struct file *filp, const char *ubuf,
19307 size_t cnt, loff_t *ppos)
19308 {
19309 unsigned long val;
19310 char buf[64];
19311 int ret, cpu;
19312 struct trace_array *tr = filp->private_data;
19313
19314 if (cnt >= sizeof(buf))
19315 return -22;
19316
19317 if (copy_from_user(&buf, ubuf, cnt))
19318 return -14;
19319
19320 buf[cnt] = 0;
19321
19322 ret = strict_strtoul(buf, 10, &val);
19323 if (ret < 0)
19324 return ret;
19325
19326
19327 if (!val)
19328 return -22;
19329
19330 mutex_lock(&trace_types_lock);
19331
19332 if (tr->ctrl) {
19333 cnt = -16;
19334 printk("<6>" "ftrace: please disable tracing" " before modifying buffer size\n");
19335
19336 goto out;
19337 }
19338
19339
19340 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) {
19341 if (global_trace.data[cpu])
19342 atomic_inc(&global_trace.data[cpu]->disabled);
19343 if (max_tr.data[cpu])
19344 atomic_inc(&max_tr.data[cpu]->disabled);
19345 }
19346
19347 if (val != global_trace.entries) {
19348 ret = ring_buffer_resize(global_trace.buffer, val);
19349 if (ret < 0) {
19350 cnt = ret;
19351 goto out;
19352 }
19353
19354 ret = ring_buffer_resize(max_tr.buffer, val);
19355 if (ret < 0) {
19356 int r;
19357 cnt = ret;
19358 r = ring_buffer_resize(global_trace.buffer,
19359 global_trace.entries);
19360 if (r < 0) {
19361
19362
19363 ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 2732); __builtin_expect(!!(__ret_warn_on), 0); });
19364 tracing_disabled = 1;
19365 }
19366 goto out;
19367 }
19368
19369 global_trace.entries = val;
19370 }
19371
19372 filp->f_pos += cnt;
19373
19374
19375 if (tracing_disabled)
19376 cnt = -12;
19377 out:
19378 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) {
19379 if (global_trace.data[cpu])
19380 atomic_dec(&global_trace.data[cpu]->disabled);
19381 if (max_tr.data[cpu])
19382 atomic_dec(&max_tr.data[cpu]->disabled);
19383 }
19384
19385 max_tr.entries = global_trace.entries;
19386 mutex_unlock(&trace_types_lock);
19387
19388 return cnt;
19389 }
19390
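/*
 * mark_printk()/tracing_mark_write(): the "trace_marker" file.  Up to 1024
 * bytes of user text are copied in, truncated at the first newline, and
 * injected into the trace buffer through trace_vprintk().
 */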
19391 static int mark_printk(const char *fmt, ...)
19392 {
19393 int ret;
19394 va_list args;
19395 __builtin_va_start(args,fmt);
19396 ret = trace_vprintk(0, fmt, args);
19397 __builtin_va_end(args);
19398 return ret;
19399 }
19400
19401 static ssize_t
19402 tracing_mark_write(struct file *filp, const char *ubuf,
19403 size_t cnt, loff_t *fpos)
19404 {
19405 char *buf;
19406 char *end;
19407 struct trace_array *tr = &global_trace;
19408
19409 if (!tr->ctrl || tracing_disabled)
19410 return -22;
19411
19412 if (cnt > 1024)
19413 cnt = 1024;
19414
19415 buf = kmalloc(cnt + 1, ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
19416 if (buf == ((void *)0))
19417 return -12;
19418
19419 if (copy_from_user(buf, ubuf, cnt)) {
19420 kfree(buf);
19421 return -14;
19422 }
19423
19424
19425 buf[cnt] = '\0';
19426 end = strchr(buf, '\n');
19427 if (end)
19428 *end = '\0';
19429
19430 cnt = mark_printk("%s\n", buf);
19431 kfree(buf);
19432 *fpos += cnt;
19433
19434 return cnt;
19435 }
19436
19437 static struct file_operations tracing_max_lat_fops = {
19438 .open = tracing_open_generic,
19439 .read = tracing_max_lat_read,
19440 .write = tracing_max_lat_write,
19441 };
19442
19443 static struct file_operations tracing_ctrl_fops = {
19444 .open = tracing_open_generic,
19445 .read = tracing_ctrl_read,
19446 .write = tracing_ctrl_write,
19447 };
19448
19449 static struct file_operations set_tracer_fops = {
19450 .open = tracing_open_generic,
19451 .read = tracing_set_trace_read,
19452 .write = tracing_set_trace_write,
19453 };
19454
19455 static struct file_operations tracing_pipe_fops = {
19456 .open = tracing_open_pipe,
19457 .poll = tracing_poll_pipe,
19458 .read = tracing_read_pipe,
19459 .release = tracing_release_pipe,
19460 };
19461
19462 static struct file_operations tracing_entries_fops = {
19463 .open = tracing_open_generic,
19464 .read = tracing_entries_read,
19465 .write = tracing_entries_write,
19466 };
19467
19468 static struct file_operations tracing_mark_fops = {
19469 .open = tracing_open_generic,
19470 .write = tracing_mark_write,
19471 };
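/*
 * debugfs setup: tracing_init_dentry() creates the "tracing" directory and
 * tracer_init_debugfs() (an __init function, hence the .init.text section
 * attributes) populates it with the control files defined above.  The "<4>"
 * string prefix on the printk() calls is the expanded KERN_WARNING level.
 */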
19472 # 2863 "kernel/trace/trace.c"
19473 static struct dentry *d_tracer;
19474
19475 struct dentry *tracing_init_dentry(void)
19476 {
19477 static int once;
19478
19479 if (d_tracer)
19480 return d_tracer;
19481
19482 d_tracer = debugfs_create_dir("tracing", ((void *)0));
19483
19484 if (!d_tracer && !once) {
19485 once = 1;
19486 printk("<4>" "Could not create debugfs directory 'tracing'\n");
19487 return ((void *)0);
19488 }
19489
19490 return d_tracer;
19491 }
19492
19493
19494
19495
19496
19497
19498 static __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) int tracer_init_debugfs(void)
19499 {
19500 struct dentry *d_tracer;
19501 struct dentry *entry;
19502
19503 d_tracer = tracing_init_dentry();
19504
19505 entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
19506 &global_trace, &tracing_ctrl_fops);
19507 if (!entry)
19508 printk("<4>" "Could not create debugfs 'tracing_enabled' entry\n");
19509
19510 entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
19511 ((void *)0), &tracing_iter_fops);
19512 if (!entry)
19513 printk("<4>" "Could not create debugfs 'iter_ctrl' entry\n");
19514
19515 entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
19516 ((void *)0), &tracing_cpumask_fops);
19517 if (!entry)
19518 printk("<4>" "Could not create debugfs 'tracing_cpumask' entry\n");
19519
19520 entry = debugfs_create_file("latency_trace", 0444, d_tracer,
19521 &global_trace, &tracing_lt_fops);
19522 if (!entry)
19523 printk("<4>" "Could not create debugfs 'latency_trace' entry\n");
19524
19525 entry = debugfs_create_file("trace", 0444, d_tracer,
19526 &global_trace, &tracing_fops);
19527 if (!entry)
19528 printk("<4>" "Could not create debugfs 'trace' entry\n");
19529
19530 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
19531 &global_trace, &show_traces_fops);
19532 if (!entry)
19533 printk("<4>" "Could not create debugfs 'available_tracers' entry\n");
19534
19535 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
19536 &global_trace, &set_tracer_fops);
19537 if (!entry)
19538 printk("<4>" "Could not create debugfs 'current_tracer' entry\n");
19539
19540 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
19541 &tracing_max_latency,
19542 &tracing_max_lat_fops);
19543 if (!entry)
19544 printk("<4>" "Could not create debugfs " "'tracing_max_latency' entry\n");
19545
19546
19547 entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
19548 &tracing_thresh, &tracing_max_lat_fops);
19549 if (!entry)
19550 printk("<4>" "Could not create debugfs " "'tracing_thresh' entry\n");
19551
19552 entry = debugfs_create_file("README", 0644, d_tracer,
19553 ((void *)0), &tracing_readme_fops);
19554 if (!entry)
19555 printk("<4>" "Could not create debugfs 'README' entry\n");
19556
19557 entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
19558 ((void *)0), &tracing_pipe_fops);
19559 if (!entry)
19560 printk("<4>" "Could not create debugfs " "'trace_pipe' entry\n");
19561
19562
19563 entry = debugfs_create_file("trace_entries", 0644, d_tracer,
19564 &global_trace, &tracing_entries_fops);
19565 if (!entry)
19566 printk("<4>" "Could not create debugfs " "'trace_entries' entry\n");
19567
19568
19569 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
19570 ((void *)0), &tracing_mark_fops);
19571 if (!entry)
19572 printk("<4>" "Could not create debugfs " "'trace_marker' entry\n");
19573 # 2976 "kernel/trace/trace.c"
19574 return 0;
19575 }
19576
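/*
 * trace_vprintk()/__ftrace_printk(): formats the message into a 1024-byte
 * static buffer under trace_buf_lock (the _spin_lock_irqsave() wrappers are
 * the expanded spin_lock_irqsave()/spin_unlock_irqrestore()), then records a
 * TRACE_PRINT entry in the ring buffer.  The empty do { } while (0) pairs
 * appear to be preempt disable/enable stubs on this non-preemptible config,
 * and the stray ';' lines after each function are likely what
 * EXPORT_SYMBOL_GPL() reduces to without module support.
 */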
19577 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
19578 {
19579 static spinlock_t trace_buf_lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, };
19580 static char trace_buf[1024];
19581
19582 struct ring_buffer_event *event;
19583 struct trace_array *tr = &global_trace;
19584 struct trace_array_cpu *data;
19585 struct print_entry *entry;
19586 unsigned long flags, irq_flags;
19587 int cpu, len = 0, size, pc;
19588
19589 if (!tr->ctrl || tracing_disabled)
19590 return 0;
19591
19592 pc = (current_thread_info()->preempt_count);
19593 do { } while (0);
19594 cpu = 0;
19595 data = tr->data[cpu];
19596
19597 if (__builtin_expect(!!(((&data->disabled)->counter)), 0))
19598 goto out;
19599
19600 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _spin_lock_irqsave(&trace_buf_lock); } while (0);
19601 len = vsnprintf(trace_buf, 1024, fmt, args);
19602
19603 len = ({ typeof(len) _min1 = (len); typeof(1024 -1) _min2 = (1024 -1); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; });
19604 trace_buf[len] = 0;
19605
19606 size = sizeof(*entry) + len + 1;
19607 event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
19608 if (!event)
19609 goto out_unlock;
19610 entry = ring_buffer_event_data(event);
19611 tracing_generic_entry_update(&entry->ent, flags, pc);
19612 entry->ent.type = TRACE_PRINT;
19613 entry->ip = ip;
19614
19615 memcpy(&entry->buf, trace_buf, len);
19616 entry->buf[len] = 0;
19617 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
19618
19619 out_unlock:
19620 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _spin_unlock_irqrestore(&trace_buf_lock, flags); } while (0);
19621
19622 out:
19623 do { } while (0);
19624
19625 return len;
19626 }
19627 ;
19628
19629 int __ftrace_printk(unsigned long ip, const char *fmt, ...)
19630 {
19631 int ret;
19632 va_list ap;
19633
19634 if (!(trace_flags & TRACE_ITER_PRINTK))
19635 return 0;
19636
19637 __builtin_va_start(ap,fmt);
19638 ret = trace_vprintk(ip, fmt, ap);
19639 __builtin_va_end(ap);
19640 return ret;
19641 }
19642 ;
19643
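/*
 * Panic and die notifiers: both handlers call ftrace_dump() so the trace
 * buffer is printed to the console on a panic or an oops, and both return
 * 0x0001, which matches NOTIFY_OK after preprocessing.
 */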
19644 static int trace_panic_handler(struct notifier_block *this,
19645 unsigned long event, void *unused)
19646 {
19647 ftrace_dump();
19648 return 0x0001;
19649 }
19650
19651 static struct notifier_block trace_panic_notifier = {
19652 .notifier_call = trace_panic_handler,
19653 .next = ((void *)0),
19654 .priority = 150
19655 };
19656
19657 static int trace_die_handler(struct notifier_block *self,
19658 unsigned long val,
19659 void *data)
19660 {
19661 switch (val) {
19662 case DIE_OOPS:
19663 ftrace_dump();
19664 break;
19665 default:
19666 break;
19667 }
19668 return 0x0001;
19669 }
19670
19671 static struct notifier_block trace_die_notifier = {
19672 .notifier_call = trace_die_handler,
19673 .priority = 200
19674 };
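/*
 * trace_printk_seq() prints one formatted line (capped at 1000 bytes) to the
 * console, and ftrace_dump() uses it to flush the whole buffer: it kills the
 * function tracer, disables per-cpu tracing, and walks every remaining entry
 * with the latency-format printer.
 */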
19675 # 3091 "kernel/trace/trace.c"
19676 static void
19677 trace_printk_seq(struct trace_seq *s)
19678 {
19679
19680 if (s->len >= 1000)
19681 s->len = 1000;
19682
19683
19684 s->buffer[s->len] = 0;
19685
19686 printk("<6>" "%s", s->buffer);
19687
19688 trace_seq_reset(s);
19689 }
19690
19691
19692 void ftrace_dump(void)
19693 {
19694 static spinlock_t ftrace_dump_lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, };
19695
19696 static struct trace_iterator iter;
19697 static cpumask_t mask;
19698 static int dump_ran;
19699 unsigned long flags;
19700 int cnt = 0, cpu;
19701
19702
19703 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _spin_lock_irqsave(&ftrace_dump_lock); } while (0);
19704 if (dump_ran)
19705 goto out;
19706
19707 dump_ran = 1;
19708
19709
19710 ftrace_kill();
19711
19712 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) {
19713 atomic_inc(&global_trace.data[cpu]->disabled);
19714 }
19715
19716 printk("<6>" "Dumping ftrace buffer:\n");
19717
19718 iter.tr = &global_trace;
19719 iter.trace = current_trace;
19720 # 3143 "kernel/trace/trace.c"
19721 __cpus_clear(&(mask), 1);
19722
19723 while (!trace_empty(&iter)) {
19724
19725 if (!cnt)
19726 printk("<6>" "---------------------------------\n");
19727
19728 cnt++;
19729
19730
19731 memset(&iter.seq, 0,
19732 sizeof(struct trace_iterator) -
19733 __builtin_offsetof(struct trace_iterator,seq));
19734 iter.iter_flags |= TRACE_FILE_LAT_FMT;
19735 iter.pos = -1;
19736
19737 if (find_next_entry_inc(&iter) != ((void *)0)) {
19738 print_trace_line(&iter);
19739 trace_consume(&iter);
19740 }
19741
19742 trace_printk_seq(&iter.seq);
19743 }
19744
19745 if (!cnt)
19746 printk("<6>" " (ftrace buffer empty)\n");
19747 else
19748 printk("<6>" "---------------------------------\n");
19749
19750 out:
19751 do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _spin_unlock_irqrestore(&ftrace_dump_lock, flags); } while (0);
19752 }
19753
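/*
 * tracer_alloc_buffers(): allocates the global and max ring buffers (with
 * RB_FL_OVERWRITE), points the per-cpu trace data at the percpu variables,
 * registers the nop and boot tracers (boot becomes the current tracer), and
 * hooks the panic/die notifiers.  The section attributes are what __init
 * expands to, and the two __initcall_* definitions at the end correspond to
 * early- and level-5 (fs_initcall-style) initcall registration of
 * tracer_alloc_buffers() and tracer_init_debugfs().
 */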
19754 __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) static int tracer_alloc_buffers(void)
19755 {
19756 struct trace_array_cpu *data;
19757 int i;
19758
19759
19760 tracing_buffer_mask = cpu_possible_map;
19761
19762 global_trace.buffer = ring_buffer_alloc(trace_buf_size,
19763 (RB_FL_OVERWRITE));
19764 if (!global_trace.buffer) {
19765 printk("<3>" "tracer: failed to allocate ring buffer!\n");
19766 ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 3188); __builtin_expect(!!(__ret_warn_on), 0); });
19767 return 0;
19768 }
19769 global_trace.entries = ring_buffer_size(global_trace.buffer);
19770
19771
19772 max_tr.buffer = ring_buffer_alloc(trace_buf_size,
19773 (RB_FL_OVERWRITE));
19774 if (!max_tr.buffer) {
19775 printk("<3>" "tracer: failed to allocate max ring buffer!\n");
19776 ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 3198); __builtin_expect(!!(__ret_warn_on), 0); });
19777 ring_buffer_free(global_trace.buffer);
19778 return 0;
19779 }
19780 max_tr.entries = ring_buffer_size(max_tr.buffer);
19781 ({ int __ret_warn_on = !!(max_tr.entries != global_trace.entries); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 3203); __builtin_expect(!!(__ret_warn_on), 0); });
19782
19783
19784
19785 for ((i) = 0; (i) < 1; (i)++, (void)tracing_buffer_mask) {
19786 data = global_trace.data[i] = &(*((void)(i), &per_cpu__global_trace_cpu));
19787 max_tr.data[i] = &(*((void)(i), &per_cpu__max_data));
19788 }
19789
19790 trace_init_cmdlines();
19791
19792 register_tracer(&nop_trace);
19793
19794 register_tracer(&boot_tracer);
19795 current_trace = &boot_tracer;
19796 current_trace->init(&global_trace);
19797
19798
19799
19800
19801
19802 global_trace.ctrl = tracer_enabled;
19803 tracing_disabled = 0;
19804
19805 atomic_notifier_chain_register(&panic_notifier_list,
19806 &trace_panic_notifier);
19807
19808 register_die_notifier(&trace_die_notifier);
19809
19810 return 0;
19811 }
19812 static initcall_t __initcall_tracer_alloc_buffersearly __attribute__((__used__)) __attribute__((__section__(".initcall" "early" ".init"))) = tracer_alloc_buffers;
19813 static initcall_t __initcall_tracer_init_debugfs5 __attribute__((__used__)) __attribute__((__section__(".initcall" "5" ".init"))) = tracer_init_debugfs;