# 1 "kernel/trace/trace.c" # 1 "/usr/local/src/blackfin/git/linux-kernel//" # 1 "" # 1 "" # 1 "./include/linux/autoconf.h" 1 # 1 "" 2 # 1 "kernel/trace/trace.c" # 14 "kernel/trace/trace.c" # 1 "include/linux/utsrelease.h" 1 # 15 "kernel/trace/trace.c" 2 # 1 "include/linux/kallsyms.h" 1 # 1 "include/linux/errno.h" 1 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/errno.h" 1 # 1 "include/asm-generic/errno.h" 1 # 1 "include/asm-generic/errno-base.h" 1 # 5 "include/asm-generic/errno.h" 2 # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/errno.h" 2 # 5 "include/linux/errno.h" 2 # 9 "include/linux/kallsyms.h" 2 # 1 "include/linux/kernel.h" 1 # 10 "include/linux/kernel.h" # 1 "/usr/local/src/blackfin/toolchains/20090128/bfin-uclinux/lib/gcc/bfin-uclinux/4.3.3/include/stdarg.h" 1 3 4 # 43 "/usr/local/src/blackfin/toolchains/20090128/bfin-uclinux/lib/gcc/bfin-uclinux/4.3.3/include/stdarg.h" 3 4 typedef __builtin_va_list __gnuc_va_list; # 105 "/usr/local/src/blackfin/toolchains/20090128/bfin-uclinux/lib/gcc/bfin-uclinux/4.3.3/include/stdarg.h" 3 4 typedef __gnuc_va_list va_list; # 11 "include/linux/kernel.h" 2 # 1 "include/linux/linkage.h" 1 # 1 "include/linux/compiler.h" 1 # 40 "include/linux/compiler.h" # 1 "include/linux/compiler-gcc4.h" 1 # 1 "include/linux/compiler-gcc.h" 1 # 7 "include/linux/compiler-gcc4.h" 2 # 41 "include/linux/compiler.h" 2 # 5 "include/linux/linkage.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/linkage.h" 1 # 6 "include/linux/linkage.h" 2 # 12 "include/linux/kernel.h" 2 # 1 "include/linux/stddef.h" 1 # 15 "include/linux/stddef.h" enum { false = 0, true = 1 }; # 13 "include/linux/kernel.h" 2 # 1 "include/linux/types.h" 1 # 11 "include/linux/types.h" # 1 "include/linux/posix_types.h" 1 # 36 "include/linux/posix_types.h" typedef struct { unsigned long fds_bits [(1024/(8 * sizeof(unsigned long)))]; } __kernel_fd_set; typedef void (*__kernel_sighandler_t)(int); typedef int __kernel_key_t; typedef int __kernel_mqd_t; # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/posix_types.h" 1 # 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/posix_types.h" typedef unsigned long __kernel_ino_t; typedef unsigned short __kernel_mode_t; typedef unsigned short __kernel_nlink_t; typedef long __kernel_off_t; typedef int __kernel_pid_t; typedef unsigned int __kernel_ipc_pid_t; typedef unsigned int __kernel_uid_t; typedef unsigned int __kernel_gid_t; typedef unsigned long __kernel_size_t; typedef long __kernel_ssize_t; typedef int __kernel_ptrdiff_t; typedef long __kernel_time_t; typedef long __kernel_suseconds_t; typedef long __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef int __kernel_daddr_t; typedef char *__kernel_caddr_t; typedef unsigned short __kernel_uid16_t; typedef unsigned short __kernel_gid16_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_gid32_t; typedef unsigned short __kernel_old_uid_t; typedef unsigned short __kernel_old_gid_t; typedef unsigned short __kernel_old_dev_t; typedef long long __kernel_loff_t; typedef struct { int val[2]; } __kernel_fsid_t; # 48 "include/linux/posix_types.h" 2 # 12 "include/linux/types.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1 # 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" # 1 "include/asm-generic/int-ll64.h" 1 # 17 "include/asm-generic/int-ll64.h" typedef __signed__ char __s8; typedef 
unsigned char __u8; typedef __signed__ short __s16; typedef unsigned short __u16; typedef __signed__ int __s32; typedef unsigned int __u32; __extension__ typedef __signed__ long long __s64; __extension__ typedef unsigned long long __u64; # 40 "include/asm-generic/int-ll64.h" typedef signed char s8; typedef unsigned char u8; typedef signed short s16; typedef unsigned short u16; typedef signed int s32; typedef unsigned int u32; typedef signed long long s64; typedef unsigned long long u64; # 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 2 typedef unsigned short umode_t; # 29 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" typedef u32 dma_addr_t; typedef u64 dma64_addr_t; # 13 "include/linux/types.h" 2 typedef __u32 __kernel_dev_t; typedef __kernel_fd_set fd_set; typedef __kernel_dev_t dev_t; typedef __kernel_ino_t ino_t; typedef __kernel_mode_t mode_t; typedef __kernel_nlink_t nlink_t; typedef __kernel_off_t off_t; typedef __kernel_pid_t pid_t; typedef __kernel_daddr_t daddr_t; typedef __kernel_key_t key_t; typedef __kernel_suseconds_t suseconds_t; typedef __kernel_timer_t timer_t; typedef __kernel_clockid_t clockid_t; typedef __kernel_mqd_t mqd_t; typedef _Bool bool; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_uid16_t uid16_t; typedef __kernel_gid16_t gid16_t; typedef unsigned long uintptr_t; typedef __kernel_old_uid_t old_uid_t; typedef __kernel_old_gid_t old_gid_t; # 57 "include/linux/types.h" typedef __kernel_loff_t loff_t; # 66 "include/linux/types.h" typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_ptrdiff_t ptrdiff_t; typedef __kernel_time_t time_t; typedef __kernel_clock_t clock_t; typedef __kernel_caddr_t caddr_t; typedef unsigned char u_char; typedef unsigned short u_short; typedef unsigned int u_int; typedef unsigned long u_long; typedef unsigned char unchar; typedef unsigned short ushort; typedef unsigned int uint; typedef unsigned long ulong; typedef __u8 u_int8_t; typedef __s8 int8_t; typedef __u16 u_int16_t; typedef __s16 int16_t; typedef __u32 u_int32_t; typedef __s32 int32_t; typedef __u8 uint8_t; typedef __u16 uint16_t; typedef __u32 uint32_t; typedef __u64 uint64_t; typedef __u64 u_int64_t; typedef __s64 int64_t; # 140 "include/linux/types.h" typedef u64 sector_t; # 151 "include/linux/types.h" typedef unsigned long blkcnt_t; # 180 "include/linux/types.h" typedef __u16 __le16; typedef __u16 __be16; typedef __u32 __le32; typedef __u32 __be32; typedef __u64 __le64; typedef __u64 __be64; typedef __u16 __sum16; typedef __u32 __wsum; typedef unsigned gfp_t; typedef unsigned fmode_t; typedef u32 phys_addr_t; typedef phys_addr_t resource_size_t; struct ustat { __kernel_daddr_t f_tfree; __kernel_ino_t f_tinode; char f_fname[6]; char f_fpack[6]; }; # 14 "include/linux/kernel.h" 2 # 1 "include/linux/bitops.h" 1 # 17 "include/linux/bitops.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 1 # 9 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h" 1 # 9 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h" static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u32 ___arch__swahb32(__u32 xx) { __u32 tmp; __asm__("%1 = %0 >> 8 (V);\n\t" "%0 = %0 << 8 (V);\n\t" "%0 = %0 | %1;\n\t" : "+d"(xx), "=&d"(tmp)); return xx; } static __inline__ __attribute__((always_inline)) 
__attribute__((__const__)) __u32 ___arch__swahw32(__u32 xx) { __u32 rv; __asm__("%0 = PACK(%1.L, %1.H);\n\t": "=d"(rv): "d"(xx)); return rv; } static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u16 ___arch__swab16(__u16 xx) { __u32 xw = xx; __asm__("%0 <<= 8;\n %0.L = %0.L + %0.H (NS);\n": "+d"(xw)); return (__u16)xw; } # 46 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h" # 1 "include/linux/byteorder/little_endian.h" 1 # 12 "include/linux/byteorder/little_endian.h" # 1 "include/linux/byteorder/swab.h" 1 # 64 "include/linux/byteorder/swab.h" static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u16 ___swab16(__u16 x) { return x<<8 | x>>8; } static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u32 ___swab32(__u32 x) { return x<<24 | x>>24 | (x & (__u32)0x0000ff00UL)<<8 | (x & (__u32)0x00ff0000UL)>>8; } static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u64 ___swab64(__u64 x) { return x<<56 | x>>56 | (x & (__u64)0x000000000000ff00ULL)<<40 | (x & (__u64)0x0000000000ff0000ULL)<<24 | (x & (__u64)0x00000000ff000000ULL)<< 8 | (x & (__u64)0x000000ff00000000ULL)>> 8 | (x & (__u64)0x0000ff0000000000ULL)>>24 | (x & (__u64)0x00ff000000000000ULL)>>40; } # 163 "include/linux/byteorder/swab.h" static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u16 __fswab16(__u16 x) { return ___arch__swab16(x); } static __inline__ __attribute__((always_inline)) __u16 __swab16p(const __u16 *x) { return ___arch__swab16(*(x)); } static __inline__ __attribute__((always_inline)) void __swab16s(__u16 *addr) { ((void)(*(addr) = ___arch__swab16(*(addr)))); } static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswab32(__u32 x) { return ___arch__swahb32(___arch__swahw32(x)); } static __inline__ __attribute__((always_inline)) __u32 __swab32p(const __u32 *x) { return ___arch__swahb32(___arch__swahw32(*(x))); } static __inline__ __attribute__((always_inline)) void __swab32s(__u32 *addr) { ((void)(*(addr) = ___arch__swahb32(___arch__swahw32(*(addr))))); } static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u64 __fswab64(__u64 x) { __u32 h = x >> 32; __u32 l = x & ((1ULL<<32)-1); return (((__u64)(__builtin_constant_p((__u32)(l)) ? ((__u32)( (((__u32)((l)) & (__u32)0x000000ffUL) << 24) | (((__u32)((l)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((l)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((l)) & (__u32)0xff000000UL) >> 24) )) : __fswab32((l)))) << 32) | ((__u64)((__builtin_constant_p((__u32)(h)) ? ((__u32)( (((__u32)((h)) & (__u32)0x000000ffUL) << 24) | (((__u32)((h)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((h)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((h)) & (__u32)0xff000000UL) >> 24) )) : __fswab32((h))))); } static __inline__ __attribute__((always_inline)) __u64 __swab64p(const __u64 *x) { return ___swab64(*(x)); } static __inline__ __attribute__((always_inline)) void __swab64s(__u64 *addr) { ((void)(*(addr) = ___swab64(*(addr)))); } # 13 "include/linux/byteorder/little_endian.h" 2 # 1 "include/linux/byteorder/swabb.h" 1 # 92 "include/linux/byteorder/swabb.h" static inline __attribute__((always_inline)) __u32 __fswahw32(__u32 x) { return ___arch__swahw32(x); } static inline __attribute__((always_inline)) __u32 __swahw32p(__u32 *x) { return (__builtin_constant_p((__u32)(*(x))) ? 
({ __u32 __x = ((*(x))); ((__u32)( (((__u32)(__x) & (__u32)0x0000ffffUL) << 16) | (((__u32)(__x) & (__u32)0xffff0000UL) >> 16) )); }) : __fswahw32((*(x)))); } static inline __attribute__((always_inline)) void __swahw32s(__u32 *addr) { do { *(addr) = __swahw32p((addr)); } while (0); } static inline __attribute__((always_inline)) __u32 __fswahb32(__u32 x) { return ___arch__swahb32(x); } static inline __attribute__((always_inline)) __u32 __swahb32p(__u32 *x) { return (__builtin_constant_p((__u32)(*(x))) ? ({ __u32 __x = ((*(x))); ((__u32)( (((__u32)(__x) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(__x) & (__u32)0xff00ff00UL) >> 8) )); }) : __fswahb32((*(x)))); } static inline __attribute__((always_inline)) void __swahb32s(__u32 *addr) { do { *(addr) = __swahb32p((addr)); } while (0); } # 14 "include/linux/byteorder/little_endian.h" 2 # 44 "include/linux/byteorder/little_endian.h" static inline __attribute__((always_inline)) __le64 __cpu_to_le64p(const __u64 *p) { return ( __le64)*p; } static inline __attribute__((always_inline)) __u64 __le64_to_cpup(const __le64 *p) { return ( __u64)*p; } static inline __attribute__((always_inline)) __le32 __cpu_to_le32p(const __u32 *p) { return ( __le32)*p; } static inline __attribute__((always_inline)) __u32 __le32_to_cpup(const __le32 *p) { return ( __u32)*p; } static inline __attribute__((always_inline)) __le16 __cpu_to_le16p(const __u16 *p) { return ( __le16)*p; } static inline __attribute__((always_inline)) __u16 __le16_to_cpup(const __le16 *p) { return ( __u16)*p; } static inline __attribute__((always_inline)) __be64 __cpu_to_be64p(const __u64 *p) { return ( __be64)__swab64p(p); } static inline __attribute__((always_inline)) __u64 __be64_to_cpup(const __be64 *p) { return __swab64p((__u64 *)p); } static inline __attribute__((always_inline)) __be32 __cpu_to_be32p(const __u32 *p) { return ( __be32)__swab32p(p); } static inline __attribute__((always_inline)) __u32 __be32_to_cpup(const __be32 *p) { return __swab32p((__u32 *)p); } static inline __attribute__((always_inline)) __be16 __cpu_to_be16p(const __u16 *p) { return ( __be16)__swab16p(p); } static inline __attribute__((always_inline)) __u16 __be16_to_cpup(const __be16 *p) { return __swab16p((__u16 *)p); } # 106 "include/linux/byteorder/little_endian.h" # 1 "include/linux/byteorder/generic.h" 1 # 143 "include/linux/byteorder/generic.h" static inline __attribute__((always_inline)) void le16_add_cpu(__le16 *var, u16 val) { *var = (( __le16)(__u16)((( __u16)(__le16)(*var)) + val)); } static inline __attribute__((always_inline)) void le32_add_cpu(__le32 *var, u32 val) { *var = (( __le32)(__u32)((( __u32)(__le32)(*var)) + val)); } static inline __attribute__((always_inline)) void le64_add_cpu(__le64 *var, u64 val) { *var = (( __le64)(__u64)((( __u64)(__le64)(*var)) + val)); } static inline __attribute__((always_inline)) void be16_add_cpu(__be16 *var, u16 val) { *var = (( __be16)(__builtin_constant_p((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)((( __u16)(__be16)(*var))) & (__u16)0x00ffU) << 8) | (((__u16)((( __u16)(__be16)(*var))) & (__u16)0xff00U) >> 8) )) : __fswab16((( __u16)(__be16)(*var)))) + val))) ? ((__u16)( (((__u16)((((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)((( __u16)(__be16)(*var))) & (__u16)0x00ffU) << 8) | (((__u16)((( __u16)(__be16)(*var))) & (__u16)0xff00U) >> 8) )) : __fswab16((( __u16)(__be16)(*var)))) + val))) & (__u16)0x00ffU) << 8) | (((__u16)((((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? 
((__u16)( (((__u16)((( __u16)(__be16)(*var))) & (__u16)0x00ffU) << 8) | (((__u16)((( __u16)(__be16)(*var))) & (__u16)0xff00U) >> 8) )) : __fswab16((( __u16)(__be16)(*var)))) + val))) & (__u16)0xff00U) >> 8) )) : __fswab16((((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)((( __u16)(__be16)(*var))) & (__u16)0x00ffU) << 8) | (((__u16)((( __u16)(__be16)(*var))) & (__u16)0xff00U) >> 8) )) : __fswab16((( __u16)(__be16)(*var)))) + val))))); } static inline __attribute__((always_inline)) void be32_add_cpu(__be32 *var, u32 val) { *var = (( __be32)(__builtin_constant_p((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x000000ffUL) << 24) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((( __u32)(__be32)(*var)))) + val))) ? ((__u32)( (((__u32)((((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x000000ffUL) << 24) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((( __u32)(__be32)(*var)))) + val))) & (__u32)0x000000ffUL) << 24) | (((__u32)((((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x000000ffUL) << 24) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((( __u32)(__be32)(*var)))) + val))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x000000ffUL) << 24) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((( __u32)(__be32)(*var)))) + val))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x000000ffUL) << 24) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((( __u32)(__be32)(*var)))) + val))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x000000ffUL) << 24) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x0000ff00UL) << 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((( __u32)(__be32)(*var))) & (__u32)0xff000000UL) >> 24) )) : __fswab32((( __u32)(__be32)(*var)))) + val))))); } static inline __attribute__((always_inline)) void be64_add_cpu(__be64 *var, u64 val) { *var = (( __be64)(__builtin_constant_p((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) ? ((__u64)( (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000000000ffULL) << 56) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000000000ff00ULL) << 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000000000ff0000ULL) << 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00000000ff000000ULL) << 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x000000ff00000000ULL) >> 8) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x0000ff0000000000ULL) >> 24) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0x00ff000000000000ULL) >> 40) | (__u64)(((__u64)((( __u64)(__be64)(*var))) & (__u64)0xff00000000000000ULL) >> 56) )) : __fswab64((( __u64)(__be64)(*var)))) + val))))); } # 107 "include/linux/byteorder/little_endian.h" 2 # 47 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h" 2 # 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 1 "include/asm-generic/bitops/ffs.h" 1 # 12 "include/asm-generic/bitops/ffs.h" static inline __attribute__((always_inline)) int ffs(int x) { int r = 1; if (!x) return 0; if (!(x & 0xffff)) { x >>= 16; r += 16; } if (!(x & 0xff)) { x >>= 8; r += 8; } if (!(x & 0xf)) { x >>= 4; r += 4; } if (!(x & 3)) { x >>= 2; r += 2; } if (!(x & 1)) { x >>= 1; r += 1; } return r; } # 18 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 1 "include/asm-generic/bitops/__ffs.h" 1 # 12 "include/asm-generic/bitops/__ffs.h" static inline __attribute__((always_inline)) unsigned long __ffs(unsigned long word) { int num = 0; if ((word & 0xffff) == 0) { num += 16; word >>= 16; } if ((word & 0xff) == 0) { num += 8; word >>= 8; } if ((word & 0xf) == 0) { num += 4; word >>= 4; } if ((word & 0x3) == 0) { num += 2; word >>= 2; } if ((word & 0x1) == 0) num += 1; return num; } # 19 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 1 "include/asm-generic/bitops/sched.h" 1 # 12 "include/asm-generic/bitops/sched.h" static inline __attribute__((always_inline)) int sched_find_first_bit(const unsigned long *b) { if (b[0]) return __ffs(b[0]); if (b[1]) return __ffs(b[1]) + 32; if (b[2]) return __ffs(b[2]) + 64; return __ffs(b[3]) + 96; } # 20 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 1 "include/asm-generic/bitops/ffz.h" 1 # 21 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 84 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 1 # 39 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" # 1 
"/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/anomaly.h" 1 # 40 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pda.h" 1 # 30 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pda.h" struct blackfin_pda { struct blackfin_pda *next; unsigned long syscfg; unsigned long *ipdt; unsigned long *ipdt_swapcount; unsigned long *dpdt; unsigned long *dpdt_swapcount; unsigned long ex_iptr; unsigned long ex_optr; unsigned long ex_buf[4]; unsigned long ex_imask; unsigned long *ex_stack; unsigned long last_cplb_fault_retx; unsigned long dcplb_fault_addr; unsigned long icplb_fault_addr; unsigned long retx; unsigned long seqstat; unsigned int __nmi_count; }; extern struct blackfin_pda cpu_pda[]; void reserve_pda(void); # 41 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" 1 # 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" 1 # 14 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" static inline __attribute__((always_inline)) void SSYNC(void) { int _tmp; if ((0x0003 < 6)) __asm__ __volatile__( "cli %0;" "nop;" "nop;" "ssync;" "sti %0;" : "=d" (_tmp) ); else if ((0x0003 < 5)) __asm__ __volatile__( "nop;" "nop;" "nop;" "ssync;" ); else __asm__ __volatile__("ssync;"); } static inline __attribute__((always_inline)) void CSYNC(void) { int _tmp; if ((0x0003 < 6)) __asm__ __volatile__( "cli %0;" "nop;" "nop;" "csync;" "sti %0;" : "=d" (_tmp) ); else if ((0x0003 < 5)) __asm__ __volatile__( "nop;" "nop;" "nop;" "csync;" ); else __asm__ __volatile__("csync;"); } # 89 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 1 # 36 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/bf533.h" 1 # 37 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/mem_map.h" 1 # 38 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/defBF532.h" 1 # 51 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/defBF532.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/def_LPBlackfin.h" 1 # 52 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/defBF532.h" 2 # 39 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/anomaly.h" 1 # 40 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" 1 # 34 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" 1 # 35 
"/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_LPBlackfin.h" 1 # 41 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" 2 # 680 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h" 1 # 21 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/irq.h" 1 # 22 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" 1 # 24 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h" 2 extern unsigned long bfin_irq_flags; # 283 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h" static inline __attribute__((always_inline)) int irq_canonicalize(int irq) { return irq; } # 681 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" 2 # 692 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" static inline __attribute__((always_inline)) void bfin_write_FIO_FLAG_D(unsigned short val) { unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00700), "d" ((uint16_t)(val)) : "memory" ); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } static inline __attribute__((always_inline)) void bfin_write_FIO_FLAG_C(unsigned short val) { unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00704), "d" ((uint16_t)(val)) : "memory" ); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } static inline __attribute__((always_inline)) void bfin_write_FIO_FLAG_S(unsigned short val) { unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00708), "d" ((uint16_t)(val)) : "memory" ); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } static inline __attribute__((always_inline)) void bfin_write_FIO_FLAG_T(unsigned short val) { unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC0070C), "d" ((uint16_t)(val)) : "memory" ); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } # 708 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" static inline __attribute__((always_inline)) u16 bfin_read_FIO_FLAG_D(void) { unsigned long flags; u16 ret; __asm__ __volatile__( "cli %0;" 
"sti %1;" : "=&d" (flags) : "d" (0x3F) ); ret = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00700) ); __v; }); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return ret; } static inline __attribute__((always_inline)) u16 bfin_read_FIO_FLAG_C(void) { unsigned long flags; u16 ret; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); ret = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00704) ); __v; }); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return ret; } static inline __attribute__((always_inline)) u16 bfin_read_FIO_FLAG_S(void) { unsigned long flags; u16 ret; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); ret = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00708) ); __v; }); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return ret; } static inline __attribute__((always_inline)) u16 bfin_read_FIO_FLAG_T(void) { unsigned long flags; u16 ret; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); ret = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC0070C) ); __v; }); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return ret; } # 725 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" static __inline__ __attribute__((always_inline)) void bfin_write_PLL_CTL(unsigned int val) { unsigned long flags, iwr; if (val == ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00000) ); __v; })) return; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); iwr = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00124) ); __v; }); __asm__ __volatile__( "nop;" "[%0] = %1;" : : "a" (0xFFC00124), "d" ((1 << (0))) : "memory" ); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00000), "d" ((uint16_t)(val)) : "memory" ); SSYNC(); asm("IDLE;"); __asm__ __volatile__( "nop;" "[%0] = %1;" : : "a" (0xFFC00124), "d" (iwr) : "memory" ); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } static __inline__ __attribute__((always_inline)) void bfin_write_VR_CTL(unsigned int val) { unsigned long flags, iwr; if (val == ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00008) ); __v; })) return; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); iwr = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00124) ); __v; }); __asm__ __volatile__( "nop;" "[%0] = %1;" : : "a" (0xFFC00124), "d" ((1 << (0))) : "memory" ); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00008), "d" ((uint16_t)(val)) : "memory" ); SSYNC(); asm("IDLE;"); __asm__ 
__volatile__( "nop;" "[%0] = %1;" : : "a" (0xFFC00124), "d" (iwr) : "memory" ); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } # 43 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2 # 90 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h" 1 # 34 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h" # 1 "include/asm-generic/sections.h" 1 extern char _text[], _stext[], _etext[]; extern char _data[], _sdata[], _edata[]; extern char __bss_start[], __bss_stop[]; extern char __init_begin[], __init_end[]; extern char _sinittext[], _einittext[]; extern char _end[]; extern char __per_cpu_start[], __per_cpu_end[]; extern char __kprobes_text_start[], __kprobes_text_end[]; extern char __initdata_begin[], __initdata_end[]; extern char __start_rodata[], __end_rodata[]; # 35 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ptrace.h" 1 # 24 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ptrace.h" struct pt_regs { long orig_pc; long ipend; long seqstat; long rete; long retn; long retx; long pc; long rets; long reserved; long astat; long lb1; long lb0; long lt1; long lt0; long lc1; long lc0; long a1w; long a1x; long a0w; long a0x; long b3; long b2; long b1; long b0; long l3; long l2; long l1; long l0; long m3; long m2; long m1; long m0; long i3; long i2; long i1; long i0; long usp; long fp; long p5; long p4; long p3; long p2; long p1; long p0; long r7; long r6; long r5; long r4; long r3; long r2; long r1; long r0; long orig_r0; long orig_p0; long syscfg; }; # 99 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ptrace.h" extern void show_regs(struct pt_regs *); # 36 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/user.h" 1 # 36 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/user.h" struct user_bfinfp_struct { }; struct user_regs_struct { long r0, r1, r2, r3, r4, r5, r6, r7; long p0, p1, p2, p3, p4, p5, usp, fp; long i0, i1, i2, i3; long l0, l1, l2, l3; long b0, b1, b2, b3; long m0, m1, m2, m3; long a0w, a1w; long a0x, a1x; unsigned long rets; unsigned long astat; unsigned long pc; unsigned long orig_p0; }; struct user { struct user_regs_struct regs; unsigned long int u_tsize; unsigned long int u_dsize; unsigned long int u_ssize; unsigned long start_code; unsigned long start_stack; long int signal; int reserved; unsigned long u_ar0; unsigned long magic; char u_comm[32]; }; # 37 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h" 2 # 50 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h" extern void bfin_setup_caches(unsigned int cpu); extern void bfin_setup_cpudata(unsigned int cpu); extern unsigned long get_cclk(void); extern unsigned long get_sclk(void); extern unsigned long sclk_to_usecs(unsigned long sclk); extern unsigned long usecs_to_sclk(unsigned long usecs); extern void dump_bfin_process(struct pt_regs *regs); extern void dump_bfin_mem(struct pt_regs *regs); extern void dump_bfin_trace_buffer(void); extern int init_arch_irq(void); extern void init_exception_vectors(void); extern void program_IAR(void); extern void 
lower_to_irq14(void); extern void bfin_return_from_exception(void); extern void evt14_softirq(void); extern void asm_do_IRQ(unsigned int irq, struct pt_regs *regs); extern int bfin_internal_set_wake(unsigned int irq, unsigned int state); extern void *l1_data_A_sram_alloc(size_t); extern void *l1_data_B_sram_alloc(size_t); extern void *l1_inst_sram_alloc(size_t); extern void *l1_data_sram_alloc(size_t); extern void *l1_data_sram_zalloc(size_t); extern void *l2_sram_alloc(size_t); extern void *l2_sram_zalloc(size_t); extern int l1_data_A_sram_free(const void*); extern int l1_data_B_sram_free(const void*); extern int l1_inst_sram_free(const void*); extern int l1_data_sram_free(const void*); extern int l2_sram_free(const void *); extern int sram_free(const void*); extern void *sram_alloc_with_lsl(size_t, unsigned long); extern int sram_free_with_lsl(const void*); extern void *isram_memcpy(void *dest, const void *src, size_t n); extern const char bfin_board_name[]; extern unsigned long bfin_sic_iwr[]; extern unsigned vr_wakeup; extern u16 _bfin_swrst; extern unsigned long _ramstart, _ramend, _rambase; extern unsigned long memory_start, memory_end, physical_mem_end; extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[], _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[], _ebss_l2[], _l2_lma_start[]; extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size; extern void cache_grab_lock(int way); extern void bfin_cache_lock(int way); # 91 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" 2 # 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/segment.h" 1 # 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" 2 static inline __attribute__((always_inline)) unsigned long rdusp(void) { unsigned long usp; __asm__ __volatile__("%0 = usp;\n\t":"=da"(usp)); return usp; } static inline __attribute__((always_inline)) void wrusp(unsigned long usp) { __asm__ __volatile__("usp = %0;\n\t"::"da"(usp)); } static inline __attribute__((always_inline)) unsigned long __get_SP(void) { unsigned long sp; __asm__ __volatile__("%0 = sp;\n\t" : "=da"(sp)); return sp; } # 48 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" struct thread_struct { unsigned long ksp; unsigned long usp; unsigned short seqstat; unsigned long esp0; unsigned long pc; void * debuggerinfo; }; # 94 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" struct task_struct; static inline __attribute__((always_inline)) void release_thread(struct task_struct *dead_task) { } extern int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags); static inline __attribute__((always_inline)) void exit_thread(void) { } unsigned long get_wchan(struct task_struct *p); # 132 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" static inline __attribute__((always_inline)) uint32_t __attribute__((pure)) bfin_revid(void) { uint32_t revid = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }) >> 28; # 154 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" return revid; } static inline __attribute__((always_inline)) uint16_t __attribute__((pure)) bfin_cpuid(void) { return (({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = 
[%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }) & 0x0FFFF000) >> 12; } static inline __attribute__((always_inline)) uint32_t __attribute__((pure)) bfin_dspid(void) { return ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFE05000) ); __v; }); } static inline __attribute__((always_inline)) uint32_t __attribute__((pure)) bfin_compiled_revid(void) { return 3; # 188 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" } # 42 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2 # 133 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" struct __xchg_dummy { unsigned long a[100]; }; static inline __attribute__((always_inline)) unsigned long __xchg(unsigned long x, volatile void *ptr, int size) { unsigned long tmp = 0; unsigned long flags = 0; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); switch (size) { case 1: __asm__ __volatile__ ("%0 = b%2 (z);\n\t" "b%2 = %1;\n\t" : "=&d" (tmp) : "d" (x), "m" (*((volatile struct __xchg_dummy *)(ptr))) : "memory"); break; case 2: __asm__ __volatile__ ("%0 = w%2 (z);\n\t" "w%2 = %1;\n\t" : "=&d" (tmp) : "d" (x), "m" (*((volatile struct __xchg_dummy *)(ptr))) : "memory"); break; case 4: __asm__ __volatile__ ("%0 = %2;\n\t" "%2 = %1;\n\t" : "=&d" (tmp) : "d" (x), "m" (*((volatile struct __xchg_dummy *)(ptr))) : "memory"); break; } do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return tmp; } # 1 "include/asm-generic/cmpxchg-local.h" 1 extern unsigned long wrong_size_cmpxchg(volatile void *ptr); static inline __attribute__((always_inline)) unsigned long __cmpxchg_local_generic(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long flags, prev; if (size == 8 && sizeof(unsigned long) != 8) wrong_size_cmpxchg(ptr); __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); switch (size) { case 1: prev = *(u8 *)ptr; if (prev == old) *(u8 *)ptr = (u8)new; break; case 2: prev = *(u16 *)ptr; if (prev == old) *(u16 *)ptr = (u16)new; break; case 4: prev = *(u32 *)ptr; if (prev == old) *(u32 *)ptr = (u32)new; break; case 8: prev = *(u64 *)ptr; if (prev == old) *(u64 *)ptr = (u64)new; break; default: wrong_size_cmpxchg(ptr); } do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return prev; } static inline __attribute__((always_inline)) u64 __cmpxchg64_local_generic(volatile void *ptr, u64 old, u64 new) { u64 prev; unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); prev = *(u64 *)ptr; if (prev == old) *(u64 *)ptr = new; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return prev; } # 171 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2 # 181 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" # 1 "include/asm-generic/cmpxchg.h" 1 # 182 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2 # 195 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/l1layout.h" 1 # 17 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/l1layout.h" struct l1_scratch_task_info { void *stack_start; void *lowest_sp; }; # 196 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2 # 
1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mem_map.h" 1 # 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mem_map.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/mem_map.h" 1 # 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mem_map.h" 2 # 51 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mem_map.h" static inline __attribute__((always_inline)) ulong get_l1_scratch_start_cpu(int cpu) { return 0xFFB00000; } static inline __attribute__((always_inline)) ulong get_l1_code_start_cpu(int cpu) { return 0xFFA00000; } static inline __attribute__((always_inline)) ulong get_l1_data_a_start_cpu(int cpu) { return 0xFF800000; } static inline __attribute__((always_inline)) ulong get_l1_data_b_start_cpu(int cpu) { return 0xFF900000; } static inline __attribute__((always_inline)) ulong get_l1_scratch_start(void) { return get_l1_scratch_start_cpu(0); } static inline __attribute__((always_inline)) ulong get_l1_code_start(void) { return get_l1_code_start_cpu(0); } static inline __attribute__((always_inline)) ulong get_l1_data_a_start(void) { return get_l1_data_a_start_cpu(0); } static inline __attribute__((always_inline)) ulong get_l1_data_b_start(void) { return get_l1_data_b_start_cpu(0); } # 197 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2 struct task_struct *resume(struct task_struct *prev, struct task_struct *next); # 85 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 static inline __attribute__((always_inline)) void set_bit(int nr, volatile unsigned long *addr) { int *a = (int *)addr; int mask; unsigned long flags; a += nr >> 5; mask = 1 << (nr & 0x1f); __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); *a |= mask; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } static inline __attribute__((always_inline)) void clear_bit(int nr, volatile unsigned long *addr) { int *a = (int *)addr; int mask; unsigned long flags; a += nr >> 5; mask = 1 << (nr & 0x1f); __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); *a &= ~mask; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } static inline __attribute__((always_inline)) void change_bit(int nr, volatile unsigned long *addr) { int mask, flags; unsigned long *ADDR = (unsigned long *)addr; ADDR += nr >> 5; mask = 1 << (nr & 31); __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); *ADDR ^= mask; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } static inline __attribute__((always_inline)) int test_and_set_bit(int nr, volatile unsigned long *addr) { int mask, retval; volatile unsigned int *a = (volatile unsigned int *)addr; unsigned long flags; a += nr >> 5; mask = 1 << (nr & 0x1f); __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); retval = (mask & *a) != 0; *a |= mask; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return retval; } static inline __attribute__((always_inline)) int test_and_clear_bit(int nr, volatile unsigned long *addr) { int mask, retval; volatile unsigned int *a = (volatile unsigned int *)addr; unsigned long flags; a += nr >> 5; mask = 1 << (nr & 0x1f); __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); 
retval = (mask & *a) != 0; *a &= ~mask; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return retval; } static inline __attribute__((always_inline)) int test_and_change_bit(int nr, volatile unsigned long *addr) { int mask, retval; volatile unsigned int *a = (volatile unsigned int *)addr; unsigned long flags; a += nr >> 5; mask = 1 << (nr & 0x1f); __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); retval = (mask & *a) != 0; *a ^= mask; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return retval; } # 177 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" static inline __attribute__((always_inline)) void __set_bit(int nr, volatile unsigned long *addr) { int *a = (int *)addr; int mask; a += nr >> 5; mask = 1 << (nr & 0x1f); *a |= mask; } static inline __attribute__((always_inline)) void __clear_bit(int nr, volatile unsigned long *addr) { int *a = (int *)addr; int mask; a += nr >> 5; mask = 1 << (nr & 0x1f); *a &= ~mask; } static inline __attribute__((always_inline)) void __change_bit(int nr, volatile unsigned long *addr) { int mask; unsigned long *ADDR = (unsigned long *)addr; ADDR += nr >> 5; mask = 1 << (nr & 31); *ADDR ^= mask; } static inline __attribute__((always_inline)) int __test_and_set_bit(int nr, volatile unsigned long *addr) { int mask, retval; volatile unsigned int *a = (volatile unsigned int *)addr; a += nr >> 5; mask = 1 << (nr & 0x1f); retval = (mask & *a) != 0; *a |= mask; return retval; } static inline __attribute__((always_inline)) int __test_and_clear_bit(int nr, volatile unsigned long *addr) { int mask, retval; volatile unsigned int *a = (volatile unsigned int *)addr; a += nr >> 5; mask = 1 << (nr & 0x1f); retval = (mask & *a) != 0; *a &= ~mask; return retval; } static inline __attribute__((always_inline)) int __test_and_change_bit(int nr, volatile unsigned long *addr) { int mask, retval; volatile unsigned int *a = (volatile unsigned int *)addr; a += nr >> 5; mask = 1 << (nr & 0x1f); retval = (mask & *a) != 0; *a ^= mask; return retval; } static inline __attribute__((always_inline)) int __test_bit(int nr, const void *addr) { int *a = (int *)addr; int mask; a += nr >> 5; mask = 1 << (nr & 0x1f); return ((mask & *a) != 0); } static inline __attribute__((always_inline)) int test_bit(int nr, const void *addr) { return __test_bit(nr, addr); } # 1 "include/asm-generic/bitops/find.h" 1 # 266 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 1 "include/asm-generic/bitops/hweight.h" 1 extern unsigned int hweight32(unsigned int w); extern unsigned int hweight16(unsigned int w); extern unsigned int hweight8(unsigned int w); extern unsigned long hweight64(__u64 w); # 267 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 1 "include/asm-generic/bitops/lock.h" 1 # 268 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 1 "include/asm-generic/bitops/ext2-atomic.h" 1 # 270 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 1 "include/asm-generic/bitops/ext2-non-atomic.h" 1 # 1 "include/asm-generic/bitops/le.h" 1 # 5 "include/asm-generic/bitops/ext2-non-atomic.h" 2 # 271 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 1 "include/asm-generic/bitops/minix.h" 1 # 273 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 1 
"include/asm-generic/bitops/fls.h" 1 # 12 "include/asm-generic/bitops/fls.h" static inline __attribute__((always_inline)) int fls(int x) { int r = 32; if (!x) return 0; if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; } if (!(x & 0xff000000u)) { x <<= 8; r -= 8; } if (!(x & 0xf0000000u)) { x <<= 4; r -= 4; } if (!(x & 0xc0000000u)) { x <<= 2; r -= 2; } if (!(x & 0x80000000u)) { x <<= 1; r -= 1; } return r; } # 277 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 1 "include/asm-generic/bitops/fls64.h" 1 # 18 "include/asm-generic/bitops/fls64.h" static inline __attribute__((always_inline)) int fls64(__u64 x) { __u32 h = x >> 32; if (h) return fls(h) + 32; return fls(x); } # 278 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2 # 18 "include/linux/bitops.h" 2 static __inline__ __attribute__((always_inline)) int get_bitmask_order(unsigned int count) { int order; order = fls(count); return order; } static __inline__ __attribute__((always_inline)) int get_count_order(unsigned int count) { int order; order = fls(count) - 1; if (count & (count - 1)) order++; return order; } static inline __attribute__((always_inline)) unsigned long hweight_long(unsigned long w) { return sizeof(w) == 4 ? hweight32(w) : hweight64(w); } static inline __attribute__((always_inline)) __u32 rol32(__u32 word, unsigned int shift) { return (word << shift) | (word >> (32 - shift)); } static inline __attribute__((always_inline)) __u32 ror32(__u32 word, unsigned int shift) { return (word >> shift) | (word << (32 - shift)); } static inline __attribute__((always_inline)) __u16 rol16(__u16 word, unsigned int shift) { return (word << shift) | (word >> (16 - shift)); } static inline __attribute__((always_inline)) __u16 ror16(__u16 word, unsigned int shift) { return (word >> shift) | (word << (16 - shift)); } static inline __attribute__((always_inline)) __u8 rol8(__u8 word, unsigned int shift) { return (word << shift) | (word >> (8 - shift)); } static inline __attribute__((always_inline)) __u8 ror8(__u8 word, unsigned int shift) { return (word >> shift) | (word << (8 - shift)); } static inline __attribute__((always_inline)) unsigned fls_long(unsigned long l) { if (sizeof(l) == 4) return fls(l); return fls64(l); } # 148 "include/linux/bitops.h" extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); # 158 "include/linux/bitops.h" extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset); # 16 "include/linux/kernel.h" 2 # 1 "include/linux/log2.h" 1 # 21 "include/linux/log2.h" extern __attribute__((const, noreturn)) int ____ilog2_NaN(void); # 31 "include/linux/log2.h" static inline __attribute__((always_inline)) __attribute__((const)) int __ilog2_u32(u32 n) { return fls(n) - 1; } static inline __attribute__((always_inline)) __attribute__((const)) int __ilog2_u64(u64 n) { return fls64(n) - 1; } static inline __attribute__((always_inline)) __attribute__((const)) bool is_power_of_2(unsigned long n) { return (n != 0 && ((n & (n - 1)) == 0)); } static inline __attribute__((always_inline)) __attribute__((const)) unsigned long __roundup_pow_of_two(unsigned long n) { return 1UL << fls_long(n - 1); } static inline __attribute__((always_inline)) __attribute__((const)) unsigned long __rounddown_pow_of_two(unsigned long n) { return 1UL << (fls_long(n) - 1); } # 17 "include/linux/kernel.h" 2 # 1 "include/linux/typecheck.h" 1 # 18 "include/linux/kernel.h" 2 # 1 "include/linux/ratelimit.h" 1 # 
1 "include/linux/param.h" 1 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/param.h" 1 # 5 "include/linux/param.h" 2 # 4 "include/linux/ratelimit.h" 2 struct ratelimit_state { int interval; int burst; int printed; int missed; unsigned long begin; }; extern int __ratelimit(struct ratelimit_state *rs); # 19 "include/linux/kernel.h" 2 # 1 "include/linux/dynamic_printk.h" 1 # 13 "include/linux/dynamic_printk.h" extern int dynamic_enabled; extern long long dynamic_printk_enabled; extern long long dynamic_printk_enabled2; struct mod_debug { char *modname; char *logical_modname; char *flag_names; int type; int hash; int hash2; } __attribute__((aligned(8))); int register_dynamic_debug_module(char *mod_name, int type, char *share_name, char *flags, int hash, int hash2); # 78 "include/linux/dynamic_printk.h" static inline __attribute__((always_inline)) int unregister_dynamic_debug_module(const char *mod_name) { return 0; } static inline __attribute__((always_inline)) int __dynamic_dbg_enabled_helper(char *modname, int type, int value, int hash) { return 0; } # 20 "include/linux/kernel.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bug.h" 1 # 15 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bug.h" # 1 "include/asm-generic/bug.h" 1 # 36 "include/asm-generic/bug.h" extern void warn_on_slowpath(const char *file, const int line); extern void warn_slowpath(const char *file, const int line, const char *fmt, ...) __attribute__((format(printf, 3, 4))); # 16 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bug.h" 2 # 22 "include/linux/kernel.h" 2 extern const char linux_banner[]; extern const char linux_proc_banner[]; # 56 "include/linux/kernel.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/div64.h" 1 # 1 "include/asm-generic/div64.h" 1 # 35 "include/asm-generic/div64.h" extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/div64.h" 2 # 57 "include/linux/kernel.h" 2 # 101 "include/linux/kernel.h" extern int console_printk[]; struct completion; struct pt_regs; struct user; # 144 "include/linux/kernel.h" extern struct atomic_notifier_head panic_notifier_list; extern long (*panic_blink)(long time); void panic(const char * fmt, ...) __attribute__ ((noreturn, format (printf, 1, 2))) __attribute__((__cold__)); extern void oops_enter(void); extern void oops_exit(void); extern int oops_may_print(void); void do_exit(long error_code) __attribute__((noreturn)); void complete_and_exit(struct completion *, long) __attribute__((noreturn)); extern unsigned long simple_strtoul(const char *,char **,unsigned int); extern long simple_strtol(const char *,char **,unsigned int); extern unsigned long long simple_strtoull(const char *,char **,unsigned int); extern long long simple_strtoll(const char *,char **,unsigned int); extern int strict_strtoul(const char *, unsigned int, unsigned long *); extern int strict_strtol(const char *, unsigned int, long *); extern int strict_strtoull(const char *, unsigned int, unsigned long long *); extern int strict_strtoll(const char *, unsigned int, long long *); extern int sprintf(char * buf, const char * fmt, ...) __attribute__ ((format (printf, 2, 3))); extern int vsprintf(char *buf, const char *, va_list) __attribute__ ((format (printf, 2, 0))); extern int snprintf(char * buf, size_t size, const char * fmt, ...) 
__attribute__ ((format (printf, 3, 4))); extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) __attribute__ ((format (printf, 3, 0))); extern int scnprintf(char * buf, size_t size, const char * fmt, ...) __attribute__ ((format (printf, 3, 4))); extern int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) __attribute__ ((format (printf, 3, 0))); extern char *kasprintf(gfp_t gfp, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); extern int sscanf(const char *, const char *, ...) __attribute__ ((format (scanf, 2, 3))); extern int vsscanf(const char *, const char *, va_list) __attribute__ ((format (scanf, 2, 0))); extern int get_option(char **str, int *pint); extern char *get_options(const char *str, int nints, int *ints); extern unsigned long long memparse(const char *ptr, char **retptr); extern int core_kernel_text(unsigned long addr); extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); struct pid; extern struct pid *session_of_pgrp(struct pid *pgrp); # 219 "include/linux/kernel.h" int vprintk(const char *fmt, va_list args) __attribute__ ((format (printf, 1, 0))); int printk(const char * fmt, ...) __attribute__ ((format (printf, 1, 2))) __attribute__((__cold__)); extern struct ratelimit_state printk_ratelimit_state; extern int printk_ratelimit(void); extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); # 241 "include/linux/kernel.h" extern int printk_needs_cpu(int cpu); extern void printk_tick(void); extern void __attribute__((format(printf, 1, 2))) early_printk(const char *fmt, ...); unsigned long int_sqrt(unsigned long); static inline __attribute__((always_inline)) void console_silent(void) { (console_printk[0]) = 0; } static inline __attribute__((always_inline)) void console_verbose(void) { if ((console_printk[0])) (console_printk[0]) = 15; } extern void bust_spinlocks(int yes); extern void wake_up_klogd(void); extern int oops_in_progress; extern int panic_timeout; extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern const char *print_tainted(void); extern void add_taint(unsigned flag); extern int test_taint(unsigned flag); extern unsigned long get_taint(void); extern int root_mountflags; extern enum system_states { SYSTEM_BOOTING, SYSTEM_RUNNING, SYSTEM_HALT, SYSTEM_POWER_OFF, SYSTEM_RESTART, SYSTEM_SUSPEND_DISK, } system_state; # 294 "include/linux/kernel.h" extern void dump_stack(void) __attribute__((__cold__)); enum { DUMP_PREFIX_NONE, DUMP_PREFIX_ADDRESS, DUMP_PREFIX_OFFSET }; extern void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, char *linebuf, size_t linebuflen, bool ascii); extern void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii); extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, const void *buf, size_t len); extern const char hex_asc[]; static inline __attribute__((always_inline)) char *pack_hex_byte(char *buf, u8 byte) { *buf++ = hex_asc[((byte) & 0xf0) >> 4]; *buf++ = hex_asc[((byte) & 0x0f)]; return buf; } # 485 "include/linux/kernel.h" struct sysinfo; extern int do_sysinfo(struct sysinfo *info); struct sysinfo { long uptime; unsigned long loads[3]; unsigned long totalram; unsigned long freeram; unsigned long sharedram; unsigned long bufferram; unsigned long totalswap; unsigned long freeswap; 
unsigned short procs; unsigned short pad; unsigned long totalhigh; unsigned long freehigh; unsigned int mem_unit; char _f[20-2*sizeof(long)-sizeof(int)]; }; # 10 "include/linux/kallsyms.h" 2 # 41 "include/linux/kallsyms.h" static inline __attribute__((always_inline)) unsigned long kallsyms_lookup_name(const char *name) { return 0; } static inline __attribute__((always_inline)) int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) { return 0; } static inline __attribute__((always_inline)) const char *kallsyms_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf) { return ((void *)0); } static inline __attribute__((always_inline)) int sprint_symbol(char *buffer, unsigned long addr) { *buffer = '\0'; return 0; } static inline __attribute__((always_inline)) int lookup_symbol_name(unsigned long addr, char *symname) { return -34; } static inline __attribute__((always_inline)) int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name) { return -34; } static void __check_printsym_format(const char *fmt, ...) __attribute__((format(printf,1,2))); static inline __attribute__((always_inline)) void __check_printsym_format(const char *fmt, ...) { } static inline __attribute__((always_inline)) void print_symbol(const char *fmt, unsigned long addr) { __check_printsym_format(fmt, ""); ; } static inline __attribute__((always_inline)) void __attribute__((deprecated)) print_fn_descriptor_symbol(const char *fmt, void *addr) { print_symbol(fmt, (unsigned long)addr); } static inline __attribute__((always_inline)) void print_ip_sym(unsigned long ip) { printk("[<%p>] %pS\n", (void *) ip, (void *) ip); } # 16 "kernel/trace/trace.c" 2 # 1 "include/linux/seq_file.h" 1 # 1 "include/linux/string.h" 1 # 14 "include/linux/string.h" extern char *strndup_user(const char *, long); # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/string.h" 1 # 9 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/string.h" extern inline __attribute__((always_inline)) char *strcpy(char *dest, const char *src) { char *xdest = dest; char temp = 0; __asm__ __volatile__ ( "1:" "%2 = B [%1++] (Z);" "B [%0++] = %2;" "CC = %2;" "if cc jump 1b (bp);" : "+&a" (dest), "+&a" (src), "=&d" (temp) : : "memory", "CC"); return xdest; } extern inline __attribute__((always_inline)) char *strncpy(char *dest, const char *src, size_t n) { char *xdest = dest; char temp = 0; if (n == 0) return xdest; __asm__ __volatile__ ( "1:" "%3 = B [%1++] (Z);" "B [%0++] = %3;" "CC = %3;" "if ! cc jump 2f;" "%2 += -1;" "CC = %2 == 0;" "if ! cc jump 1b (bp);" "jump 4f;" "2:" "%3 = 0;" "3:" "%2 += -1;" "CC = %2 == 0;" "if cc jump 4f;" "B [%0++] = %3;" "jump 3b;" "4:" : "+&a" (dest), "+&a" (src), "+&da" (n), "=&d" (temp) : : "memory", "CC"); return xdest; } extern inline __attribute__((always_inline)) int strcmp(const char *cs, const char *ct) { int __res1, __res2; __asm__ __volatile__ ( "1:" "%2 = B[%0++] (Z);" "%3 = B[%1++] (Z);" "CC = %2 == %3;" "if ! cc jump 2f;" "CC = %2;" "if cc jump 1b (bp);" "jump.s 3f;" "2:" "%2 = %2 - %3;" "3:" : "+&a" (cs), "+&a" (ct), "=&d" (__res1), "=&d" (__res2) : : "memory", "CC"); return __res1; } extern inline __attribute__((always_inline)) int strncmp(const char *cs, const char *ct, size_t count) { int __res1, __res2; if (!count) return 0; __asm__ __volatile__ ( "1:" "%3 = B[%0++] (Z);" "%4 = B[%1++] (Z);" "CC = %3 == %4;" "if ! 
cc jump 3f;" "CC = %3;" "if ! cc jump 4f;" "%2 += -1;" "CC = %2 == 0;" "if ! cc jump 1b;" "2:" "%3 = 0;" "jump.s 4f;" "3:" "%3 = %3 - %4;" "4:" : "+&a" (cs), "+&a" (ct), "+&da" (count), "=&d" (__res1), "=&d" (__res2) : : "memory", "CC"); return __res1; } extern void *memset(void *s, int c, size_t count); extern void *memcpy(void *d, const void *s, size_t count); extern int memcmp(const void *, const void *, __kernel_size_t); extern void *memchr(const void *s, int c, size_t n); extern void *memmove(void *dest, const void *src, size_t count); # 20 "include/linux/string.h" 2 # 28 "include/linux/string.h" size_t strlcpy(char *, const char *, size_t); extern char * strcat(char *, const char *); extern char * strncat(char *, const char *, __kernel_size_t); extern size_t strlcat(char *, const char *, __kernel_size_t); # 46 "include/linux/string.h" extern int strnicmp(const char *, const char *, __kernel_size_t); extern int strcasecmp(const char *s1, const char *s2); extern int strncasecmp(const char *s1, const char *s2, size_t n); extern char * strchr(const char *,int); extern char * strnchr(const char *, size_t, int); extern char * strrchr(const char *,int); extern char * strstrip(char *); extern char * strstr(const char *,const char *); extern __kernel_size_t strlen(const char *); extern __kernel_size_t strnlen(const char *,__kernel_size_t); extern char * strpbrk(const char *,const char *); extern char * strsep(char **,const char *); extern __kernel_size_t strspn(const char *,const char *); extern __kernel_size_t strcspn(const char *,const char *); # 96 "include/linux/string.h" extern void * memscan(void *,int,__kernel_size_t); # 105 "include/linux/string.h" extern char *kstrdup(const char *s, gfp_t gfp); extern char *kstrndup(const char *s, size_t len, gfp_t gfp); extern void *kmemdup(const void *src, size_t len, gfp_t gfp); extern char **argv_split(gfp_t gfp, const char *str, int *argcp); extern void argv_free(char **argv); extern bool sysfs_streq(const char *s1, const char *s2); extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, const void *from, size_t available); # 6 "include/linux/seq_file.h" 2 # 1 "include/linux/mutex.h" 1 # 13 "include/linux/mutex.h" # 1 "include/linux/list.h" 1 # 1 "include/linux/poison.h" 1 # 6 "include/linux/list.h" 2 # 1 "include/linux/prefetch.h" 1 # 15 "include/linux/prefetch.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cache.h" 1 # 16 "include/linux/prefetch.h" 2 # 53 "include/linux/prefetch.h" static inline __attribute__((always_inline)) void prefetch_range(void *addr, size_t len) { } # 7 "include/linux/list.h" 2 # 19 "include/linux/list.h" struct list_head { struct list_head *next, *prev; }; static inline __attribute__((always_inline)) void INIT_LIST_HEAD(struct list_head *list) { list->next = list; list->prev = list; } # 51 "include/linux/list.h" extern void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next); # 64 "include/linux/list.h" static inline __attribute__((always_inline)) void list_add(struct list_head *new, struct list_head *head) { __list_add(new, head, head->next); } # 78 "include/linux/list.h" static inline __attribute__((always_inline)) void list_add_tail(struct list_head *new, struct list_head *head) { __list_add(new, head->prev, head); } # 90 "include/linux/list.h" static inline __attribute__((always_inline)) void __list_del(struct list_head * prev, struct list_head * next) { next->prev = prev; prev->next = next; } # 110 "include/linux/list.h" extern void 
list_del(struct list_head *entry); # 120 "include/linux/list.h" static inline __attribute__((always_inline)) void list_replace(struct list_head *old, struct list_head *new) { new->next = old->next; new->next->prev = new; new->prev = old->prev; new->prev->next = new; } static inline __attribute__((always_inline)) void list_replace_init(struct list_head *old, struct list_head *new) { list_replace(old, new); INIT_LIST_HEAD(old); } static inline __attribute__((always_inline)) void list_del_init(struct list_head *entry) { __list_del(entry->prev, entry->next); INIT_LIST_HEAD(entry); } static inline __attribute__((always_inline)) void list_move(struct list_head *list, struct list_head *head) { __list_del(list->prev, list->next); list_add(list, head); } static inline __attribute__((always_inline)) void list_move_tail(struct list_head *list, struct list_head *head) { __list_del(list->prev, list->next); list_add_tail(list, head); } static inline __attribute__((always_inline)) int list_is_last(const struct list_head *list, const struct list_head *head) { return list->next == head; } static inline __attribute__((always_inline)) int list_empty(const struct list_head *head) { return head->next == head; } # 202 "include/linux/list.h" static inline __attribute__((always_inline)) int list_empty_careful(const struct list_head *head) { struct list_head *next = head->next; return (next == head) && (next == head->prev); } static inline __attribute__((always_inline)) int list_is_singular(const struct list_head *head) { return !list_empty(head) && (head->next == head->prev); } static inline __attribute__((always_inline)) void __list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) { struct list_head *new_first = entry->next; list->next = head->next; list->next->prev = list; list->prev = entry; entry->next = list; head->next = new_first; new_first->prev = head; } # 243 "include/linux/list.h" static inline __attribute__((always_inline)) void list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) { if (list_empty(head)) return; if (list_is_singular(head) && (head->next != entry && head != entry)) return; if (entry == head) INIT_LIST_HEAD(list); else __list_cut_position(list, head, entry); } static inline __attribute__((always_inline)) void __list_splice(const struct list_head *list, struct list_head *prev, struct list_head *next) { struct list_head *first = list->next; struct list_head *last = list->prev; first->prev = prev; prev->next = first; last->next = next; next->prev = last; } static inline __attribute__((always_inline)) void list_splice(const struct list_head *list, struct list_head *head) { if (!list_empty(list)) __list_splice(list, head, head->next); } static inline __attribute__((always_inline)) void list_splice_tail(struct list_head *list, struct list_head *head) { if (!list_empty(list)) __list_splice(list, head->prev, head); } # 302 "include/linux/list.h" static inline __attribute__((always_inline)) void list_splice_init(struct list_head *list, struct list_head *head) { if (!list_empty(list)) { __list_splice(list, head, head->next); INIT_LIST_HEAD(list); } } # 319 "include/linux/list.h" static inline __attribute__((always_inline)) void list_splice_tail_init(struct list_head *list, struct list_head *head) { if (!list_empty(list)) { __list_splice(list, head->prev, head); INIT_LIST_HEAD(list); } } # 540 "include/linux/list.h" struct hlist_head { struct hlist_node *first; }; struct hlist_node { struct hlist_node *next, **pprev; }; 
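/*
 * A minimal usage sketch for the struct list_head API declared above.
 * The identifiers trace_demo_node and trace_demo_list_sum are
 * hypothetical, illustrative names, not kernel symbols.  The pattern:
 * embed a struct list_head inside an enclosing structure, link nodes
 * with list_add_tail(), walk ->next until the cursor returns to the
 * head (the head is a sentinel that points back to itself when the
 * list is empty, so no NULL checks are needed), and unlink with
 * list_del().  list_entry()/container_of() would normally recover the
 * enclosing structure; the offset arithmetic is spelled out by hand
 * here because macros are not visible in preprocessed output.
 */
struct trace_demo_node { int value; struct list_head link; };
static inline __attribute__((always_inline)) int trace_demo_list_sum(void)
{
	struct list_head demo_head;
	struct trace_demo_node a = { .value = 1 }, b = { .value = 2 };
	struct list_head *pos;
	int sum = 0;
	INIT_LIST_HEAD(&demo_head);		/* head->next == head->prev == head */
	list_add_tail(&a.link, &demo_head);	/* FIFO-style insertion at the tail */
	list_add_tail(&b.link, &demo_head);
	for (pos = demo_head.next; pos != &demo_head; pos = pos->next)
		sum += ((struct trace_demo_node *)
			((char *)pos - __builtin_offsetof(struct trace_demo_node, link)))->value;
	list_del(&a.link);			/* unlink the nodes from the list again */
	list_del(&b.link);
	return sum;				/* 1 + 2 == 3 for this two-node list */
}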
static inline __attribute__((always_inline)) void INIT_HLIST_NODE(struct hlist_node *h) { h->next = ((void *)0); h->pprev = ((void *)0); } static inline __attribute__((always_inline)) int hlist_unhashed(const struct hlist_node *h) { return !h->pprev; } static inline __attribute__((always_inline)) int hlist_empty(const struct hlist_head *h) { return !h->first; } static inline __attribute__((always_inline)) void __hlist_del(struct hlist_node *n) { struct hlist_node *next = n->next; struct hlist_node **pprev = n->pprev; *pprev = next; if (next) next->pprev = pprev; } static inline __attribute__((always_inline)) void hlist_del(struct hlist_node *n) { __hlist_del(n); n->next = ((void *) 0x00100100); n->pprev = ((void *) 0x00200200); } static inline __attribute__((always_inline)) void hlist_del_init(struct hlist_node *n) { if (!hlist_unhashed(n)) { __hlist_del(n); INIT_HLIST_NODE(n); } } static inline __attribute__((always_inline)) void hlist_add_head(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; n->next = first; if (first) first->pprev = &n->next; h->first = n; n->pprev = &h->first; } static inline __attribute__((always_inline)) void hlist_add_before(struct hlist_node *n, struct hlist_node *next) { n->pprev = next->pprev; n->next = next; next->pprev = &n->next; *(n->pprev) = n; } static inline __attribute__((always_inline)) void hlist_add_after(struct hlist_node *n, struct hlist_node *next) { next->next = n->next; n->next = next; next->pprev = &n->next; if(next->next) next->next->pprev = &next->next; } static inline __attribute__((always_inline)) void hlist_move_list(struct hlist_head *old, struct hlist_head *new) { new->first = old->first; if (new->first) new->first->pprev = &new->first; old->first = ((void *)0); } # 14 "include/linux/mutex.h" 2 # 1 "include/linux/spinlock_types.h" 1 # 15 "include/linux/spinlock_types.h" # 1 "include/linux/spinlock_types_up.h" 1 # 17 "include/linux/spinlock_types_up.h" typedef struct { volatile unsigned int slock; } raw_spinlock_t; # 31 "include/linux/spinlock_types_up.h" typedef struct { } raw_rwlock_t; # 16 "include/linux/spinlock_types.h" 2 # 1 "include/linux/lockdep.h" 1 # 12 "include/linux/lockdep.h" struct task_struct; struct lockdep_map; # 321 "include/linux/lockdep.h" static inline __attribute__((always_inline)) void lockdep_off(void) { } static inline __attribute__((always_inline)) void lockdep_on(void) { } # 350 "include/linux/lockdep.h" struct lock_class_key { }; # 383 "include/linux/lockdep.h" static inline __attribute__((always_inline)) void early_init_irq_lock_class(void) { } static inline __attribute__((always_inline)) void early_boot_irqs_off(void) { } static inline __attribute__((always_inline)) void early_boot_irqs_on(void) { } static inline __attribute__((always_inline)) void print_irqtrace_events(struct task_struct *curr) { } # 19 "include/linux/spinlock_types.h" 2 typedef struct { raw_spinlock_t raw_lock; unsigned int magic, owner_cpu; void *owner; } spinlock_t; typedef struct { raw_rwlock_t raw_lock; unsigned int magic, owner_cpu; void *owner; } rwlock_t; # 15 "include/linux/mutex.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h" 1 # 16 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h" typedef struct { volatile int counter; } atomic_t; # 92 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h" static inline __attribute__((always_inline)) void atomic_add(int i, atomic_t *v) { long flags; __asm__ __volatile__( "cli 
%0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); v->counter += i; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } static inline __attribute__((always_inline)) void atomic_sub(int i, atomic_t *v) { long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); v->counter -= i; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } static inline __attribute__((always_inline)) int atomic_add_return(int i, atomic_t *v) { int __temp = 0; long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); v->counter += i; __temp = v->counter; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return __temp; } static inline __attribute__((always_inline)) int atomic_sub_return(int i, atomic_t *v) { int __temp = 0; long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); v->counter -= i; __temp = v->counter; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); return __temp; } static inline __attribute__((always_inline)) void atomic_inc(volatile atomic_t *v) { long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); v->counter++; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } static inline __attribute__((always_inline)) void atomic_dec(volatile atomic_t *v) { long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); v->counter--; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } static inline __attribute__((always_inline)) void atomic_clear_mask(unsigned int mask, atomic_t *v) { long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); v->counter &= ~mask; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } static inline __attribute__((always_inline)) void atomic_set_mask(unsigned int mask, atomic_t *v) { long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); v->counter |= mask; do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } # 212 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h" # 1 "include/asm-generic/atomic.h" 1 # 141 "include/asm-generic/atomic.h" typedef atomic_t atomic_long_t; static inline __attribute__((always_inline)) long atomic_long_read(atomic_long_t *l) { atomic_t *v = (atomic_t *)l; return (long)((v)->counter); } static inline __attribute__((always_inline)) void atomic_long_set(atomic_long_t *l, long i) { atomic_t *v = (atomic_t *)l; (((v)->counter) = i); } static inline __attribute__((always_inline)) void atomic_long_inc(atomic_long_t *l) { atomic_t *v = (atomic_t *)l; atomic_inc(v); } static inline __attribute__((always_inline)) void atomic_long_dec(atomic_long_t *l) { atomic_t *v = (atomic_t *)l; atomic_dec(v); } static inline __attribute__((always_inline)) void atomic_long_add(long i, atomic_long_t *l) { atomic_t *v = (atomic_t *)l; atomic_add(i, v); } static inline __attribute__((always_inline)) void atomic_long_sub(long i, atomic_long_t *l) { atomic_t *v = (atomic_t *)l; atomic_sub(i, v); } static inline __attribute__((always_inline)) int atomic_long_sub_and_test(long i, atomic_long_t *l) { atomic_t *v = 
(atomic_t *)l; return (atomic_sub_return((i), (v)) == 0); } static inline __attribute__((always_inline)) int atomic_long_dec_and_test(atomic_long_t *l) { atomic_t *v = (atomic_t *)l; return (atomic_sub_return(1, (v)) == 0); } static inline __attribute__((always_inline)) int atomic_long_inc_and_test(atomic_long_t *l) { atomic_t *v = (atomic_t *)l; return (atomic_add_return(1,(v)) == 0); } static inline __attribute__((always_inline)) int atomic_long_add_negative(long i, atomic_long_t *l) { atomic_t *v = (atomic_t *)l; return (atomic_add_return((i), (v)) < 0); } static inline __attribute__((always_inline)) long atomic_long_add_return(long i, atomic_long_t *l) { atomic_t *v = (atomic_t *)l; return (long)atomic_add_return(i, v); } static inline __attribute__((always_inline)) long atomic_long_sub_return(long i, atomic_long_t *l) { atomic_t *v = (atomic_t *)l; return (long)atomic_sub_return(i, v); } static inline __attribute__((always_inline)) long atomic_long_inc_return(atomic_long_t *l) { atomic_t *v = (atomic_t *)l; return (long)atomic_add_return(1,(v)); } static inline __attribute__((always_inline)) long atomic_long_dec_return(atomic_long_t *l) { atomic_t *v = (atomic_t *)l; return (long)atomic_sub_return(1,(v)); } static inline __attribute__((always_inline)) long atomic_long_add_unless(atomic_long_t *l, long a, long u) { atomic_t *v = (atomic_t *)l; return (long)({ int c, old; c = ((v)->counter); while (c != (u) && (old = ((int)((__typeof__(*((&(((v))->counter)))))__cmpxchg_local_generic(((&(((v))->counter))), (unsigned long)(((c))), (unsigned long)(((c + (a)))), sizeof(*((&(((v))->counter)))))))) != c) c = old; c != (u); }); } # 213 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h" 2 # 19 "include/linux/mutex.h" 2 # 48 "include/linux/mutex.h" struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; struct thread_info *owner; const char *name; void *magic; }; struct mutex_waiter { struct list_head list; struct task_struct *task; struct mutex *lock; void *magic; }; # 1 "include/linux/mutex-debug.h" 1 # 21 "include/linux/mutex-debug.h" extern void mutex_destroy(struct mutex *lock); # 78 "include/linux/mutex.h" 2 # 106 "include/linux/mutex.h" extern void __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key); static inline __attribute__((always_inline)) int mutex_is_locked(struct mutex *lock) { return ((&lock->count)->counter) != 1; } # 135 "include/linux/mutex.h" extern void mutex_lock(struct mutex *lock); extern int __attribute__((warn_unused_result)) mutex_lock_interruptible(struct mutex *lock); extern int __attribute__((warn_unused_result)) mutex_lock_killable(struct mutex *lock); # 148 "include/linux/mutex.h" extern int mutex_trylock(struct mutex *lock); extern void mutex_unlock(struct mutex *lock); # 7 "include/linux/seq_file.h" 2 # 1 "include/linux/cpumask.h" 1 # 141 "include/linux/cpumask.h" # 1 "include/linux/threads.h" 1 # 142 "include/linux/cpumask.h" 2 # 1 "include/linux/bitmap.h" 1 # 87 "include/linux/bitmap.h" extern int __bitmap_empty(const unsigned long *bitmap, int bits); extern int __bitmap_full(const unsigned long *bitmap, int bits); extern int __bitmap_equal(const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits); extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, int shift, int bits); extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, int 
shift, int bits); extern void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); extern void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); extern int __bitmap_intersects(const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); extern int __bitmap_subset(const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); extern int __bitmap_weight(const unsigned long *bitmap, int bits); extern int bitmap_scnprintf(char *buf, unsigned int len, const unsigned long *src, int nbits); extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, unsigned long *dst, int nbits); extern int bitmap_parse_user(const char *ubuf, unsigned int ulen, unsigned long *dst, int nbits); extern int bitmap_scnlistprintf(char *buf, unsigned int len, const unsigned long *src, int nbits); extern int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits); extern void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned long *old, const unsigned long *new, int bits); extern int bitmap_bitremap(int oldbit, const unsigned long *old, const unsigned long *new, int bits); extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, const unsigned long *relmap, int bits); extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, int sz, int bits); extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); static inline __attribute__((always_inline)) void bitmap_zero(unsigned long *dst, int nbits) { if (nbits <= 32) *dst = 0UL; else { int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long); memset(dst, 0, len); } } static inline __attribute__((always_inline)) void bitmap_fill(unsigned long *dst, int nbits) { size_t nlongs = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))); if (nlongs > 1) { int len = (nlongs - 1) * sizeof(unsigned long); memset(dst, 0xff, len); } dst[nlongs - 1] = ( ((nbits) % 32) ? 
(1UL<<((nbits) % 32))-1 : ~0UL ); } static inline __attribute__((always_inline)) void bitmap_copy(unsigned long *dst, const unsigned long *src, int nbits) { if (nbits <= 32) *dst = *src; else { int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long); memcpy(dst, src, len); } } static inline __attribute__((always_inline)) void bitmap_and(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { if (nbits <= 32) *dst = *src1 & *src2; else __bitmap_and(dst, src1, src2, nbits); } static inline __attribute__((always_inline)) void bitmap_or(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { if (nbits <= 32) *dst = *src1 | *src2; else __bitmap_or(dst, src1, src2, nbits); } static inline __attribute__((always_inline)) void bitmap_xor(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { if (nbits <= 32) *dst = *src1 ^ *src2; else __bitmap_xor(dst, src1, src2, nbits); } static inline __attribute__((always_inline)) void bitmap_andnot(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { if (nbits <= 32) *dst = *src1 & ~(*src2); else __bitmap_andnot(dst, src1, src2, nbits); } static inline __attribute__((always_inline)) void bitmap_complement(unsigned long *dst, const unsigned long *src, int nbits) { if (nbits <= 32) *dst = ~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ); else __bitmap_complement(dst, src, nbits); } static inline __attribute__((always_inline)) int bitmap_equal(const unsigned long *src1, const unsigned long *src2, int nbits) { if (nbits <= 32) return ! ((*src1 ^ *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )); else return __bitmap_equal(src1, src2, nbits); } static inline __attribute__((always_inline)) int bitmap_intersects(const unsigned long *src1, const unsigned long *src2, int nbits) { if (nbits <= 32) return ((*src1 & *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )) != 0; else return __bitmap_intersects(src1, src2, nbits); } static inline __attribute__((always_inline)) int bitmap_subset(const unsigned long *src1, const unsigned long *src2, int nbits) { if (nbits <= 32) return ! ((*src1 & ~(*src2)) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )); else return __bitmap_subset(src1, src2, nbits); } static inline __attribute__((always_inline)) int bitmap_empty(const unsigned long *src, int nbits) { if (nbits <= 32) return ! (*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )); else return __bitmap_empty(src, nbits); } static inline __attribute__((always_inline)) int bitmap_full(const unsigned long *src, int nbits) { if (nbits <= 32) return ! (~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )); else return __bitmap_full(src, nbits); } static inline __attribute__((always_inline)) int bitmap_weight(const unsigned long *src, int nbits) { if (nbits <= 32) return hweight_long(*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )); return __bitmap_weight(src, nbits); } static inline __attribute__((always_inline)) void bitmap_shift_right(unsigned long *dst, const unsigned long *src, int n, int nbits) { if (nbits <= 32) *dst = *src >> n; else __bitmap_shift_right(dst, src, n, nbits); } static inline __attribute__((always_inline)) void bitmap_shift_left(unsigned long *dst, const unsigned long *src, int n, int nbits) { if (nbits <= 32) *dst = (*src << n) & ( ((nbits) % 32) ? 
(1UL<<((nbits) % 32))-1 : ~0UL ); else __bitmap_shift_left(dst, src, n, nbits); } static inline __attribute__((always_inline)) int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *maskp, int nmaskbits) { return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); } # 143 "include/linux/cpumask.h" 2 typedef struct cpumask { unsigned long bits[(((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } cpumask_t; extern cpumask_t _unused_cpumask_arg_; static inline __attribute__((always_inline)) void __cpu_set(int cpu, volatile cpumask_t *dstp) { set_bit(cpu, dstp->bits); } static inline __attribute__((always_inline)) void __cpu_clear(int cpu, volatile cpumask_t *dstp) { clear_bit(cpu, dstp->bits); } static inline __attribute__((always_inline)) void __cpus_setall(cpumask_t *dstp, int nbits) { bitmap_fill(dstp->bits, nbits); } static inline __attribute__((always_inline)) void __cpus_clear(cpumask_t *dstp, int nbits) { bitmap_zero(dstp->bits, nbits); } static inline __attribute__((always_inline)) int __cpu_test_and_set(int cpu, cpumask_t *addr) { return test_and_set_bit(cpu, addr->bits); } static inline __attribute__((always_inline)) void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, const cpumask_t *src2p, int nbits) { bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, const cpumask_t *src2p, int nbits) { bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, const cpumask_t *src2p, int nbits) { bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, const cpumask_t *src2p, int nbits) { bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) void __cpus_complement(cpumask_t *dstp, const cpumask_t *srcp, int nbits) { bitmap_complement(dstp->bits, srcp->bits, nbits); } static inline __attribute__((always_inline)) int __cpus_equal(const cpumask_t *src1p, const cpumask_t *src2p, int nbits) { return bitmap_equal(src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) int __cpus_intersects(const cpumask_t *src1p, const cpumask_t *src2p, int nbits) { return bitmap_intersects(src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) int __cpus_subset(const cpumask_t *src1p, const cpumask_t *src2p, int nbits) { return bitmap_subset(src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) int __cpus_empty(const cpumask_t *srcp, int nbits) { return bitmap_empty(srcp->bits, nbits); } static inline __attribute__((always_inline)) int __cpus_full(const cpumask_t *srcp, int nbits) { return bitmap_full(srcp->bits, nbits); } static inline __attribute__((always_inline)) int __cpus_weight(const cpumask_t *srcp, int nbits) { return bitmap_weight(srcp->bits, nbits); } static inline __attribute__((always_inline)) void __cpus_shift_right(cpumask_t *dstp, const cpumask_t *srcp, int n, int nbits) { bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); } static inline __attribute__((always_inline)) void __cpus_shift_left(cpumask_t *dstp, const cpumask_t *srcp, int n, int nbits) { bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } # 278 "include/linux/cpumask.h" extern const unsigned long cpu_bit_bitmap[32 +1][(((1) + (8 * sizeof(long)) - 1) / (8 * 
sizeof(long)))]; static inline __attribute__((always_inline)) const cpumask_t *get_cpu_mask(unsigned int cpu) { const unsigned long *p = cpu_bit_bitmap[1 + cpu % 32]; p -= cpu / 32; return (const cpumask_t *)p; } # 344 "include/linux/cpumask.h" static inline __attribute__((always_inline)) int __cpumask_scnprintf(char *buf, int len, const cpumask_t *srcp, int nbits) { return bitmap_scnprintf(buf, len, srcp->bits, nbits); } static inline __attribute__((always_inline)) int __cpumask_parse_user(const char *buf, int len, cpumask_t *dstp, int nbits) { return bitmap_parse_user(buf, len, dstp->bits, nbits); } static inline __attribute__((always_inline)) int __cpulist_scnprintf(char *buf, int len, const cpumask_t *srcp, int nbits) { return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); } static inline __attribute__((always_inline)) int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits) { return bitmap_parselist(buf, dstp->bits, nbits); } static inline __attribute__((always_inline)) int __cpu_remap(int oldbit, const cpumask_t *oldp, const cpumask_t *newp, int nbits) { return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); } static inline __attribute__((always_inline)) void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp, const cpumask_t *oldp, const cpumask_t *newp, int nbits) { bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); } static inline __attribute__((always_inline)) void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp, const cpumask_t *relmapp, int nbits) { bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); } static inline __attribute__((always_inline)) void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp, int sz, int nbits) { bitmap_fold(dstp->bits, origp->bits, sz, nbits); } # 504 "include/linux/cpumask.h" extern cpumask_t cpu_possible_map; extern cpumask_t cpu_online_map; extern cpumask_t cpu_present_map; extern cpumask_t cpu_active_map; # 558 "include/linux/cpumask.h" static inline __attribute__((always_inline)) unsigned int cpumask_check(unsigned int cpu) { return cpu; } static inline __attribute__((always_inline)) unsigned int cpumask_first(const struct cpumask *srcp) { return 0; } static inline __attribute__((always_inline)) unsigned int cpumask_next(int n, const struct cpumask *srcp) { return n+1; } static inline __attribute__((always_inline)) unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) { return n+1; } static inline __attribute__((always_inline)) unsigned int cpumask_next_and(int n, const struct cpumask *srcp, const struct cpumask *andp) { return n+1; } static inline __attribute__((always_inline)) unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) { return 1; } # 694 "include/linux/cpumask.h" static inline __attribute__((always_inline)) void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) { set_bit(cpumask_check(cpu), ((dstp)->bits)); } static inline __attribute__((always_inline)) void cpumask_clear_cpu(int cpu, struct cpumask *dstp) { clear_bit(cpumask_check(cpu), ((dstp)->bits)); } # 726 "include/linux/cpumask.h" static inline __attribute__((always_inline)) int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) { return test_and_set_bit(cpumask_check(cpu), ((cpumask)->bits)); } static inline __attribute__((always_inline)) void cpumask_setall(struct cpumask *dstp) { bitmap_fill(((dstp)->bits), 1); } static inline __attribute__((always_inline)) void cpumask_clear(struct cpumask *dstp) { bitmap_zero(((dstp)->bits), 1); } static inline 
__attribute__((always_inline)) void cpumask_and(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { bitmap_and(((dstp)->bits), ((src1p)->bits), ((src2p)->bits), 1); } static inline __attribute__((always_inline)) void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { bitmap_or(((dstp)->bits), ((src1p)->bits), ((src2p)->bits), 1); } static inline __attribute__((always_inline)) void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { bitmap_xor(((dstp)->bits), ((src1p)->bits), ((src2p)->bits), 1); } static inline __attribute__((always_inline)) void cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { bitmap_andnot(((dstp)->bits), ((src1p)->bits), ((src2p)->bits), 1); } static inline __attribute__((always_inline)) void cpumask_complement(struct cpumask *dstp, const struct cpumask *srcp) { bitmap_complement(((dstp)->bits), ((srcp)->bits), 1); } static inline __attribute__((always_inline)) bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p) { return bitmap_equal(((src1p)->bits), ((src2p)->bits), 1); } static inline __attribute__((always_inline)) bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p) { return bitmap_intersects(((src1p)->bits), ((src2p)->bits), 1); } static inline __attribute__((always_inline)) int cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p) { return bitmap_subset(((src1p)->bits), ((src2p)->bits), 1); } static inline __attribute__((always_inline)) bool cpumask_empty(const struct cpumask *srcp) { return bitmap_empty(((srcp)->bits), 1); } static inline __attribute__((always_inline)) bool cpumask_full(const struct cpumask *srcp) { return bitmap_full(((srcp)->bits), 1); } static inline __attribute__((always_inline)) unsigned int cpumask_weight(const struct cpumask *srcp) { return bitmap_weight(((srcp)->bits), 1); } static inline __attribute__((always_inline)) void cpumask_shift_right(struct cpumask *dstp, const struct cpumask *srcp, int n) { bitmap_shift_right(((dstp)->bits), ((srcp)->bits), n, 1); } static inline __attribute__((always_inline)) void cpumask_shift_left(struct cpumask *dstp, const struct cpumask *srcp, int n) { bitmap_shift_left(((dstp)->bits), ((srcp)->bits), n, 1); } static inline __attribute__((always_inline)) void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp) { bitmap_copy(((dstp)->bits), ((srcp)->bits), 1); } # 962 "include/linux/cpumask.h" static inline __attribute__((always_inline)) int __check_is_bitmap(const unsigned long *bitmap) { return 1; } static inline __attribute__((always_inline)) size_t cpumask_size(void) { return (((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(long); } # 1004 "include/linux/cpumask.h" typedef struct cpumask cpumask_var_t[1]; static inline __attribute__((always_inline)) bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) { return true; } static inline __attribute__((always_inline)) void alloc_bootmem_cpumask_var(cpumask_var_t *mask) { } static inline __attribute__((always_inline)) void free_cpumask_var(cpumask_var_t mask) { } static inline __attribute__((always_inline)) void free_bootmem_cpumask_var(cpumask_var_t mask) { } # 1032 "include/linux/cpumask.h" extern const unsigned long cpu_all_bits[(((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; static inline __attribute__((always_inline)) void set_cpu_possible(unsigned int cpu, bool possible) { if (possible) 
cpumask_set_cpu(cpu, &cpu_possible_map); else cpumask_clear_cpu(cpu, &cpu_possible_map); } static inline __attribute__((always_inline)) void set_cpu_present(unsigned int cpu, bool present) { if (present) cpumask_set_cpu(cpu, &cpu_present_map); else cpumask_clear_cpu(cpu, &cpu_present_map); } static inline __attribute__((always_inline)) void set_cpu_online(unsigned int cpu, bool online) { if (online) cpumask_set_cpu(cpu, &cpu_online_map); else cpumask_clear_cpu(cpu, &cpu_online_map); } static inline __attribute__((always_inline)) void set_cpu_active(unsigned int cpu, bool active) { if (active) cpumask_set_cpu(cpu, &cpu_active_map); else cpumask_clear_cpu(cpu, &cpu_active_map); } static inline __attribute__((always_inline)) void init_cpu_present(const struct cpumask *src) { cpumask_copy(&cpu_present_map, src); } static inline __attribute__((always_inline)) void init_cpu_possible(const struct cpumask *src) { cpumask_copy(&cpu_possible_map, src); } static inline __attribute__((always_inline)) void init_cpu_online(const struct cpumask *src) { cpumask_copy(&cpu_online_map, src); } # 8 "include/linux/seq_file.h" 2 # 1 "include/linux/nodemask.h" 1 # 90 "include/linux/nodemask.h" # 1 "include/linux/numa.h" 1 # 91 "include/linux/nodemask.h" 2 typedef struct { unsigned long bits[((((1 << 0)) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } nodemask_t; extern nodemask_t _unused_nodemask_arg_; static inline __attribute__((always_inline)) void __node_set(int node, volatile nodemask_t *dstp) { set_bit(node, dstp->bits); } static inline __attribute__((always_inline)) void __node_clear(int node, volatile nodemask_t *dstp) { clear_bit(node, dstp->bits); } static inline __attribute__((always_inline)) void __nodes_setall(nodemask_t *dstp, int nbits) { bitmap_fill(dstp->bits, nbits); } static inline __attribute__((always_inline)) void __nodes_clear(nodemask_t *dstp, int nbits) { bitmap_zero(dstp->bits, nbits); } static inline __attribute__((always_inline)) int __node_test_and_set(int node, nodemask_t *addr) { return test_and_set_bit(node, addr->bits); } static inline __attribute__((always_inline)) void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, int nbits) { bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, int nbits) { bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, int nbits) { bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, int nbits) { bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) void __nodes_complement(nodemask_t *dstp, const nodemask_t *srcp, int nbits) { bitmap_complement(dstp->bits, srcp->bits, nbits); } static inline __attribute__((always_inline)) int __nodes_equal(const nodemask_t *src1p, const nodemask_t *src2p, int nbits) { return bitmap_equal(src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) int __nodes_intersects(const nodemask_t *src1p, const nodemask_t *src2p, int nbits) { return bitmap_intersects(src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) int __nodes_subset(const nodemask_t *src1p, const nodemask_t *src2p, int 
nbits) { return bitmap_subset(src1p->bits, src2p->bits, nbits); } static inline __attribute__((always_inline)) int __nodes_empty(const nodemask_t *srcp, int nbits) { return bitmap_empty(srcp->bits, nbits); } static inline __attribute__((always_inline)) int __nodes_full(const nodemask_t *srcp, int nbits) { return bitmap_full(srcp->bits, nbits); } static inline __attribute__((always_inline)) int __nodes_weight(const nodemask_t *srcp, int nbits) { return bitmap_weight(srcp->bits, nbits); } static inline __attribute__((always_inline)) void __nodes_shift_right(nodemask_t *dstp, const nodemask_t *srcp, int n, int nbits) { bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); } static inline __attribute__((always_inline)) void __nodes_shift_left(nodemask_t *dstp, const nodemask_t *srcp, int n, int nbits) { bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); } static inline __attribute__((always_inline)) int __first_node(const nodemask_t *srcp) { return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_bit((srcp->bits), ((1 << 0)), 0)); __min1 < __min2 ? __min1: __min2; }); } static inline __attribute__((always_inline)) int __next_node(int n, const nodemask_t *srcp) { return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_bit(srcp->bits, (1 << 0), n+1)); __min1 < __min2 ? __min1: __min2; }); } # 255 "include/linux/nodemask.h" static inline __attribute__((always_inline)) int __first_unset_node(const nodemask_t *maskp) { return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_zero_bit((maskp->bits), ((1 << 0)), 0)); __min1 < __min2 ? __min1: __min2; }); } # 289 "include/linux/nodemask.h" static inline __attribute__((always_inline)) int __nodemask_scnprintf(char *buf, int len, const nodemask_t *srcp, int nbits) { return bitmap_scnprintf(buf, len, srcp->bits, nbits); } static inline __attribute__((always_inline)) int __nodemask_parse_user(const char *buf, int len, nodemask_t *dstp, int nbits) { return bitmap_parse_user(buf, len, dstp->bits, nbits); } static inline __attribute__((always_inline)) int __nodelist_scnprintf(char *buf, int len, const nodemask_t *srcp, int nbits) { return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); } static inline __attribute__((always_inline)) int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits) { return bitmap_parselist(buf, dstp->bits, nbits); } static inline __attribute__((always_inline)) int __node_remap(int oldbit, const nodemask_t *oldp, const nodemask_t *newp, int nbits) { return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); } static inline __attribute__((always_inline)) void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp, const nodemask_t *oldp, const nodemask_t *newp, int nbits) { bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); } static inline __attribute__((always_inline)) void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp, const nodemask_t *relmapp, int nbits) { bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); } static inline __attribute__((always_inline)) void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp, int sz, int nbits) { bitmap_fold(dstp->bits, origp->bits, sz, nbits); } # 363 "include/linux/nodemask.h" enum node_states { N_POSSIBLE, N_ONLINE, N_NORMAL_MEMORY, N_HIGH_MEMORY = N_NORMAL_MEMORY, N_CPU, NR_NODE_STATES }; extern nodemask_t node_states[NR_NODE_STATES]; # 413 "include/linux/nodemask.h" static inline __attribute__((always_inline)) int node_state(int node, enum node_states state) { return node == 0; } static inline __attribute__((always_inline)) void 
node_set_state(int node, enum node_states state) { } static inline __attribute__((always_inline)) void node_clear_state(int node, enum node_states state) { } static inline __attribute__((always_inline)) int num_node_state(enum node_states state) { return 1; } # 9 "include/linux/seq_file.h" 2 struct seq_operations; struct file; struct path; struct inode; struct dentry; struct seq_file { char *buf; size_t size; size_t from; size_t count; loff_t index; u64 version; struct mutex lock; const struct seq_operations *op; void *private; }; struct seq_operations { void * (*start) (struct seq_file *m, loff_t *pos); void (*stop) (struct seq_file *m, void *v); void * (*next) (struct seq_file *m, void *v, loff_t *pos); int (*show) (struct seq_file *m, void *v); }; int seq_open(struct file *, const struct seq_operations *); ssize_t seq_read(struct file *, char *, size_t, loff_t *); loff_t seq_lseek(struct file *, loff_t, int); int seq_release(struct inode *, struct file *); int seq_escape(struct seq_file *, const char *, const char *); int seq_putc(struct seq_file *m, char c); int seq_puts(struct seq_file *m, const char *s); int seq_printf(struct seq_file *, const char *, ...) __attribute__ ((format (printf,2,3))); int seq_path(struct seq_file *, struct path *, char *); int seq_dentry(struct seq_file *, struct dentry *, char *); int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *esc); int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); static inline __attribute__((always_inline)) int seq_cpumask(struct seq_file *m, cpumask_t *mask) { return seq_bitmap(m, mask->bits, 1); } static inline __attribute__((always_inline)) int seq_nodemask(struct seq_file *m, nodemask_t *mask) { return seq_bitmap(m, mask->bits, (1 << 0)); } int seq_bitmap_list(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); static inline __attribute__((always_inline)) int seq_cpumask_list(struct seq_file *m, cpumask_t *mask) { return seq_bitmap_list(m, mask->bits, 1); } static inline __attribute__((always_inline)) int seq_nodemask_list(struct seq_file *m, nodemask_t *mask) { return seq_bitmap_list(m, mask->bits, (1 << 0)); } int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int single_release(struct inode *, struct file *); void *__seq_open_private(struct file *, const struct seq_operations *, int); int seq_open_private(struct file *, const struct seq_operations *, int); int seq_release_private(struct inode *, struct file *); extern struct list_head *seq_list_start(struct list_head *head, loff_t pos); extern struct list_head *seq_list_start_head(struct list_head *head, loff_t pos); extern struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos); # 17 "kernel/trace/trace.c" 2 # 1 "include/linux/notifier.h" 1 # 14 "include/linux/notifier.h" # 1 "include/linux/rwsem.h" 1 # 17 "include/linux/rwsem.h" struct rw_semaphore; # 1 "include/linux/rwsem-spinlock.h" 1 # 15 "include/linux/rwsem-spinlock.h" # 1 "include/linux/spinlock.h" 1 # 50 "include/linux/spinlock.h" # 1 "include/linux/preempt.h" 1 # 9 "include/linux/preempt.h" # 1 "include/linux/thread_info.h" 1 # 12 "include/linux/thread_info.h" struct timespec; struct compat_timespec; struct restart_block { long (*fn)(struct restart_block *); union { struct { unsigned long arg0, arg1, arg2, arg3; }; struct { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; } futex; struct { clockid_t index; struct timespec *rmtp; u64 expires; } nanosleep; struct { struct pollfd *ufds; int nfds; 
int has_timeout; unsigned long tv_sec; unsigned long tv_nsec; } poll; }; }; extern long do_no_restart_syscall(struct restart_block *parm); # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h" 1 # 30 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" 1 # 14 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/setup.h" 1 # 15 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" 2 # 30 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" typedef struct { unsigned long pte; } pte_t; typedef struct { unsigned long pmd[16]; } pmd_t; typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; typedef struct page *pgtable_t; # 54 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" extern unsigned long memory_start; extern unsigned long memory_end; # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page_offset.h" 1 # 60 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/io.h" 1 # 23 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/io.h" static inline __attribute__((always_inline)) unsigned char readb(const volatile void *addr) { unsigned int val; int tmp; __asm__ __volatile__ ("cli %1;\n\t" "NOP; NOP; SSYNC;\n\t" "%0 = b [%2] (z);\n\t" "sti %1;\n\t" : "=d"(val), "=d"(tmp): "a"(addr) ); return (unsigned char) val; } static inline __attribute__((always_inline)) unsigned short readw(const volatile void *addr) { unsigned int val; int tmp; __asm__ __volatile__ ("cli %1;\n\t" "NOP; NOP; SSYNC;\n\t" "%0 = w [%2] (z);\n\t" "sti %1;\n\t" : "=d"(val), "=d"(tmp): "a"(addr) ); return (unsigned short) val; } static inline __attribute__((always_inline)) unsigned int readl(const volatile void *addr) { unsigned int val; int tmp; __asm__ __volatile__ ("cli %1;\n\t" "NOP; NOP; SSYNC;\n\t" "%0 = [%2];\n\t" "sti %1;\n\t" : "=d"(val), "=d"(tmp): "a"(addr) ); return val; } # 120 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/io.h" extern void outsb(unsigned long port, const void *addr, unsigned long count); extern void outsw(unsigned long port, const void *addr, unsigned long count); extern void outsw_8(unsigned long port, const void *addr, unsigned long count); extern void outsl(unsigned long port, const void *addr, unsigned long count); extern void insb(unsigned long port, void *addr, unsigned long count); extern void insw(unsigned long port, void *addr, unsigned long count); extern void insw_8(unsigned long port, void *addr, unsigned long count); extern void insl(unsigned long port, void *addr, unsigned long count); extern void insl_16(unsigned long port, void *addr, unsigned long count); extern void dma_outsb(unsigned long port, const void *addr, unsigned short count); extern void dma_outsw(unsigned long port, const void *addr, unsigned short count); extern void dma_outsl(unsigned long port, const void *addr, unsigned short count); extern void dma_insb(unsigned long port, void *addr, unsigned short count); extern void dma_insw(unsigned long port, void *addr, unsigned short count); extern void dma_insl(unsigned long port, void *addr, unsigned short count); static inline __attribute__((always_inline)) void readsl(const 
void *addr, void *buf, int len) { insl((unsigned long)addr, buf, len); } static inline __attribute__((always_inline)) void readsw(const void *addr, void *buf, int len) { insw((unsigned long)addr, buf, len); } static inline __attribute__((always_inline)) void readsb(const void *addr, void *buf, int len) { insb((unsigned long)addr, buf, len); } static inline __attribute__((always_inline)) void writesl(const void *addr, const void *buf, int len) { outsl((unsigned long)addr, buf, len); } static inline __attribute__((always_inline)) void writesw(const void *addr, const void *buf, int len) { outsw((unsigned long)addr, buf, len); } static inline __attribute__((always_inline)) void writesb(const void *addr, const void *buf, int len) { outsb((unsigned long)addr, buf, len); } static inline __attribute__((always_inline)) void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag) { return (void *)physaddr; } static inline __attribute__((always_inline)) void iounmap(void *addr) { } static inline __attribute__((always_inline)) void __iounmap(void *addr, unsigned long size) { } static inline __attribute__((always_inline)) void kernel_set_cachemode(void *addr, unsigned long size, int cmode) { } static inline __attribute__((always_inline)) void *ioremap(unsigned long physaddr, unsigned long size) { return __ioremap(physaddr, size, 1); } static inline __attribute__((always_inline)) void *ioremap_nocache(unsigned long physaddr, unsigned long size) { return __ioremap(physaddr, size, 1); } extern void blkfin_inv_cache_all(void); # 61 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" 2 # 84 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" # 1 "include/asm-generic/page.h" 1 # 9 "include/asm-generic/page.h" static __inline__ __attribute__((always_inline)) __attribute__((__const__)) int get_order(unsigned long size) { int order; size = (size - 1) >> (12 - 1); order = -1; do { size >>= 1; order++; } while (size); return order; } # 85 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" 2 # 31 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/entry.h" 1 # 32 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h" 2 # 51 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h" typedef unsigned long mm_segment_t; struct thread_info { struct task_struct *task; struct exec_domain *exec_domain; unsigned long flags; int cpu; int preempt_count; mm_segment_t addr_limit; struct restart_block restart_block; struct l1_scratch_task_info l1_task_info; }; # 92 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h" __attribute__((__const__)) static inline __attribute__((always_inline)) struct thread_info *current_thread_info(void) { struct thread_info *ti; __asm__("%0 = sp;" : "=da"(ti) : ); return (struct thread_info *)((long)ti & ~((long)8192 -1)); } # 56 "include/linux/thread_info.h" 2 # 64 "include/linux/thread_info.h" static inline __attribute__((always_inline)) void set_ti_thread_flag(struct thread_info *ti, int flag) { set_bit(flag, (unsigned long *)&ti->flags); } static inline __attribute__((always_inline)) void clear_ti_thread_flag(struct thread_info *ti, int flag) { clear_bit(flag, (unsigned long *)&ti->flags); } static inline __attribute__((always_inline)) int test_and_set_ti_thread_flag(struct thread_info *ti, int flag) { return 
test_and_set_bit(flag, (unsigned long *)&ti->flags); } static inline __attribute__((always_inline)) int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag) { return test_and_clear_bit(flag, (unsigned long *)&ti->flags); } static inline __attribute__((always_inline)) int test_ti_thread_flag(struct thread_info *ti, int flag) { return test_bit(flag, (unsigned long *)&ti->flags); } # 121 "include/linux/thread_info.h" static inline __attribute__((always_inline)) void set_restore_sigmask(void) { set_ti_thread_flag(current_thread_info(), 5); set_ti_thread_flag(current_thread_info(), 1); } # 10 "include/linux/preempt.h" 2 # 51 "include/linux/spinlock.h" 2 # 1 "include/linux/stringify.h" 1 # 56 "include/linux/spinlock.h" 2 # 1 "include/linux/bottom_half.h" 1 extern void local_bh_disable(void); extern void __local_bh_enable(void); extern void _local_bh_enable(void); extern void local_bh_enable(void); extern void local_bh_enable_ip(unsigned long ip); # 57 "include/linux/spinlock.h" 2 # 82 "include/linux/spinlock.h" extern int __attribute__((section(".spinlock.text"))) generic__raw_read_trylock(raw_rwlock_t *lock); # 1 "include/linux/spinlock_up.h" 1 # 23 "include/linux/spinlock_up.h" static inline __attribute__((always_inline)) void __raw_spin_lock(raw_spinlock_t *lock) { lock->slock = 0; } static inline __attribute__((always_inline)) void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) { __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); lock->slock = 0; } static inline __attribute__((always_inline)) int __raw_spin_trylock(raw_spinlock_t *lock) { char oldval = lock->slock; lock->slock = 0; return oldval > 0; } static inline __attribute__((always_inline)) void __raw_spin_unlock(raw_spinlock_t *lock) { lock->slock = 1; } # 91 "include/linux/spinlock.h" 2 extern void __spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key); # 109 "include/linux/spinlock.h" extern void __rwlock_init(rwlock_t *lock, const char *name, struct lock_class_key *key); # 140 "include/linux/spinlock.h" # 1 "include/linux/spinlock_api_smp.h" 1 # 18 "include/linux/spinlock_api_smp.h" int in_lock_functions(unsigned long addr); void __attribute__((section(".spinlock.text"))) _spin_lock(spinlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _spin_lock_nested(spinlock_t *lock, int subclass) ; void __attribute__((section(".spinlock.text"))) _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) ; void __attribute__((section(".spinlock.text"))) _read_lock(rwlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _write_lock(rwlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _spin_lock_bh(spinlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _read_lock_bh(rwlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _write_lock_bh(rwlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _spin_lock_irq(spinlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _read_lock_irq(rwlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _write_lock_irq(rwlock_t *lock) ; unsigned long __attribute__((section(".spinlock.text"))) _spin_lock_irqsave(spinlock_t *lock) ; unsigned long __attribute__((section(".spinlock.text"))) _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) ; unsigned long __attribute__((section(".spinlock.text"))) _read_lock_irqsave(rwlock_t *lock) ; unsigned long __attribute__((section(".spinlock.text"))) _write_lock_irqsave(rwlock_t *lock) ; int 
__attribute__((section(".spinlock.text"))) _spin_trylock(spinlock_t *lock); int __attribute__((section(".spinlock.text"))) _read_trylock(rwlock_t *lock); int __attribute__((section(".spinlock.text"))) _write_trylock(rwlock_t *lock); int __attribute__((section(".spinlock.text"))) _spin_trylock_bh(spinlock_t *lock); void __attribute__((section(".spinlock.text"))) _spin_unlock(spinlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _read_unlock(rwlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _write_unlock(rwlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _spin_unlock_bh(spinlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _read_unlock_bh(rwlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _write_unlock_bh(rwlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _spin_unlock_irq(spinlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _read_unlock_irq(rwlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _write_unlock_irq(rwlock_t *lock) ; void __attribute__((section(".spinlock.text"))) _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) ; void __attribute__((section(".spinlock.text"))) _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) ; void __attribute__((section(".spinlock.text"))) _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) ; # 141 "include/linux/spinlock.h" 2 extern void _raw_spin_lock(spinlock_t *lock); extern int _raw_spin_trylock(spinlock_t *lock); extern void _raw_spin_unlock(spinlock_t *lock); extern void _raw_read_lock(rwlock_t *lock); extern int _raw_read_trylock(rwlock_t *lock); extern void _raw_read_unlock(rwlock_t *lock); extern void _raw_write_lock(rwlock_t *lock); extern int _raw_write_trylock(rwlock_t *lock); extern void _raw_write_unlock(rwlock_t *lock); # 357 "include/linux/spinlock.h" extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); # 16 "include/linux/rwsem-spinlock.h" 2 struct rwsem_waiter; # 31 "include/linux/rwsem-spinlock.h" struct rw_semaphore { __s32 activity; spinlock_t wait_lock; struct list_head wait_list; }; # 53 "include/linux/rwsem-spinlock.h" extern void __init_rwsem(struct rw_semaphore *sem, const char *name, struct lock_class_key *key); # 63 "include/linux/rwsem-spinlock.h" extern void __down_read(struct rw_semaphore *sem); extern int __down_read_trylock(struct rw_semaphore *sem); extern void __down_write(struct rw_semaphore *sem); extern void __down_write_nested(struct rw_semaphore *sem, int subclass); extern int __down_write_trylock(struct rw_semaphore *sem); extern void __up_read(struct rw_semaphore *sem); extern void __up_write(struct rw_semaphore *sem); extern void __downgrade_write(struct rw_semaphore *sem); static inline __attribute__((always_inline)) int rwsem_is_locked(struct rw_semaphore *sem) { return (sem->activity != 0); } # 21 "include/linux/rwsem.h" 2 extern void down_read(struct rw_semaphore *sem); extern int down_read_trylock(struct rw_semaphore *sem); extern void down_write(struct rw_semaphore *sem); extern int down_write_trylock(struct rw_semaphore *sem); extern void up_read(struct rw_semaphore *sem); extern void up_write(struct rw_semaphore *sem); extern void downgrade_write(struct rw_semaphore *sem); # 15 "include/linux/notifier.h" 2 # 1 "include/linux/srcu.h" 1 # 30 "include/linux/srcu.h" struct srcu_struct_array { int c[2]; }; struct srcu_struct { int completed; struct srcu_struct_array *per_cpu_ref; struct mutex mutex; }; int init_srcu_struct(struct srcu_struct *sp); 
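/*
 * A minimal usage sketch of the rw_semaphore reader/writer API declared above
 * (down_read()/up_read() for shared access, down_write()/up_write() for
 * exclusive access). This configuration uses the spinlock-based implementation
 * from rwsem-spinlock.h, as the struct rw_semaphore and rwsem_is_locked()
 * definitions above show. The semaphore and both functions below are
 * hypothetical illustrations, not part of this translation unit, and the
 * semaphore is assumed to be initialized elsewhere via __init_rwsem():
 *
 *	static struct rw_semaphore example_sem;		// hypothetical
 *
 *	static void example_read_side(void)
 *	{
 *		down_read(&example_sem);	// take the shared (reader) side
 *		// ... read state protected by example_sem ...
 *		up_read(&example_sem);		// release the reader side
 *	}
 *
 *	static void example_write_side(void)
 *	{
 *		down_write(&example_sem);	// take the exclusive (writer) side
 *		// ... modify state protected by example_sem ...
 *		up_write(&example_sem);		// release the writer side
 *	}
 */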
void cleanup_srcu_struct(struct srcu_struct *sp); int srcu_read_lock(struct srcu_struct *sp) ; void srcu_read_unlock(struct srcu_struct *sp, int idx) ; void synchronize_srcu(struct srcu_struct *sp); long srcu_batches_completed(struct srcu_struct *sp); # 16 "include/linux/notifier.h" 2 # 50 "include/linux/notifier.h" struct notifier_block { int (*notifier_call)(struct notifier_block *, unsigned long, void *); struct notifier_block *next; int priority; }; struct atomic_notifier_head { spinlock_t lock; struct notifier_block *head; }; struct blocking_notifier_head { struct rw_semaphore rwsem; struct notifier_block *head; }; struct raw_notifier_head { struct notifier_block *head; }; struct srcu_notifier_head { struct mutex mutex; struct srcu_struct srcu; struct notifier_block *head; }; # 89 "include/linux/notifier.h" extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); # 115 "include/linux/notifier.h" extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, struct notifier_block *nb); extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh, struct notifier_block *nb); extern int raw_notifier_chain_register(struct raw_notifier_head *nh, struct notifier_block *nb); extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh, struct notifier_block *nb); extern int blocking_notifier_chain_cond_register( struct blocking_notifier_head *nh, struct notifier_block *nb); extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, struct notifier_block *nb); extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, struct notifier_block *nb); extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh, struct notifier_block *nb); extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, struct notifier_block *nb); extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh, unsigned long val, void *v); extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls); extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh, unsigned long val, void *v); extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls); extern int raw_notifier_call_chain(struct raw_notifier_head *nh, unsigned long val, void *v); extern int __raw_notifier_call_chain(struct raw_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls); extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v); extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls); # 165 "include/linux/notifier.h" static inline __attribute__((always_inline)) int notifier_from_errno(int err) { return 0x8000 | (0x0001 - err); } static inline __attribute__((always_inline)) int notifier_to_errno(int ret) { ret &= ~0x8000; return ret > 0x0001 ? 
0x0001 - ret : 0; } # 258 "include/linux/notifier.h" extern struct blocking_notifier_head reboot_notifier_list; # 18 "kernel/trace/trace.c" 2 # 1 "include/linux/debugfs.h" 1 # 18 "include/linux/debugfs.h" # 1 "include/linux/fs.h" 1 # 9 "include/linux/fs.h" # 1 "include/linux/limits.h" 1 # 10 "include/linux/fs.h" 2 # 1 "include/linux/ioctl.h" 1 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ioctl.h" 1 # 1 "include/asm-generic/ioctl.h" 1 # 73 "include/asm-generic/ioctl.h" extern unsigned int __invalid_size_argument_for_IOC; # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ioctl.h" 2 # 5 "include/linux/ioctl.h" 2 # 11 "include/linux/fs.h" 2 # 24 "include/linux/fs.h" extern int sysctl_nr_open; # 36 "include/linux/fs.h" struct files_stat_struct { int nr_files; int nr_free_files; int max_files; }; extern struct files_stat_struct files_stat; extern int get_max_files(void); struct inodes_stat_t { int nr_inodes; int nr_unused; int dummy[5]; }; extern struct inodes_stat_t inodes_stat; extern int leases_enable, lease_break_time; extern int dir_notify_enable; # 288 "include/linux/fs.h" # 1 "include/linux/wait.h" 1 # 26 "include/linux/wait.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/current.h" 1 # 13 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/current.h" struct task_struct; static inline __attribute__((always_inline)) struct task_struct *get_current(void) __attribute__ ((__const__)); static inline __attribute__((always_inline)) struct task_struct *get_current(void) { return (current_thread_info()->task); } # 27 "include/linux/wait.h" 2 typedef struct __wait_queue wait_queue_t; typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key); int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); struct __wait_queue { unsigned int flags; void *private; wait_queue_func_t func; struct list_head task_list; }; struct wait_bit_key { void *flags; int bit_nr; }; struct wait_bit_queue { struct wait_bit_key key; wait_queue_t wait; }; struct __wait_queue_head { spinlock_t lock; struct list_head task_list; }; typedef struct __wait_queue_head wait_queue_head_t; struct task_struct; # 80 "include/linux/wait.h" extern void init_waitqueue_head(wait_queue_head_t *q); # 91 "include/linux/wait.h" static inline __attribute__((always_inline)) void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) { q->flags = 0; q->private = p; q->func = default_wake_function; } static inline __attribute__((always_inline)) void init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func) { q->flags = 0; q->private = ((void *)0); q->func = func; } static inline __attribute__((always_inline)) int waitqueue_active(wait_queue_head_t *q) { return !list_empty(&q->task_list); } extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); static inline __attribute__((always_inline)) void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) { list_add(&new->task_list, &head->task_list); } static inline __attribute__((always_inline)) void __add_wait_queue_tail(wait_queue_head_t *head, wait_queue_t *new) { list_add_tail(&new->task_list, &head->task_list); } static inline __attribute__((always_inline)) void __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) { list_del(&old->task_list); } void 
__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode); extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); void __wake_up_bit(wait_queue_head_t *, void *, int); int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); void wake_up_bit(void *, int); int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned); int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned); wait_queue_head_t *bit_waitqueue(void *, int); # 400 "include/linux/wait.h" static inline __attribute__((always_inline)) void add_wait_queue_exclusive_locked(wait_queue_head_t *q, wait_queue_t * wait) { wait->flags |= 0x01; __add_wait_queue_tail(q, wait); } static inline __attribute__((always_inline)) void remove_wait_queue_locked(wait_queue_head_t *q, wait_queue_t * wait) { __remove_wait_queue(q, wait); } extern void sleep_on(wait_queue_head_t *q); extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout); extern void interruptible_sleep_on(wait_queue_head_t *q); extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout); void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); # 476 "include/linux/wait.h" static inline __attribute__((always_inline)) int wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode) { if (!test_bit(bit, word)) return 0; return out_of_line_wait_on_bit(word, bit, action, mode); } # 500 "include/linux/wait.h" static inline __attribute__((always_inline)) int wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode) { if (!test_and_set_bit(bit, word)) return 0; return out_of_line_wait_on_bit_lock(word, bit, action, mode); } # 289 "include/linux/fs.h" 2 # 1 "include/linux/kdev_t.h" 1 # 21 "include/linux/kdev_t.h" static inline __attribute__((always_inline)) int old_valid_dev(dev_t dev) { return ((unsigned int) ((dev) >> 20)) < 256 && ((unsigned int) ((dev) & ((1U << 20) - 1))) < 256; } static inline __attribute__((always_inline)) u16 old_encode_dev(dev_t dev) { return (((unsigned int) ((dev) >> 20)) << 8) | ((unsigned int) ((dev) & ((1U << 20) - 1))); } static inline __attribute__((always_inline)) dev_t old_decode_dev(u16 val) { return ((((val >> 8) & 255) << 20) | (val & 255)); } static inline __attribute__((always_inline)) int new_valid_dev(dev_t dev) { return 1; } static inline __attribute__((always_inline)) u32 new_encode_dev(dev_t dev) { unsigned major = ((unsigned int) ((dev) >> 20)); unsigned minor = ((unsigned int) ((dev) & ((1U << 20) - 1))); return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); } static inline __attribute__((always_inline)) dev_t new_decode_dev(u32 dev) { unsigned major = (dev & 0xfff00) >> 8; unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); return (((major) << 20) | (minor)); } static inline __attribute__((always_inline)) int huge_valid_dev(dev_t dev) { return 1; } static inline __attribute__((always_inline)) u64 huge_encode_dev(dev_t dev) { return new_encode_dev(dev); } static inline 
__attribute__((always_inline)) dev_t huge_decode_dev(u64 dev) { return new_decode_dev(dev); } static inline __attribute__((always_inline)) int sysv_valid_dev(dev_t dev) { return ((unsigned int) ((dev) >> 20)) < (1<<14) && ((unsigned int) ((dev) & ((1U << 20) - 1))) < (1<<18); } static inline __attribute__((always_inline)) u32 sysv_encode_dev(dev_t dev) { return ((unsigned int) ((dev) & ((1U << 20) - 1))) | (((unsigned int) ((dev) >> 20)) << 18); } static inline __attribute__((always_inline)) unsigned sysv_major(u32 dev) { return (dev >> 18) & 0x3fff; } static inline __attribute__((always_inline)) unsigned sysv_minor(u32 dev) { return dev & 0x3ffff; } # 291 "include/linux/fs.h" 2 # 1 "include/linux/dcache.h" 1 # 1 "include/linux/rculist.h" 1 # 10 "include/linux/rculist.h" # 1 "include/linux/rcupdate.h" 1 # 36 "include/linux/rcupdate.h" # 1 "include/linux/cache.h" 1 # 37 "include/linux/rcupdate.h" 2 # 1 "include/linux/percpu.h" 1 # 1 "include/linux/slab.h" 1 # 12 "include/linux/slab.h" # 1 "include/linux/gfp.h" 1 # 1 "include/linux/mmzone.h" 1 # 14 "include/linux/mmzone.h" # 1 "include/linux/init.h" 1 # 139 "include/linux/init.h" typedef int (*initcall_t)(void); typedef void (*exitcall_t)(void); extern initcall_t __con_initcall_start[], __con_initcall_end[]; extern initcall_t __security_initcall_start[], __security_initcall_end[]; extern int do_one_initcall(initcall_t fn); extern char __attribute__ ((__section__(".init.data"))) boot_command_line[]; extern char *saved_command_line; extern unsigned int reset_devices; void setup_arch(char **); void prepare_namespace(void); extern void (*late_time_init)(void); # 221 "include/linux/init.h" struct obs_kernel_param { const char *str; int (*setup_func)(char *); int early; }; # 249 "include/linux/init.h" void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) parse_early_param(void); # 15 "include/linux/mmzone.h" 2 # 1 "include/linux/seqlock.h" 1 # 32 "include/linux/seqlock.h" typedef struct { unsigned sequence; spinlock_t lock; } seqlock_t; # 60 "include/linux/seqlock.h" static inline __attribute__((always_inline)) void write_seqlock(seqlock_t *sl) { _spin_lock(&sl->lock); ++sl->sequence; __asm__ __volatile__("": : :"memory"); } static inline __attribute__((always_inline)) void write_sequnlock(seqlock_t *sl) { __asm__ __volatile__("": : :"memory"); sl->sequence++; _spin_unlock(&sl->lock); } static inline __attribute__((always_inline)) int write_tryseqlock(seqlock_t *sl) { int ret = (_spin_trylock(&sl->lock)); if (ret) { ++sl->sequence; __asm__ __volatile__("": : :"memory"); } return ret; } static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned read_seqbegin(const seqlock_t *sl) { unsigned ret; repeat: ret = sl->sequence; __asm__ __volatile__("": : :"memory"); if (__builtin_expect(!!(ret & 1), 0)) { __asm__ __volatile__("": : :"memory"); goto repeat; } return ret; } static inline __attribute__((always_inline)) __attribute__((always_inline)) int read_seqretry(const seqlock_t *sl, unsigned start) { __asm__ __volatile__("": : :"memory"); return (sl->sequence != start); } # 121 "include/linux/seqlock.h" typedef struct seqcount { unsigned sequence; } seqcount_t; static inline __attribute__((always_inline)) unsigned read_seqcount_begin(const seqcount_t *s) { unsigned ret; repeat: ret = s->sequence; __asm__ __volatile__("": : :"memory"); if (__builtin_expect(!!(ret & 1), 0)) { __asm__ __volatile__("": : :"memory"); goto repeat; } return ret; } static inline 
__attribute__((always_inline)) int read_seqcount_retry(const seqcount_t *s, unsigned start) { __asm__ __volatile__("": : :"memory"); return s->sequence != start; } static inline __attribute__((always_inline)) void write_seqcount_begin(seqcount_t *s) { s->sequence++; __asm__ __volatile__("": : :"memory"); } static inline __attribute__((always_inline)) void write_seqcount_end(seqcount_t *s) { __asm__ __volatile__("": : :"memory"); s->sequence++; } # 16 "include/linux/mmzone.h" 2 # 1 "include/linux/pageblock-flags.h" 1 # 29 "include/linux/pageblock-flags.h" enum pageblock_bits { PB_migrate, PB_migrate_end = PB_migrate + 3 - 1, NR_PAGEBLOCK_BITS }; # 60 "include/linux/pageblock-flags.h" struct page; unsigned long get_pageblock_flags_group(struct page *page, int start_bitidx, int end_bitidx); void set_pageblock_flags_group(struct page *page, unsigned long flags, int start_bitidx, int end_bitidx); # 18 "include/linux/mmzone.h" 2 # 1 "include/linux/bounds.h" 1 # 19 "include/linux/mmzone.h" 2 # 49 "include/linux/mmzone.h" extern int page_group_by_mobility_disabled; static inline __attribute__((always_inline)) int get_pageblock_migratetype(struct page *page) { if (__builtin_expect(!!(page_group_by_mobility_disabled), 0)) return 0; return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); } struct free_area { struct list_head free_list[5]; unsigned long nr_free; }; struct pglist_data; # 81 "include/linux/mmzone.h" enum zone_stat_item { NR_FREE_PAGES, NR_LRU_BASE, NR_INACTIVE_ANON = NR_LRU_BASE, NR_ACTIVE_ANON, NR_INACTIVE_FILE, NR_ACTIVE_FILE, NR_UNEVICTABLE = NR_ACTIVE_FILE, NR_MLOCK = NR_ACTIVE_FILE, NR_ANON_PAGES, NR_FILE_MAPPED, NR_FILE_PAGES, NR_FILE_DIRTY, NR_WRITEBACK, NR_SLAB_RECLAIMABLE, NR_SLAB_UNRECLAIMABLE, NR_PAGETABLE, NR_UNSTABLE_NFS, NR_BOUNCE, NR_VMSCAN_WRITE, NR_WRITEBACK_TEMP, # 118 "include/linux/mmzone.h" NR_VM_ZONE_STAT_ITEMS }; # 133 "include/linux/mmzone.h" enum lru_list { LRU_INACTIVE_ANON = 0, LRU_ACTIVE_ANON = 0 + 1, LRU_INACTIVE_FILE = 0 + 2, LRU_ACTIVE_FILE = 0 + 2 + 1, LRU_UNEVICTABLE = LRU_ACTIVE_FILE, NR_LRU_LISTS }; static inline __attribute__((always_inline)) int is_file_lru(enum lru_list l) { return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE); } static inline __attribute__((always_inline)) int is_active_lru(enum lru_list l) { return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE); } static inline __attribute__((always_inline)) int is_unevictable_lru(enum lru_list l) { return 0; } struct per_cpu_pages { int count; int high; int batch; struct list_head list; }; struct per_cpu_pageset { struct per_cpu_pages pcp; } ; # 195 "include/linux/mmzone.h" enum zone_type { # 215 "include/linux/mmzone.h" ZONE_DMA, # 230 "include/linux/mmzone.h" ZONE_NORMAL, # 242 "include/linux/mmzone.h" ZONE_MOVABLE, __MAX_NR_ZONES }; # 266 "include/linux/mmzone.h" struct zone { unsigned long pages_min, pages_low, pages_high; # 277 "include/linux/mmzone.h" unsigned long lowmem_reserve[3]; # 288 "include/linux/mmzone.h" struct per_cpu_pageset pageset[1]; spinlock_t lock; struct free_area free_area[14]; unsigned long *pageblock_flags; spinlock_t lru_lock; struct { struct list_head list; unsigned long nr_scan; } lru[NR_LRU_LISTS]; # 326 "include/linux/mmzone.h" unsigned long recent_rotated[2]; unsigned long recent_scanned[2]; unsigned long pages_scanned; unsigned long flags; atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; # 348 "include/linux/mmzone.h" int prev_priority; unsigned int inactive_ratio; # 384 "include/linux/mmzone.h" wait_queue_head_t * wait_table; unsigned long 
wait_table_hash_nr_entries; unsigned long wait_table_bits; struct pglist_data *zone_pgdat; unsigned long zone_start_pfn; # 405 "include/linux/mmzone.h" unsigned long spanned_pages; unsigned long present_pages; const char *name; } ; typedef enum { ZONE_ALL_UNRECLAIMABLE, ZONE_RECLAIM_LOCKED, ZONE_OOM_LOCKED, } zone_flags_t; static inline __attribute__((always_inline)) void zone_set_flag(struct zone *zone, zone_flags_t flag) { set_bit(flag, &zone->flags); } static inline __attribute__((always_inline)) int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag) { return test_and_set_bit(flag, &zone->flags); } static inline __attribute__((always_inline)) void zone_clear_flag(struct zone *zone, zone_flags_t flag) { clear_bit(flag, &zone->flags); } static inline __attribute__((always_inline)) int zone_is_all_unreclaimable(const struct zone *zone) { return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags); } static inline __attribute__((always_inline)) int zone_is_reclaim_locked(const struct zone *zone) { return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); } static inline __attribute__((always_inline)) int zone_is_oom_locked(const struct zone *zone) { return test_bit(ZONE_OOM_LOCKED, &zone->flags); } # 538 "include/linux/mmzone.h" struct zonelist_cache; struct zoneref { struct zone *zone; int zone_idx; }; # 567 "include/linux/mmzone.h" struct zonelist { struct zonelist_cache *zlcache_ptr; struct zoneref _zonerefs[((1 << 0) * 3) + 1]; }; # 585 "include/linux/mmzone.h" extern struct page *mem_map; # 599 "include/linux/mmzone.h" struct bootmem_data; typedef struct pglist_data { struct zone node_zones[3]; struct zonelist node_zonelists[1]; int nr_zones; struct page *node_mem_map; struct bootmem_data *bdata; # 621 "include/linux/mmzone.h" unsigned long node_start_pfn; unsigned long node_present_pages; unsigned long node_spanned_pages; int node_id; wait_queue_head_t kswapd_wait; struct task_struct *kswapd; int kswapd_max_order; } pg_data_t; # 640 "include/linux/mmzone.h" # 1 "include/linux/memory_hotplug.h" 1 # 1 "include/linux/mmzone.h" 1 # 5 "include/linux/memory_hotplug.h" 2 struct page; struct zone; struct pglist_data; struct mem_section; # 165 "include/linux/memory_hotplug.h" static inline __attribute__((always_inline)) void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} static inline __attribute__((always_inline)) void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} static inline __attribute__((always_inline)) void pgdat_resize_init(struct pglist_data *pgdat) {} static inline __attribute__((always_inline)) unsigned zone_span_seqbegin(struct zone *zone) { return 0; } static inline __attribute__((always_inline)) int zone_span_seqretry(struct zone *zone, unsigned iv) { return 0; } static inline __attribute__((always_inline)) void zone_span_writelock(struct zone *zone) {} static inline __attribute__((always_inline)) void zone_span_writeunlock(struct zone *zone) {} static inline __attribute__((always_inline)) void zone_seqlock_init(struct zone *zone) {} static inline __attribute__((always_inline)) int mhp_notimplemented(const char *func) { printk("<4>" "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func); dump_stack(); return -38; } static inline __attribute__((always_inline)) void register_page_bootmem_info_node(struct pglist_data *pgdat) { } extern int walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)); static inline __attribute__((always_inline)) int 
is_mem_section_removable(unsigned long pfn, unsigned long nr_pages) { return 0; } extern int add_memory(int nid, u64 start, u64 size); extern int arch_add_memory(int nid, u64 start, u64 size); extern int remove_memory(u64 start, u64 size); extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, int nr_pages); extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); # 641 "include/linux/mmzone.h" 2 void get_zone_counts(unsigned long *active, unsigned long *inactive, unsigned long *free); void build_all_zonelists(void); void wakeup_kswapd(struct zone *zone, int order); int zone_watermark_ok(struct zone *z, int order, unsigned long mark, int classzone_idx, int alloc_flags); enum memmap_context { MEMMAP_EARLY, MEMMAP_HOTPLUG, }; extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, unsigned long size, enum memmap_context context); static inline __attribute__((always_inline)) void memory_present(int nid, unsigned long start, unsigned long end) {} # 671 "include/linux/mmzone.h" static inline __attribute__((always_inline)) int populated_zone(struct zone *zone) { return (!!zone->present_pages); } extern int movable_zone; static inline __attribute__((always_inline)) int zone_movable_is_highmem(void) { return 0; } static inline __attribute__((always_inline)) int is_highmem_idx(enum zone_type idx) { return 0; } static inline __attribute__((always_inline)) int is_normal_idx(enum zone_type idx) { return (idx == ZONE_NORMAL); } static inline __attribute__((always_inline)) int is_highmem(struct zone *zone) { return 0; } static inline __attribute__((always_inline)) int is_normal(struct zone *zone) { return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; } static inline __attribute__((always_inline)) int is_dma32(struct zone *zone) { return 0; } static inline __attribute__((always_inline)) int is_dma(struct zone *zone) { return zone == zone->zone_pgdat->node_zones + ZONE_DMA; } struct ctl_table; struct file; int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *, void *, size_t *, loff_t *); extern int sysctl_lowmem_reserve_ratio[3 -1]; int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *, void *, size_t *, loff_t *); int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *, void *, size_t *, loff_t *); int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, struct file *, void *, size_t *, loff_t *); int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, struct file *, void *, size_t *, loff_t *); extern int numa_zonelist_order_handler(struct ctl_table *, int, struct file *, void *, size_t *, loff_t *); extern char numa_zonelist_order[]; # 1 "include/linux/topology.h" 1 # 33 "include/linux/topology.h" # 1 "include/linux/smp.h" 1 # 14 "include/linux/smp.h" extern void cpu_idle(void); struct call_single_data { struct list_head list; void (*func) (void *info); void *info; u16 flags; u16 priv; }; # 123 "include/linux/smp.h" static inline __attribute__((always_inline)) int up_smp_call_function(void (*func)(void *), void *info) { return 0; } # 136 "include/linux/smp.h" static inline __attribute__((always_inline)) void smp_send_reschedule(int cpu) { } # 151 "include/linux/smp.h" static inline __attribute__((always_inline)) void init_call_single_data(void) { } # 182 "include/linux/smp.h" void smp_setup_processor_id(void); # 34 
"include/linux/topology.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/topology.h" 1 # 1 "include/asm-generic/topology.h" 1 # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/topology.h" 2 # 35 "include/linux/topology.h" 2 # 52 "include/linux/topology.h" void arch_update_cpu_topology(void); # 764 "include/linux/mmzone.h" 2 extern struct pglist_data contig_page_data; # 781 "include/linux/mmzone.h" extern struct pglist_data *first_online_pgdat(void); extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); extern struct zone *next_zone(struct zone *zone); # 805 "include/linux/mmzone.h" static inline __attribute__((always_inline)) struct zone *zonelist_zone(struct zoneref *zoneref) { return zoneref->zone; } static inline __attribute__((always_inline)) int zonelist_zone_idx(struct zoneref *zoneref) { return zoneref->zone_idx; } static inline __attribute__((always_inline)) int zonelist_node_idx(struct zoneref *zoneref) { return 0; } # 838 "include/linux/mmzone.h" struct zoneref *next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, nodemask_t *nodes, struct zone **zone); # 855 "include/linux/mmzone.h" static inline __attribute__((always_inline)) struct zoneref *first_zones_zonelist(struct zonelist *zonelist, enum zone_type highest_zoneidx, nodemask_t *nodes, struct zone **zone) { return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes, zone); } # 898 "include/linux/mmzone.h" static inline __attribute__((always_inline)) unsigned long early_pfn_to_nid(unsigned long pfn) { return 0; } # 1079 "include/linux/mmzone.h" void memory_present(int nid, unsigned long start, unsigned long end); unsigned long __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) node_memmap_size_bytes(int, unsigned long, unsigned long); # 5 "include/linux/gfp.h" 2 struct vm_area_struct; # 108 "include/linux/gfp.h" static inline __attribute__((always_inline)) int allocflags_to_migratetype(gfp_t gfp_flags) { ({ int __ret_warn_on = !!((gfp_flags & ((( gfp_t)0x80000u)|(( gfp_t)0x100000u))) == ((( gfp_t)0x80000u)|(( gfp_t)0x100000u))); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("include/linux/gfp.h", 110); __builtin_expect(!!(__ret_warn_on), 0); }); if (__builtin_expect(!!(page_group_by_mobility_disabled), 0)) return 0; return (((gfp_flags & (( gfp_t)0x100000u)) != 0) << 1) | ((gfp_flags & (( gfp_t)0x80000u)) != 0); } static inline __attribute__((always_inline)) enum zone_type gfp_zone(gfp_t flags) { if (flags & (( gfp_t)0x01u)) return ZONE_DMA; if ((flags & ((( gfp_t)0x02u) | (( gfp_t)0x100000u))) == ((( gfp_t)0x02u) | (( gfp_t)0x100000u))) return ZONE_MOVABLE; return ZONE_NORMAL; } # 147 "include/linux/gfp.h" static inline __attribute__((always_inline)) int gfp_zonelist(gfp_t flags) { if (0 && __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 0)) return 1; return 0; } # 164 "include/linux/gfp.h" static inline __attribute__((always_inline)) struct zonelist *node_zonelist(int nid, gfp_t flags) { return (&contig_page_data)->node_zonelists + gfp_zonelist(flags); } static inline __attribute__((always_inline)) void arch_free_page(struct page *page, int order) { } static inline __attribute__((always_inline)) void arch_alloc_page(struct page *page, int order) { } struct page * __alloc_pages_internal(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, nodemask_t *nodemask); static inline __attribute__((always_inline)) struct page * __alloc_pages(gfp_t 
gfp_mask, unsigned int order, struct zonelist *zonelist) { return __alloc_pages_internal(gfp_mask, order, zonelist, ((void *)0)); } static inline __attribute__((always_inline)) struct page * __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, nodemask_t *nodemask) { return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask); } static inline __attribute__((always_inline)) struct page *alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) { if (__builtin_expect(!!(order >= 14), 0)) return ((void *)0); if (nid < 0) nid = (((void)(0),0)); return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); } # 228 "include/linux/gfp.h" extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); void *alloc_pages_exact(size_t size, gfp_t gfp_mask); void free_pages_exact(void *virt, size_t size); extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); extern void free_hot_page(struct page *page); extern void free_cold_page(struct page *page); void page_alloc_init(void); void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(void); void drain_local_pages(void *dummy); # 13 "include/linux/slab.h" 2 # 84 "include/linux/slab.h" void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init(void); int slab_is_available(void); struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, void (*)(void *)); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void kmem_cache_free(struct kmem_cache *, void *); unsigned int kmem_cache_size(struct kmem_cache *); const char *kmem_cache_name(struct kmem_cache *); int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); # 127 "include/linux/slab.h" void * __attribute__((warn_unused_result)) __krealloc(const void *, size_t, gfp_t); void * __attribute__((warn_unused_result)) krealloc(const void *, size_t, gfp_t); void kfree(const void *); size_t ksize(const void *); # 152 "include/linux/slab.h" # 1 "include/linux/slub_def.h" 1 # 11 "include/linux/slub_def.h" # 1 "include/linux/workqueue.h" 1 # 1 "include/linux/timer.h" 1 # 1 "include/linux/ktime.h" 1 # 24 "include/linux/ktime.h" # 1 "include/linux/time.h" 1 # 9 "include/linux/time.h" # 1 "include/linux/math64.h" 1 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/div64.h" 1 # 6 "include/linux/math64.h" 2 # 41 "include/linux/math64.h" static inline __attribute__((always_inline)) u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) { *remainder = ({ uint32_t __base = (divisor); uint32_t __rem; (void)(((typeof((dividend)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((dividend) >> 32) == 0), 1)) { __rem = (uint32_t)(dividend) % __base; (dividend) = (uint32_t)(dividend) / __base; } else __rem = __div64_32(&(dividend), __base); __rem; }); return dividend; } extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder); extern u64 div64_u64(u64 dividend, u64 divisor); # 66 "include/linux/math64.h" static inline __attribute__((always_inline)) u64 div_u64(u64 dividend, u32 divisor) { u32 remainder; return div_u64_rem(dividend, divisor, &remainder); } static inline __attribute__((always_inline)) s64 div_s64(s64 dividend, s32 divisor) { s32 remainder; return div_s64_rem(dividend, divisor, &remainder); } u32 iter_div_u64_rem(u64 
dividend, u32 divisor, u64 *remainder); static inline __attribute__((always_inline)) __attribute__((always_inline)) u32 __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) { u32 ret = 0; while (dividend >= divisor) { asm("" : "+rm"(dividend)); dividend -= divisor; ret++; } *remainder = dividend; return ret; } # 10 "include/linux/time.h" 2 struct timespec { time_t tv_sec; long tv_nsec; }; struct timeval { time_t tv_sec; suseconds_t tv_usec; }; struct timezone { int tz_minuteswest; int tz_dsttime; }; extern struct timezone sys_tz; # 45 "include/linux/time.h" static inline __attribute__((always_inline)) int timespec_equal(const struct timespec *a, const struct timespec *b) { return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); } static inline __attribute__((always_inline)) int timespec_compare(const struct timespec *lhs, const struct timespec *rhs) { if (lhs->tv_sec < rhs->tv_sec) return -1; if (lhs->tv_sec > rhs->tv_sec) return 1; return lhs->tv_nsec - rhs->tv_nsec; } static inline __attribute__((always_inline)) int timeval_compare(const struct timeval *lhs, const struct timeval *rhs) { if (lhs->tv_sec < rhs->tv_sec) return -1; if (lhs->tv_sec > rhs->tv_sec) return 1; return lhs->tv_usec - rhs->tv_usec; } extern unsigned long mktime(const unsigned int year, const unsigned int mon, const unsigned int day, const unsigned int hour, const unsigned int min, const unsigned int sec); extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec); extern struct timespec timespec_add_safe(const struct timespec lhs, const struct timespec rhs); static inline __attribute__((always_inline)) struct timespec timespec_sub(struct timespec lhs, struct timespec rhs) { struct timespec ts_delta; set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec, lhs.tv_nsec - rhs.tv_nsec); return ts_delta; } extern struct timespec xtime; extern struct timespec wall_to_monotonic; extern seqlock_t xtime_lock; extern unsigned long read_persistent_clock(void); extern int update_persistent_clock(struct timespec now); extern int no_sync_cmos_clock ; void timekeeping_init(void); unsigned long get_seconds(void); struct timespec current_kernel_time(void); extern void do_gettimeofday(struct timeval *tv); extern int do_settimeofday(struct timespec *tv); extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); extern long do_utimes(int dfd, char *filename, struct timespec *times, int flags); struct itimerval; extern int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue); extern unsigned int alarm_setitimer(unsigned int seconds); extern int do_getitimer(int which, struct itimerval *value); extern void getnstimeofday(struct timespec *tv); extern void getrawmonotonic(struct timespec *ts); extern void getboottime(struct timespec *ts); extern void monotonic_to_bootbased(struct timespec *ts); extern struct timespec timespec_trunc(struct timespec t, unsigned gran); extern int timekeeping_valid_for_hres(void); extern void update_wall_time(void); extern void update_xtime_cache(u64 nsec); struct tms; extern void do_sys_times(struct tms *); # 145 "include/linux/time.h" static inline __attribute__((always_inline)) s64 timespec_to_ns(const struct timespec *ts) { return ((s64) ts->tv_sec * 1000000000L) + ts->tv_nsec; } # 157 "include/linux/time.h" static inline __attribute__((always_inline)) s64 timeval_to_ns(const struct timeval *tv) { return ((s64) tv->tv_sec * 1000000000L) + tv->tv_usec * 1000L; } extern struct timespec ns_to_timespec(const s64 nsec); extern struct 
timeval ns_to_timeval(const s64 nsec); # 187 "include/linux/time.h" static inline __attribute__((always_inline)) __attribute__((always_inline)) void timespec_add_ns(struct timespec *a, u64 ns) { a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, 1000000000L, &ns); a->tv_nsec = ns; } # 210 "include/linux/time.h" struct itimerspec { struct timespec it_interval; struct timespec it_value; }; struct itimerval { struct timeval it_interval; struct timeval it_value; }; # 25 "include/linux/ktime.h" 2 # 1 "include/linux/jiffies.h" 1 # 1 "include/linux/timex.h" 1 # 100 "include/linux/timex.h" struct timex { unsigned int modes; long offset; long freq; long maxerror; long esterror; int status; long constant; long precision; long tolerance; struct timeval time; long tick; long ppsfreq; long jitter; int shift; long stabil; long jitcnt; long calcnt; long errcnt; long stbcnt; int tai; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; }; # 202 "include/linux/timex.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/timex.h" 1 # 14 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/timex.h" typedef unsigned long long cycles_t; static inline __attribute__((always_inline)) cycles_t get_cycles(void) { unsigned long tmp, tmp2; __asm__ __volatile__("%0 = cycles; %1 = cycles2;" : "=d"(tmp), "=d"(tmp2)); return tmp | ((cycles_t)tmp2 << 32); } # 203 "include/linux/timex.h" 2 extern unsigned long tick_usec; extern unsigned long tick_nsec; extern int tickadj; extern int time_status; extern long time_maxerror; extern long time_esterror; extern long time_adjust; extern void ntp_init(void); extern void ntp_clear(void); static inline __attribute__((always_inline)) int ntp_synced(void) { return !(time_status & 0x0040); } # 251 "include/linux/timex.h" extern u64 tick_length; extern void second_overflow(void); extern void update_ntp_one_tick(void); extern int do_adjtimex(struct timex *); int read_current_timer(unsigned long *timer_val); # 9 "include/linux/jiffies.h" 2 # 81 "include/linux/jiffies.h" extern u64 __attribute__((section(".data"))) jiffies_64; extern unsigned long volatile __attribute__((section(".data"))) jiffies; u64 get_jiffies_64(void); # 173 "include/linux/jiffies.h" extern unsigned long preset_lpj; # 286 "include/linux/jiffies.h" extern unsigned int jiffies_to_msecs(const unsigned long j); extern unsigned int jiffies_to_usecs(const unsigned long j); extern unsigned long msecs_to_jiffies(const unsigned int m); extern unsigned long usecs_to_jiffies(const unsigned int u); extern unsigned long timespec_to_jiffies(const struct timespec *value); extern void jiffies_to_timespec(const unsigned long jiffies, struct timespec *value); extern unsigned long timeval_to_jiffies(const struct timeval *value); extern void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value); extern clock_t jiffies_to_clock_t(long x); extern unsigned long clock_t_to_jiffies(unsigned long x); extern u64 jiffies_64_to_clock_t(u64 x); extern u64 nsec_to_clock_t(u64 x); # 26 "include/linux/ktime.h" 2 # 46 "include/linux/ktime.h" union ktime { s64 tv64; struct { s32 nsec, sec; } tv; }; typedef union ktime ktime_t; # 151 "include/linux/ktime.h" static inline __attribute__((always_inline)) ktime_t ktime_set(const long secs, const unsigned long nsecs) { return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } }; } # 163 "include/linux/ktime.h" static inline __attribute__((always_inline)) ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs) { 
ktime_t res; res.tv64 = lhs.tv64 - rhs.tv64; if (res.tv.nsec < 0) res.tv.nsec += 1000000000L; return res; } # 181 "include/linux/ktime.h" static inline __attribute__((always_inline)) ktime_t ktime_add(const ktime_t add1, const ktime_t add2) { ktime_t res; res.tv64 = add1.tv64 + add2.tv64; # 194 "include/linux/ktime.h" if (res.tv.nsec >= 1000000000L) res.tv64 += (u32)-1000000000L; return res; } # 207 "include/linux/ktime.h" extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec); # 216 "include/linux/ktime.h" extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec); static inline __attribute__((always_inline)) ktime_t timespec_to_ktime(const struct timespec ts) { return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec, .nsec = (s32)ts.tv_nsec } }; } static inline __attribute__((always_inline)) ktime_t timeval_to_ktime(const struct timeval tv) { return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec, .nsec = (s32)tv.tv_usec * 1000 } }; } static inline __attribute__((always_inline)) struct timespec ktime_to_timespec(const ktime_t kt) { return (struct timespec) { .tv_sec = (time_t) kt.tv.sec, .tv_nsec = (long) kt.tv.nsec }; } static inline __attribute__((always_inline)) struct timeval ktime_to_timeval(const ktime_t kt) { return (struct timeval) { .tv_sec = (time_t) kt.tv.sec, .tv_usec = (suseconds_t) (kt.tv.nsec / 1000L) }; } static inline __attribute__((always_inline)) s64 ktime_to_ns(const ktime_t kt) { return (s64) kt.tv.sec * 1000000000L + kt.tv.nsec; } # 287 "include/linux/ktime.h" static inline __attribute__((always_inline)) int ktime_equal(const ktime_t cmp1, const ktime_t cmp2) { return cmp1.tv64 == cmp2.tv64; } static inline __attribute__((always_inline)) s64 ktime_to_us(const ktime_t kt) { struct timeval tv = ktime_to_timeval(kt); return (s64) tv.tv_sec * 1000000L + tv.tv_usec; } static inline __attribute__((always_inline)) s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) { return ktime_to_us(ktime_sub(later, earlier)); } static inline __attribute__((always_inline)) ktime_t ktime_add_us(const ktime_t kt, const u64 usec) { return ktime_add_ns(kt, usec * 1000); } static inline __attribute__((always_inline)) ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) { return ktime_sub_ns(kt, usec * 1000); } extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); # 325 "include/linux/ktime.h" extern void ktime_get_ts(struct timespec *ts); static inline __attribute__((always_inline)) ktime_t ns_to_ktime(u64 ns) { static const ktime_t ktime_zero = { .tv64 = 0 }; return ktime_add_ns(ktime_zero, ns); } # 6 "include/linux/timer.h" 2 # 1 "include/linux/debugobjects.h" 1 enum debug_obj_state { ODEBUG_STATE_NONE, ODEBUG_STATE_INIT, ODEBUG_STATE_INACTIVE, ODEBUG_STATE_ACTIVE, ODEBUG_STATE_DESTROYED, ODEBUG_STATE_NOTAVAILABLE, ODEBUG_STATE_MAX, }; struct debug_obj_descr; # 26 "include/linux/debugobjects.h" struct debug_obj { struct hlist_node node; enum debug_obj_state state; void *object; struct debug_obj_descr *descr; }; # 45 "include/linux/debugobjects.h" struct debug_obj_descr { const char *name; int (*fixup_init) (void *addr, enum debug_obj_state state); int (*fixup_activate) (void *addr, enum debug_obj_state state); int (*fixup_destroy) (void *addr, enum debug_obj_state state); int (*fixup_free) (void *addr, enum debug_obj_state state); }; extern void debug_object_init (void *addr, struct debug_obj_descr *descr); extern void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr); extern void debug_object_activate (void *addr, struct debug_obj_descr *descr); extern 
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr); extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr); extern void debug_object_free (void *addr, struct debug_obj_descr *descr); extern void debug_objects_early_init(void); extern void debug_objects_mem_init(void); # 84 "include/linux/debugobjects.h" extern void debug_check_no_obj_freed(const void *address, unsigned long size); # 8 "include/linux/timer.h" 2 struct tvec_base; struct timer_list { struct list_head entry; unsigned long expires; void (*function)(unsigned long); unsigned long data; struct tvec_base *base; void *start_site; char start_comm[16]; int start_pid; }; extern struct tvec_base boot_tvec_bases; # 40 "include/linux/timer.h" void init_timer(struct timer_list *timer); void init_timer_deferrable(struct timer_list *timer); static inline __attribute__((always_inline)) void destroy_timer_on_stack(struct timer_list *timer) { } static inline __attribute__((always_inline)) void init_timer_on_stack(struct timer_list *timer) { init_timer(timer); } static inline __attribute__((always_inline)) void setup_timer(struct timer_list * timer, void (*function)(unsigned long), unsigned long data) { timer->function = function; timer->data = data; init_timer(timer); } static inline __attribute__((always_inline)) void setup_timer_on_stack(struct timer_list *timer, void (*function)(unsigned long), unsigned long data) { timer->function = function; timer->data = data; init_timer_on_stack(timer); } # 82 "include/linux/timer.h" static inline __attribute__((always_inline)) int timer_pending(const struct timer_list * timer) { return timer->entry.next != ((void *)0); } extern void add_timer_on(struct timer_list *timer, int cpu); extern int del_timer(struct timer_list * timer); extern int __mod_timer(struct timer_list *timer, unsigned long expires); extern int mod_timer(struct timer_list *timer, unsigned long expires); # 102 "include/linux/timer.h" extern unsigned long next_timer_interrupt(void); extern unsigned long get_next_timer_interrupt(unsigned long now); # 117 "include/linux/timer.h" extern void init_timer_stats(void); extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf, void *timerf, char *comm, unsigned int timer_flag); extern void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr); static inline __attribute__((always_inline)) void timer_stats_timer_set_start_info(struct timer_list *timer) { __timer_stats_timer_set_start_info(timer, __builtin_return_address(0)); } static inline __attribute__((always_inline)) void timer_stats_timer_clear_start_info(struct timer_list *timer) { timer->start_site = ((void *)0); } # 163 "include/linux/timer.h" static inline __attribute__((always_inline)) void add_timer(struct timer_list *timer) { do { if (__builtin_expect(!!(timer_pending(timer)), 0)) do { dump_bfin_trace_buffer(); printk("<0>" "BUG: failure at %s:%d/%s()!\n", "include/linux/timer.h", 165, __func__); panic("BUG!"); } while (0); } while(0); __mod_timer(timer, timer->expires); } # 179 "include/linux/timer.h" extern void init_timers(void); extern void run_local_timers(void); struct hrtimer; extern enum hrtimer_restart it_real_fn(struct hrtimer *); unsigned long __round_jiffies(unsigned long j, int cpu); unsigned long __round_jiffies_relative(unsigned long j, int cpu); unsigned long round_jiffies(unsigned long j); unsigned long round_jiffies_relative(unsigned long j); unsigned long __round_jiffies_up(unsigned long j, int cpu); unsigned long 
__round_jiffies_up_relative(unsigned long j, int cpu); unsigned long round_jiffies_up(unsigned long j); unsigned long round_jiffies_up_relative(unsigned long j); # 9 "include/linux/workqueue.h" 2 struct workqueue_struct; struct work_struct; typedef void (*work_func_t)(struct work_struct *work); struct work_struct { atomic_long_t data; struct list_head entry; work_func_t func; }; struct delayed_work { struct work_struct work; struct timer_list timer; }; struct execute_work { struct work_struct work; }; # 150 "include/linux/workqueue.h" extern struct workqueue_struct * __create_workqueue_key(const char *name, int singlethread, int freezeable, int rt, struct lock_class_key *key, const char *lock_name); # 181 "include/linux/workqueue.h" extern void destroy_workqueue(struct workqueue_struct *wq); extern int queue_work(struct workqueue_struct *wq, struct work_struct *work); extern int queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work); extern int queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay); extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay); extern void flush_workqueue(struct workqueue_struct *wq); extern void flush_scheduled_work(void); extern int schedule_work(struct work_struct *work); extern int schedule_work_on(int cpu, struct work_struct *work); extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); extern int schedule_on_each_cpu(work_func_t func); extern int current_is_keventd(void); extern int keventd_up(void); extern void init_workqueues(void); int execute_in_process_context(work_func_t fn, struct execute_work *); extern int flush_work(struct work_struct *work); extern int cancel_work_sync(struct work_struct *work); static inline __attribute__((always_inline)) int cancel_delayed_work(struct delayed_work *work) { int ret; ret = del_timer(&work->timer); if (ret) clear_bit(0, ((unsigned long *)(&(&work->work)->data))); return ret; } extern int cancel_delayed_work_sync(struct delayed_work *work); static inline __attribute__((always_inline)) void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, struct delayed_work *work) { cancel_delayed_work_sync(work); } static inline __attribute__((always_inline)) void cancel_rearming_delayed_work(struct delayed_work *work) { cancel_delayed_work_sync(work); } static inline __attribute__((always_inline)) long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) { return fn(arg); } # 12 "include/linux/slub_def.h" 2 # 1 "include/linux/kobject.h" 1 # 21 "include/linux/kobject.h" # 1 "include/linux/sysfs.h" 1 # 20 "include/linux/sysfs.h" struct kobject; struct module; struct attribute { const char *name; struct module *owner; mode_t mode; }; struct attribute_group { const char *name; mode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; }; # 63 "include/linux/sysfs.h" struct vm_area_struct; struct bin_attribute { struct attribute attr; size_t size; void *private; ssize_t (*read)(struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct kobject *, struct bin_attribute *, char *, loff_t, size_t); int (*mmap)(struct kobject *, struct bin_attribute *attr, struct vm_area_struct *vma); }; struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *,char *); ssize_t (*store)(struct kobject *,struct 
attribute *,const char *, size_t); }; struct sysfs_dirent; # 134 "include/linux/sysfs.h" static inline __attribute__((always_inline)) int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *), void *data, struct module *owner) { return -38; } static inline __attribute__((always_inline)) int sysfs_create_dir(struct kobject *kobj) { return 0; } static inline __attribute__((always_inline)) void sysfs_remove_dir(struct kobject *kobj) { } static inline __attribute__((always_inline)) int sysfs_rename_dir(struct kobject *kobj, const char *new_name) { return 0; } static inline __attribute__((always_inline)) int sysfs_move_dir(struct kobject *kobj, struct kobject *new_parent_kobj) { return 0; } static inline __attribute__((always_inline)) int sysfs_create_file(struct kobject *kobj, const struct attribute *attr) { return 0; } static inline __attribute__((always_inline)) int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode) { return 0; } static inline __attribute__((always_inline)) void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr) { } static inline __attribute__((always_inline)) int sysfs_create_bin_file(struct kobject *kobj, struct bin_attribute *attr) { return 0; } static inline __attribute__((always_inline)) void sysfs_remove_bin_file(struct kobject *kobj, struct bin_attribute *attr) { } static inline __attribute__((always_inline)) int sysfs_create_link(struct kobject *kobj, struct kobject *target, const char *name) { return 0; } static inline __attribute__((always_inline)) int sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target, const char *name) { return 0; } static inline __attribute__((always_inline)) void sysfs_remove_link(struct kobject *kobj, const char *name) { } static inline __attribute__((always_inline)) int sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp) { return 0; } static inline __attribute__((always_inline)) int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp) { return 0; } static inline __attribute__((always_inline)) void sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp) { } static inline __attribute__((always_inline)) int sysfs_add_file_to_group(struct kobject *kobj, const struct attribute *attr, const char *group) { return 0; } static inline __attribute__((always_inline)) void sysfs_remove_file_from_group(struct kobject *kobj, const struct attribute *attr, const char *group) { } static inline __attribute__((always_inline)) void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr) { } static inline __attribute__((always_inline)) void sysfs_notify_dirent(struct sysfs_dirent *sd) { } static inline __attribute__((always_inline)) struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd, const unsigned char *name) { return ((void *)0); } static inline __attribute__((always_inline)) struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd) { return ((void *)0); } static inline __attribute__((always_inline)) void sysfs_put(struct sysfs_dirent *sd) { } static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) sysfs_init(void) { return 0; } static inline __attribute__((always_inline)) void sysfs_printk_last_file(void) { } # 22 "include/linux/kobject.h" 2 # 1 "include/linux/kref.h" 1 # 21 "include/linux/kref.h" struct kref { atomic_t refcount; }; void kref_set(struct kref *kref, int num); void kref_init(struct kref *kref); void kref_get(struct kref *kref); int kref_put(struct 
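/*
 * Usage sketch for the kref reference counter declared just above
 * (struct kref, kref_init(), kref_get(), kref_put()).  The structure
 * my_obj and the release callback are hypothetical; container_of() is
 * the standard macro (an expanded form of it appears further down in
 * to_kset()), and kzalloc()/kfree() are declared later in this dump.
 *
 *   struct my_obj {
 *           struct kref ref;
 *           int payload;
 *   };
 *
 *   static void my_obj_release(struct kref *kref)
 *   {
 *           struct my_obj *obj = container_of(kref, struct my_obj, ref);
 *           kfree(obj);
 *   }
 *
 *   obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *   kref_init(&obj->ref);                  refcount starts at 1
 *   kref_get(&obj->ref);                   an extra user takes a reference
 *   kref_put(&obj->ref, my_obj_release);   back to 1
 *   kref_put(&obj->ref, my_obj_release);   hits 0, my_obj_release() runs
 */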
kref *kref, void (*release) (struct kref *kref)); # 25 "include/linux/kobject.h" 2 # 34 "include/linux/kobject.h" extern char uevent_helper[]; extern u64 uevent_seqnum; # 49 "include/linux/kobject.h" enum kobject_action { KOBJ_ADD, KOBJ_REMOVE, KOBJ_CHANGE, KOBJ_MOVE, KOBJ_ONLINE, KOBJ_OFFLINE, KOBJ_MAX }; struct kobject { const char *name; struct list_head entry; struct kobject *parent; struct kset *kset; struct kobj_type *ktype; struct sysfs_dirent *sd; struct kref kref; unsigned int state_initialized:1; unsigned int state_in_sysfs:1; unsigned int state_add_uevent_sent:1; unsigned int state_remove_uevent_sent:1; }; extern int kobject_set_name(struct kobject *kobj, const char *name, ...) __attribute__((format(printf, 2, 3))); static inline __attribute__((always_inline)) const char *kobject_name(const struct kobject *kobj) { return kobj->name; } extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype); extern int __attribute__((warn_unused_result)) kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...); extern int __attribute__((warn_unused_result)) kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype, struct kobject *parent, const char *fmt, ...); extern void kobject_del(struct kobject *kobj); extern struct kobject * __attribute__((warn_unused_result)) kobject_create(void); extern struct kobject * __attribute__((warn_unused_result)) kobject_create_and_add(const char *name, struct kobject *parent); extern int __attribute__((warn_unused_result)) kobject_rename(struct kobject *, const char *new_name); extern int __attribute__((warn_unused_result)) kobject_move(struct kobject *, struct kobject *); extern struct kobject *kobject_get(struct kobject *kobj); extern void kobject_put(struct kobject *kobj); extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); struct kobj_type { void (*release)(struct kobject *kobj); struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; }; struct kobj_uevent_env { char *envp[32]; int envp_idx; char buf[2048]; int buflen; }; struct kset_uevent_ops { int (*filter)(struct kset *kset, struct kobject *kobj); const char *(*name)(struct kset *kset, struct kobject *kobj); int (*uevent)(struct kset *kset, struct kobject *kobj, struct kobj_uevent_env *env); }; struct kobj_attribute { struct attribute attr; ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr, char *buf); ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count); }; extern struct sysfs_ops kobj_sysfs_ops; # 151 "include/linux/kobject.h" struct kset { struct list_head list; spinlock_t list_lock; struct kobject kobj; struct kset_uevent_ops *uevent_ops; }; extern void kset_init(struct kset *kset); extern int __attribute__((warn_unused_result)) kset_register(struct kset *kset); extern void kset_unregister(struct kset *kset); extern struct kset * __attribute__((warn_unused_result)) kset_create_and_add(const char *name, struct kset_uevent_ops *u, struct kobject *parent_kobj); static inline __attribute__((always_inline)) struct kset *to_kset(struct kobject *kobj) { return kobj ? ({ const typeof( ((struct kset *)0)->kobj ) *__mptr = (kobj); (struct kset *)( (char *)__mptr - __builtin_offsetof(struct kset,kobj) );}) : ((void *)0); } static inline __attribute__((always_inline)) struct kset *kset_get(struct kset *k) { return k ? 
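/*
 * Usage sketch for the kobject API declared above.  Note that the sysfs
 * helpers earlier in this dump are static inline stubs returning 0 or
 * -38 (-ENOSYS), i.e. sysfs appears to be disabled in this configuration,
 * so kobject registration will not actually create /sys entries here.
 * my_kobj and "my_feature" are hypothetical; kernel_kobj is the extern
 * declared a few lines further down.
 *
 *   static struct kobject *my_kobj;
 *
 *   my_kobj = kobject_create_and_add("my_feature", kernel_kobj);
 *   if (!my_kobj)
 *           return -ENOMEM;
 *
 *   ... later ...
 *   kobject_put(my_kobj);     drop the reference; the ktype release frees it
 */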
to_kset(kobject_get(&k->kobj)) : ((void *)0); } static inline __attribute__((always_inline)) void kset_put(struct kset *k) { kobject_put(&k->kobj); } static inline __attribute__((always_inline)) struct kobj_type *get_ktype(struct kobject *kobj) { return kobj->ktype; } extern struct kobject *kset_find_obj(struct kset *, const char *); extern struct kobject *kernel_kobj; extern struct kobject *mm_kobj; extern struct kobject *hypervisor_kobj; extern struct kobject *power_kobj; extern struct kobject *firmware_kobj; # 209 "include/linux/kobject.h" static inline __attribute__((always_inline)) int kobject_uevent(struct kobject *kobj, enum kobject_action action) { return 0; } static inline __attribute__((always_inline)) int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, char *envp[]) { return 0; } static inline __attribute__((always_inline)) int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) { return 0; } static inline __attribute__((always_inline)) int kobject_action_type(const char *buf, size_t count, enum kobject_action *type) { return -22; } # 13 "include/linux/slub_def.h" 2 enum stat_item { ALLOC_FASTPATH, ALLOC_SLOWPATH, FREE_FASTPATH, FREE_SLOWPATH, FREE_FROZEN, FREE_ADD_PARTIAL, FREE_REMOVE_PARTIAL, ALLOC_FROM_PARTIAL, ALLOC_SLAB, ALLOC_REFILL, FREE_SLAB, CPUSLAB_FLUSH, DEACTIVATE_FULL, DEACTIVATE_EMPTY, DEACTIVATE_TO_HEAD, DEACTIVATE_TO_TAIL, DEACTIVATE_REMOTE_FREES, ORDER_FALLBACK, NR_SLUB_STAT_ITEMS }; struct kmem_cache_cpu { void **freelist; struct page *page; int node; unsigned int offset; unsigned int objsize; }; struct kmem_cache_node { spinlock_t list_lock; unsigned long nr_partial; unsigned long min_partial; struct list_head partial; }; struct kmem_cache_order_objects { unsigned long x; }; struct kmem_cache { unsigned long flags; int size; int objsize; int offset; struct kmem_cache_order_objects oo; struct kmem_cache_node local_node; struct kmem_cache_order_objects max; struct kmem_cache_order_objects min; gfp_t allocflags; int refcount; void (*ctor)(void *); int inuse; int align; const char *name; struct list_head list; # 108 "include/linux/slub_def.h" struct kmem_cache_cpu cpu_slab; }; # 127 "include/linux/slub_def.h" extern struct kmem_cache kmalloc_caches[12 + 1]; static inline __attribute__((always_inline)) __attribute__((always_inline)) int kmalloc_index(size_t size) { if (!size) return 0; if (size <= 8) return ( __builtin_constant_p(8) ? ( (8) < 1 ? ____ilog2_NaN() : (8) & (1ULL << 63) ? 63 : (8) & (1ULL << 62) ? 62 : (8) & (1ULL << 61) ? 61 : (8) & (1ULL << 60) ? 60 : (8) & (1ULL << 59) ? 59 : (8) & (1ULL << 58) ? 58 : (8) & (1ULL << 57) ? 57 : (8) & (1ULL << 56) ? 56 : (8) & (1ULL << 55) ? 55 : (8) & (1ULL << 54) ? 54 : (8) & (1ULL << 53) ? 53 : (8) & (1ULL << 52) ? 52 : (8) & (1ULL << 51) ? 51 : (8) & (1ULL << 50) ? 50 : (8) & (1ULL << 49) ? 49 : (8) & (1ULL << 48) ? 48 : (8) & (1ULL << 47) ? 47 : (8) & (1ULL << 46) ? 46 : (8) & (1ULL << 45) ? 45 : (8) & (1ULL << 44) ? 44 : (8) & (1ULL << 43) ? 43 : (8) & (1ULL << 42) ? 42 : (8) & (1ULL << 41) ? 41 : (8) & (1ULL << 40) ? 40 : (8) & (1ULL << 39) ? 39 : (8) & (1ULL << 38) ? 38 : (8) & (1ULL << 37) ? 37 : (8) & (1ULL << 36) ? 36 : (8) & (1ULL << 35) ? 35 : (8) & (1ULL << 34) ? 34 : (8) & (1ULL << 33) ? 33 : (8) & (1ULL << 32) ? 32 : (8) & (1ULL << 31) ? 31 : (8) & (1ULL << 30) ? 30 : (8) & (1ULL << 29) ? 29 : (8) & (1ULL << 28) ? 28 : (8) & (1ULL << 27) ? 27 : (8) & (1ULL << 26) ? 26 : (8) & (1ULL << 25) ? 25 : (8) & (1ULL << 24) ? 24 : (8) & (1ULL << 23) ? 
23 : (8) & (1ULL << 22) ? 22 : (8) & (1ULL << 21) ? 21 : (8) & (1ULL << 20) ? 20 : (8) & (1ULL << 19) ? 19 : (8) & (1ULL << 18) ? 18 : (8) & (1ULL << 17) ? 17 : (8) & (1ULL << 16) ? 16 : (8) & (1ULL << 15) ? 15 : (8) & (1ULL << 14) ? 14 : (8) & (1ULL << 13) ? 13 : (8) & (1ULL << 12) ? 12 : (8) & (1ULL << 11) ? 11 : (8) & (1ULL << 10) ? 10 : (8) & (1ULL << 9) ? 9 : (8) & (1ULL << 8) ? 8 : (8) & (1ULL << 7) ? 7 : (8) & (1ULL << 6) ? 6 : (8) & (1ULL << 5) ? 5 : (8) & (1ULL << 4) ? 4 : (8) & (1ULL << 3) ? 3 : (8) & (1ULL << 2) ? 2 : (8) & (1ULL << 1) ? 1 : (8) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(8) <= 4) ? __ilog2_u32(8) : __ilog2_u64(8) ); if (size > 64 && size <= 96) return 1; if (size > 128 && size <= 192) return 2; if (size <= 8) return 3; if (size <= 16) return 4; if (size <= 32) return 5; if (size <= 64) return 6; if (size <= 128) return 7; if (size <= 256) return 8; if (size <= 512) return 9; if (size <= 1024) return 10; if (size <= 2 * 1024) return 11; if (size <= 4 * 1024) return 12; if (size <= 8 * 1024) return 13; if (size <= 16 * 1024) return 14; if (size <= 32 * 1024) return 15; if (size <= 64 * 1024) return 16; if (size <= 128 * 1024) return 17; if (size <= 256 * 1024) return 18; if (size <= 512 * 1024) return 19; if (size <= 1024 * 1024) return 20; if (size <= 2 * 1024 * 1024) return 21; return -1; # 179 "include/linux/slub_def.h" } static inline __attribute__((always_inline)) __attribute__((always_inline)) struct kmem_cache *kmalloc_slab(size_t size) { int index = kmalloc_index(size); if (index == 0) return ((void *)0); return &kmalloc_caches[index]; } # 204 "include/linux/slub_def.h" void *kmem_cache_alloc(struct kmem_cache *, gfp_t); void *__kmalloc(size_t size, gfp_t flags); static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc_large(size_t size, gfp_t flags) { return (void *)__get_free_pages(flags | (( gfp_t)0x4000u), get_order(size)); } static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc(size_t size, gfp_t flags) { if (__builtin_constant_p(size)) { if (size > (1UL << 12)) return kmalloc_large(size, flags); if (!(flags & (( gfp_t)0x01u))) { struct kmem_cache *s = kmalloc_slab(size); if (!s) return ((void *)16); return kmem_cache_alloc(s, flags); } } return __kmalloc(size, flags); } # 153 "include/linux/slab.h" 2 # 210 "include/linux/slab.h" static inline __attribute__((always_inline)) void *kcalloc(size_t n, size_t size, gfp_t flags) { if (size != 0 && n > (~0UL) / size) return ((void *)0); return __kmalloc(n * size, flags | (( gfp_t)0x8000u)); } # 228 "include/linux/slab.h" static inline __attribute__((always_inline)) void *kmalloc_node(size_t size, gfp_t flags, int node) { return kmalloc(size, flags); } static inline __attribute__((always_inline)) void *__kmalloc_node(size_t size, gfp_t flags, int node) { return __kmalloc(size, flags); } void *kmem_cache_alloc(struct kmem_cache *, gfp_t); static inline __attribute__((always_inline)) void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int node) { return kmem_cache_alloc(cachep, flags); } # 256 "include/linux/slab.h" extern void *__kmalloc_track_caller(size_t, gfp_t, void*); # 293 "include/linux/slab.h" static inline __attribute__((always_inline)) void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) { return kmem_cache_alloc(k, flags | (( gfp_t)0x8000u)); } static inline __attribute__((always_inline)) void *kzalloc(size_t size, gfp_t flags) { return kmalloc(size, flags | (( gfp_t)0x8000u)); } static inline 
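/*
 * Usage sketch for the SLUB allocation entry points above: kmalloc()
 * resolves compile-time-constant sizes to a kmalloc_caches[] slab via
 * kmalloc_slab()/kmalloc_index() and otherwise falls back to __kmalloc();
 * kzalloc() and kcalloc() add the zeroing GFP flag.  struct my_ctx and
 * the field names are hypothetical; GFP_KERNEL and -ENOMEM are the usual
 * symbols (expanded to raw constants in this output).
 *
 *   struct my_ctx *ctx;
 *
 *   ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *   if (!ctx)
 *           return -ENOMEM;
 *
 *   ctx->slots = kcalloc(n, sizeof(*ctx->slots), GFP_KERNEL);
 *   if (!ctx->slots) {
 *           kfree(ctx);
 *           return -ENOMEM;
 *   }
 *
 *   ... use ctx ...
 *
 *   kfree(ctx->slots);
 *   kfree(ctx);
 *
 * kcalloc() is preferred over kmalloc(n * size, ...) for arrays because
 * the inline above rejects multiplications that would overflow.
 */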
__attribute__((always_inline)) void *kzalloc_node(size_t size, gfp_t flags, int node) { return kmalloc_node(size, flags | (( gfp_t)0x8000u), node); } # 6 "include/linux/percpu.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/percpu.h" 1 # 1 "include/asm-generic/percpu.h" 1 # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/percpu.h" 2 # 10 "include/linux/percpu.h" 2 # 91 "include/linux/percpu.h" static inline __attribute__((always_inline)) __attribute__((always_inline)) void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) { return kzalloc(size, gfp); } static inline __attribute__((always_inline)) void percpu_free(void *__pdata) { kfree(__pdata); } # 40 "include/linux/rcupdate.h" 2 # 1 "include/linux/completion.h" 1 # 25 "include/linux/completion.h" struct completion { unsigned int done; wait_queue_head_t wait; }; # 73 "include/linux/completion.h" static inline __attribute__((always_inline)) void init_completion(struct completion *x) { x->done = 0; init_waitqueue_head(&x->wait); } extern void wait_for_completion(struct completion *); extern int wait_for_completion_interruptible(struct completion *x); extern int wait_for_completion_killable(struct completion *x); extern unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); extern unsigned long wait_for_completion_interruptible_timeout( struct completion *x, unsigned long timeout); extern bool try_wait_for_completion(struct completion *x); extern bool completion_done(struct completion *x); extern void complete(struct completion *); extern void complete_all(struct completion *); # 44 "include/linux/rcupdate.h" 2 struct rcu_head { struct rcu_head *next; void (*func)(struct rcu_head *head); }; # 1 "include/linux/rcuclassic.h" 1 # 49 "include/linux/rcuclassic.h" struct rcu_ctrlblk { long cur; long completed; long pending; unsigned long gp_start; unsigned long jiffies_stall; int signaled; spinlock_t lock ; cpumask_t cpumask; } ; static inline __attribute__((always_inline)) int rcu_batch_before(long a, long b) { return (a - b) < 0; } static inline __attribute__((always_inline)) int rcu_batch_after(long a, long b) { return (a - b) > 0; } struct rcu_data { long quiescbatch; int passed_quiesc; int qs_pending; # 100 "include/linux/rcuclassic.h" long batch; struct rcu_head *nxtlist; struct rcu_head **nxttail[3]; long qlen; struct rcu_head *donelist; struct rcu_head **donetail; long blimit; int cpu; struct rcu_head barrier; }; extern __typeof__(struct rcu_data) per_cpu__rcu_data; extern __typeof__(struct rcu_data) per_cpu__rcu_bh_data; static inline __attribute__((always_inline)) void rcu_qsctr_inc(int cpu) { struct rcu_data *rdp = &(*((void)(cpu), &per_cpu__rcu_data)); rdp->passed_quiesc = 1; } static inline __attribute__((always_inline)) void rcu_bh_qsctr_inc(int cpu) { struct rcu_data *rdp = &(*((void)(cpu), &per_cpu__rcu_bh_data)); rdp->passed_quiesc = 1; } extern int rcu_pending(int cpu); extern int rcu_needs_cpu(int cpu); # 173 "include/linux/rcuclassic.h" extern void __rcu_init(void); extern void rcu_check_callbacks(int cpu, int user); extern void rcu_restart_cpu(int cpu); extern long rcu_batches_completed(void); extern long rcu_batches_completed_bh(void); # 57 "include/linux/rcupdate.h" 2 # 194 "include/linux/rcupdate.h" struct rcu_synchronize { struct rcu_head head; struct completion completion; }; extern void wakeme_after_rcu(struct rcu_head *head); # 242 "include/linux/rcupdate.h" extern void call_rcu(struct rcu_head *head, void (*func)(struct 
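/*
 * Usage sketch for the completion API declared above (struct completion,
 * init_completion(), wait_for_completion(), complete()).  my_done and the
 * five-second timeout are hypothetical.
 *
 *   static struct completion my_done;
 *
 *   waiting side:
 *           init_completion(&my_done);
 *           ... start the worker ...
 *           wait_for_completion(&my_done);
 *       or, with a bound:
 *           if (!wait_for_completion_timeout(&my_done, 5 * HZ))
 *                   ... timed out, remaining jiffies would be returned otherwise ...
 *
 *   worker side, when finished:
 *           complete(&my_done);
 */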
rcu_head *head)); # 263 "include/linux/rcupdate.h" extern void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *head)); extern void synchronize_rcu(void); extern void rcu_barrier(void); extern void rcu_barrier_bh(void); extern void rcu_barrier_sched(void); extern void rcu_init(void); extern int rcu_needs_cpu(int cpu); # 11 "include/linux/rculist.h" 2 static inline __attribute__((always_inline)) void __list_add_rcu(struct list_head *new, struct list_head *prev, struct list_head *next) { new->next = next; new->prev = prev; ({ if (!__builtin_constant_p(new) || ((new) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (prev->next) = (new); }); next->prev = new; } # 43 "include/linux/rculist.h" static inline __attribute__((always_inline)) void list_add_rcu(struct list_head *new, struct list_head *head) { __list_add_rcu(new, head, head->next); } # 64 "include/linux/rculist.h" static inline __attribute__((always_inline)) void list_add_tail_rcu(struct list_head *new, struct list_head *head) { __list_add_rcu(new, head->prev, head); } # 94 "include/linux/rculist.h" static inline __attribute__((always_inline)) void list_del_rcu(struct list_head *entry) { __list_del(entry->prev, entry->next); entry->prev = ((void *) 0x00200200); } # 120 "include/linux/rculist.h" static inline __attribute__((always_inline)) void hlist_del_init_rcu(struct hlist_node *n) { if (!hlist_unhashed(n)) { __hlist_del(n); n->pprev = ((void *)0); } } # 136 "include/linux/rculist.h" static inline __attribute__((always_inline)) void list_replace_rcu(struct list_head *old, struct list_head *new) { new->next = old->next; new->prev = old->prev; ({ if (!__builtin_constant_p(new) || ((new) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (new->prev->next) = (new); }); new->next->prev = new; old->prev = ((void *) 0x00200200); } # 163 "include/linux/rculist.h" static inline __attribute__((always_inline)) void list_splice_init_rcu(struct list_head *list, struct list_head *head, void (*sync)(void)) { struct list_head *first = list->next; struct list_head *last = list->prev; struct list_head *at = head->next; if (list_empty(head)) return; INIT_LIST_HEAD(list); # 185 "include/linux/rculist.h" sync(); # 195 "include/linux/rculist.h" last->next = at; ({ if (!__builtin_constant_p(first) || ((first) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (head->next) = (first); }); first->prev = head; at->prev = last; } # 257 "include/linux/rculist.h" static inline __attribute__((always_inline)) void hlist_del_rcu(struct hlist_node *n) { __hlist_del(n); n->pprev = ((void *) 0x00200200); } # 270 "include/linux/rculist.h" static inline __attribute__((always_inline)) void hlist_replace_rcu(struct hlist_node *old, struct hlist_node *new) { struct hlist_node *next = old->next; new->next = next; new->pprev = old->pprev; ({ if (!__builtin_constant_p(new) || ((new) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (*new->pprev) = (new); }); if (next) new->next->pprev = &new->next; old->pprev = ((void *) 0x00200200); } # 302 "include/linux/rculist.h" static inline __attribute__((always_inline)) void hlist_add_head_rcu(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; n->next = first; n->pprev = &h->first; ({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (h->first) = (n); }); if (first) first->pprev = &n->next; } # 332 "include/linux/rculist.h" static inline __attribute__((always_inline)) void hlist_add_before_rcu(struct hlist_node *n, struct 
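/*
 * Usage sketch combining the RCU list helpers defined above
 * (list_add_rcu(), list_del_rcu()) with synchronize_rcu()/call_rcu() from
 * rcupdate.h.  rcu_read_lock()/rcu_read_unlock() and
 * list_for_each_entry_rcu() are the usual macros, already expanded away
 * in this output; struct my_node, my_list and my_lock are hypothetical.
 *
 *   struct my_node {
 *           struct list_head link;
 *           struct rcu_head  rcu;
 *           int              key;
 *   };
 *
 *   reader (lockless, inside an RCU read-side critical section):
 *           rcu_read_lock();
 *           list_for_each_entry_rcu(n, &my_list, link)
 *                   if (n->key == key) { ... }
 *           rcu_read_unlock();
 *
 *   writer (updates serialised by my_lock):
 *           spin_lock(&my_lock);
 *           list_del_rcu(&n->link);
 *           spin_unlock(&my_lock);
 *           synchronize_rcu();       wait for all pre-existing readers,
 *           kfree(n);                or defer with call_rcu(&n->rcu, free_cb)
 */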
hlist_node *next) { n->pprev = next->pprev; n->next = next; ({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (*(n->pprev)) = (n); }); next->pprev = &n->next; } # 359 "include/linux/rculist.h" static inline __attribute__((always_inline)) void hlist_add_after_rcu(struct hlist_node *prev, struct hlist_node *n) { n->next = prev->next; n->pprev = &prev->next; ({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (prev->next) = (n); }); if (n->next) n->next->pprev = &n->next; } # 7 "include/linux/dcache.h" 2 struct nameidata; struct path; struct vfsmount; # 33 "include/linux/dcache.h" struct qstr { unsigned int hash; unsigned int len; const unsigned char *name; }; struct dentry_stat_t { int nr_dentry; int nr_unused; int age_limit; int want_pages; int dummy[2]; }; extern struct dentry_stat_t dentry_stat; static inline __attribute__((always_inline)) unsigned long partial_name_hash(unsigned long c, unsigned long prevhash) { return (prevhash + (c << 4) + (c >> 4)) * 11; } static inline __attribute__((always_inline)) unsigned long end_name_hash(unsigned long hash) { return (unsigned int) hash; } static inline __attribute__((always_inline)) unsigned int full_name_hash(const unsigned char *name, unsigned int len) { unsigned long hash = 0; while (len--) hash = partial_name_hash(*name++, hash); return end_name_hash(hash); } struct dcookie_struct; struct dentry { atomic_t d_count; unsigned int d_flags; spinlock_t d_lock; struct inode *d_inode; struct hlist_node d_hash; struct dentry *d_parent; struct qstr d_name; struct list_head d_lru; union { struct list_head d_child; struct rcu_head d_rcu; } d_u; struct list_head d_subdirs; struct list_head d_alias; unsigned long d_time; struct dentry_operations *d_op; struct super_block *d_sb; void *d_fsdata; struct dcookie_struct *d_cookie; int d_mounted; unsigned char d_iname[36]; }; enum dentry_d_lock_class { DENTRY_D_LOCK_NORMAL, DENTRY_D_LOCK_NESTED }; struct dentry_operations { int (*d_revalidate)(struct dentry *, struct nameidata *); int (*d_hash) (struct dentry *, struct qstr *); int (*d_compare) (struct dentry *, struct qstr *, struct qstr *); int (*d_delete)(struct dentry *); void (*d_release)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char *(*d_dname)(struct dentry *, char *, int); }; # 180 "include/linux/dcache.h" extern spinlock_t dcache_lock; extern seqlock_t rename_lock; # 199 "include/linux/dcache.h" static inline __attribute__((always_inline)) void __d_drop(struct dentry *dentry) { if (!(dentry->d_flags & 0x0010)) { dentry->d_flags |= 0x0010; hlist_del_rcu(&dentry->d_hash); } } static inline __attribute__((always_inline)) void d_drop(struct dentry *dentry) { _spin_lock(&dcache_lock); _spin_lock(&dentry->d_lock); __d_drop(dentry); _spin_unlock(&dentry->d_lock); _spin_unlock(&dcache_lock); } static inline __attribute__((always_inline)) int dname_external(struct dentry *dentry) { return dentry->d_name.name != dentry->d_iname; } extern void d_instantiate(struct dentry *, struct inode *); extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); extern struct dentry * d_materialise_unique(struct dentry *, struct inode *); extern void d_delete(struct dentry *); extern struct dentry * d_alloc(struct dentry *, const struct qstr *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); extern struct dentry * d_obtain_alias(struct 
inode *); extern void shrink_dcache_sb(struct super_block *); extern void shrink_dcache_parent(struct dentry *); extern void shrink_dcache_for_umount(struct super_block *); extern int d_invalidate(struct dentry *); extern struct dentry * d_alloc_root(struct inode *); extern void d_genocide(struct dentry *); extern struct dentry *d_find_alias(struct inode *); extern void d_prune_aliases(struct inode *); extern int have_submounts(struct dentry *); extern void d_rehash(struct dentry *); # 265 "include/linux/dcache.h" static inline __attribute__((always_inline)) void d_add(struct dentry *entry, struct inode *inode) { d_instantiate(entry, inode); d_rehash(entry); } # 279 "include/linux/dcache.h" static inline __attribute__((always_inline)) struct dentry *d_add_unique(struct dentry *entry, struct inode *inode) { struct dentry *res; res = d_instantiate_unique(entry, inode); d_rehash(res != ((void *)0) ? res : entry); return res; } extern void d_move(struct dentry *, struct dentry *); extern struct dentry *d_ancestor(struct dentry *, struct dentry *); extern struct dentry * d_lookup(struct dentry *, struct qstr *); extern struct dentry * __d_lookup(struct dentry *, struct qstr *); extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *); extern int d_validate(struct dentry *, struct dentry *); extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); extern char *__d_path(const struct path *path, struct path *root, char *, int); extern char *d_path(const struct path *, char *, int); extern char *dentry_path(struct dentry *, char *, int); # 324 "include/linux/dcache.h" static inline __attribute__((always_inline)) struct dentry *dget(struct dentry *dentry) { if (dentry) { do { if (__builtin_expect(!!(!((&dentry->d_count)->counter)), 0)) do { dump_bfin_trace_buffer(); printk("<0>" "BUG: failure at %s:%d/%s()!\n", "include/linux/dcache.h", 327, __func__); panic("BUG!"); } while (0); } while(0); atomic_inc(&dentry->d_count); } return dentry; } extern struct dentry * dget_locked(struct dentry *); # 342 "include/linux/dcache.h" static inline __attribute__((always_inline)) int d_unhashed(struct dentry *dentry) { return (dentry->d_flags & 0x0010); } static inline __attribute__((always_inline)) struct dentry *dget_parent(struct dentry *dentry) { struct dentry *ret; _spin_lock(&dentry->d_lock); ret = dget(dentry->d_parent); _spin_unlock(&dentry->d_lock); return ret; } extern void dput(struct dentry *); static inline __attribute__((always_inline)) int d_mountpoint(struct dentry *dentry) { return dentry->d_mounted; } extern struct vfsmount *lookup_mnt(struct vfsmount *, struct dentry *); extern struct dentry *lookup_create(struct nameidata *nd, int is_dir); extern int sysctl_vfs_cache_pressure; # 292 "include/linux/fs.h" 2 # 1 "include/linux/path.h" 1 struct dentry; struct vfsmount; struct path { struct vfsmount *mnt; struct dentry *dentry; }; extern void path_get(struct path *); extern void path_put(struct path *); # 293 "include/linux/fs.h" 2 # 1 "include/linux/stat.h" 1 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/stat.h" 1 struct stat { unsigned short st_dev; unsigned short __pad1; unsigned long st_ino; unsigned short st_mode; unsigned short st_nlink; unsigned short st_uid; unsigned short st_gid; unsigned short st_rdev; unsigned short __pad2; unsigned long st_size; unsigned long st_blksize; unsigned long st_blocks; unsigned long st_atime; unsigned long __unused1; unsigned long st_mtime; unsigned long __unused2; unsigned long st_ctime; unsigned 
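/*
 * Usage sketch for the dcache helpers above (struct qstr,
 * full_name_hash(), d_lookup(), dput()).  parent, name and len are
 * hypothetical (name being a const unsigned char *), and a real
 * filesystem with a custom d_op->d_hash would have to apply it as well.
 *
 *   struct qstr q;
 *   struct dentry *child;
 *
 *   q.name = name;
 *   q.len  = len;
 *   q.hash = full_name_hash(name, len);
 *
 *   child = d_lookup(parent, &q);      returns a referenced dentry or NULL
 *   if (child) {
 *           ... inspect child->d_inode ...
 *           dput(child);               drop the reference d_lookup() took
 *   }
 */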
long __unused3; unsigned long __unused4; unsigned long __unused5; }; struct stat64 { unsigned long long st_dev; unsigned char __pad1[4]; unsigned long __st_ino; unsigned int st_mode; unsigned int st_nlink; unsigned long st_uid; unsigned long st_gid; unsigned long long st_rdev; unsigned char __pad2[4]; long long st_size; unsigned long st_blksize; long long st_blocks; unsigned long st_atime; unsigned long st_atime_nsec; unsigned long st_mtime; unsigned long st_mtime_nsec; unsigned long st_ctime; unsigned long st_ctime_nsec; unsigned long long st_ino; }; # 7 "include/linux/stat.h" 2 # 62 "include/linux/stat.h" struct kstat { u64 ino; dev_t dev; umode_t mode; unsigned int nlink; uid_t uid; gid_t gid; dev_t rdev; loff_t size; struct timespec atime; struct timespec mtime; struct timespec ctime; unsigned long blksize; unsigned long long blocks; }; # 294 "include/linux/fs.h" 2 # 1 "include/linux/radix-tree.h" 1 # 41 "include/linux/radix-tree.h" static inline __attribute__((always_inline)) void *radix_tree_ptr_to_indirect(void *ptr) { return (void *)((unsigned long)ptr | 1); } static inline __attribute__((always_inline)) void *radix_tree_indirect_to_ptr(void *ptr) { return (void *)((unsigned long)ptr & ~1); } static inline __attribute__((always_inline)) int radix_tree_is_indirect_ptr(void *ptr) { return (int)((unsigned long)ptr & 1); } struct radix_tree_root { unsigned int height; gfp_t gfp_mask; struct radix_tree_node *rnode; }; # 137 "include/linux/radix-tree.h" static inline __attribute__((always_inline)) void *radix_tree_deref_slot(void **pslot) { void *ret = *pslot; if (__builtin_expect(!!(radix_tree_is_indirect_ptr(ret)), 0)) ret = ((void *)-1UL); return ret; } # 152 "include/linux/radix-tree.h" static inline __attribute__((always_inline)) void radix_tree_replace_slot(void **pslot, void *item) { do { if (__builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 0)) do { dump_bfin_trace_buffer(); printk("<0>" "BUG: failure at %s:%d/%s()!\n", "include/linux/radix-tree.h", 154, __func__); panic("BUG!"); } while (0); } while(0); ({ if (!__builtin_constant_p(item) || ((item) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (*pslot) = (item); }); } int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); void *radix_tree_lookup(struct radix_tree_root *, unsigned long); void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); void *radix_tree_delete(struct radix_tree_root *, unsigned long); unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items); unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, unsigned long first_index, unsigned int max_items); unsigned long radix_tree_next_hole(struct radix_tree_root *root, unsigned long index, unsigned long max_scan); int radix_tree_preload(gfp_t gfp_mask); void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag); void *radix_tree_tag_clear(struct radix_tree_root *root, unsigned long index, unsigned int tag); int radix_tree_tag_get(struct radix_tree_root *root, unsigned long index, unsigned int tag); unsigned int radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag); unsigned int radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, unsigned long first_index, unsigned int max_items, unsigned int tag); int radix_tree_tagged(struct 
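/*
 * Usage sketch for the radix tree API declared above, following the usual
 * preload pattern so the insertion under a spinlock has nodes available.
 * RADIX_TREE() and DEFINE_SPINLOCK() are the customary static
 * initializers (expanded away in this output); my_tree, my_lock, index
 * and item are hypothetical.
 *
 *   static RADIX_TREE(my_tree, GFP_ATOMIC);
 *   static DEFINE_SPINLOCK(my_lock);
 *
 *   insertion:
 *           if (radix_tree_preload(GFP_KERNEL) == 0) {
 *                   spin_lock(&my_lock);
 *                   err = radix_tree_insert(&my_tree, index, item);
 *                   spin_unlock(&my_lock);
 *                   radix_tree_preload_end();
 *           }
 *
 *   lookup and removal:
 *           item = radix_tree_lookup(&my_tree, index);
 *           item = radix_tree_delete(&my_tree, index);   returns the removed item
 */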
radix_tree_root *root, unsigned int tag); static inline __attribute__((always_inline)) void radix_tree_preload_end(void) { do { } while (0); } # 298 "include/linux/fs.h" 2 # 1 "include/linux/prio_tree.h" 1 # 14 "include/linux/prio_tree.h" struct raw_prio_tree_node { struct prio_tree_node *left; struct prio_tree_node *right; struct prio_tree_node *parent; }; struct prio_tree_node { struct prio_tree_node *left; struct prio_tree_node *right; struct prio_tree_node *parent; unsigned long start; unsigned long last; }; struct prio_tree_root { struct prio_tree_node *prio_tree_node; unsigned short index_bits; unsigned short raw; }; struct prio_tree_iter { struct prio_tree_node *cur; unsigned long mask; unsigned long value; int size_level; struct prio_tree_root *root; unsigned long r_index; unsigned long h_index; }; static inline __attribute__((always_inline)) void prio_tree_iter_init(struct prio_tree_iter *iter, struct prio_tree_root *root, unsigned long r_index, unsigned long h_index) { iter->root = root; iter->r_index = r_index; iter->h_index = h_index; iter->cur = ((void *)0); } # 84 "include/linux/prio_tree.h" static inline __attribute__((always_inline)) int prio_tree_empty(const struct prio_tree_root *root) { return root->prio_tree_node == ((void *)0); } static inline __attribute__((always_inline)) int prio_tree_root(const struct prio_tree_node *node) { return node->parent == node; } static inline __attribute__((always_inline)) int prio_tree_left_empty(const struct prio_tree_node *node) { return node->left == node; } static inline __attribute__((always_inline)) int prio_tree_right_empty(const struct prio_tree_node *node) { return node->right == node; } struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root, struct prio_tree_node *old, struct prio_tree_node *node); struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root, struct prio_tree_node *node); void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node); struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter); # 299 "include/linux/fs.h" 2 # 1 "include/linux/pid.h" 1 enum pid_type { PIDTYPE_PID, PIDTYPE_PGID, PIDTYPE_SID, PIDTYPE_MAX }; # 50 "include/linux/pid.h" struct upid { int nr; struct pid_namespace *ns; struct hlist_node pid_chain; }; struct pid { atomic_t count; unsigned int level; struct hlist_head tasks[PIDTYPE_MAX]; struct rcu_head rcu; struct upid numbers[1]; }; extern struct pid init_struct_pid; struct pid_link { struct hlist_node node; struct pid *pid; }; static inline __attribute__((always_inline)) struct pid *get_pid(struct pid *pid) { if (pid) atomic_inc(&pid->count); return pid; } extern void put_pid(struct pid *pid); extern struct task_struct *pid_task(struct pid *pid, enum pid_type); extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type); extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type); extern void attach_pid(struct task_struct *task, enum pid_type type, struct pid *pid); extern void detach_pid(struct task_struct *task, enum pid_type); extern void change_pid(struct task_struct *task, enum pid_type, struct pid *pid); extern void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type); struct pid_namespace; extern struct pid_namespace init_pid_ns; # 112 "include/linux/pid.h" extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns); extern struct pid *find_vpid(int nr); extern struct pid *find_get_pid(int nr); extern struct pid *find_ge_pid(int nr, struct pid_namespace *); int 
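/*
 * Usage sketch for the struct pid reference counting and lookup helpers
 * above.  some_nr is hypothetical, and put_task_struct() comes from
 * sched.h, which is not part of this excerpt.
 *
 *   struct pid *pid;
 *
 *   pid = find_get_pid(some_nr);       namespace-aware lookup that takes a
 *                                      reference; may return NULL
 *   if (pid) {
 *           struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
 *
 *           if (task) {
 *                   ... pid_vnr(pid) is the number as seen by the caller ...
 *                   put_task_struct(task);
 *           }
 *           put_pid(pid);
 *   }
 */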
next_pidmap(struct pid_namespace *pid_ns, int last); extern struct pid *alloc_pid(struct pid_namespace *ns); extern void free_pid(struct pid *pid); # 136 "include/linux/pid.h" static inline __attribute__((always_inline)) pid_t pid_nr(struct pid *pid) { pid_t nr = 0; if (pid) nr = pid->numbers[0].nr; return nr; } pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns); pid_t pid_vnr(struct pid *pid); # 301 "include/linux/fs.h" 2 # 1 "include/linux/capability.h" 1 # 18 "include/linux/capability.h" struct task_struct; # 40 "include/linux/capability.h" typedef struct __user_cap_header_struct { __u32 version; int pid; } *cap_user_header_t; typedef struct __user_cap_data_struct { __u32 effective; __u32 permitted; __u32 inheritable; } *cap_user_data_t; # 72 "include/linux/capability.h" struct vfs_cap_data { __le32 magic_etc; struct { __le32 permitted; __le32 inheritable; } data[2]; }; # 95 "include/linux/capability.h" typedef struct kernel_cap_struct { __u32 cap[2]; } kernel_cap_t; # 416 "include/linux/capability.h" static inline __attribute__((always_inline)) kernel_cap_t cap_combine(const kernel_cap_t a, const kernel_cap_t b) { kernel_cap_t dest; do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] | b.cap[__capi]; } } while (0); return dest; } static inline __attribute__((always_inline)) kernel_cap_t cap_intersect(const kernel_cap_t a, const kernel_cap_t b) { kernel_cap_t dest; do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] & b.cap[__capi]; } } while (0); return dest; } static inline __attribute__((always_inline)) kernel_cap_t cap_drop(const kernel_cap_t a, const kernel_cap_t drop) { kernel_cap_t dest; do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] &~ drop.cap[__capi]; } } while (0); return dest; } static inline __attribute__((always_inline)) kernel_cap_t cap_invert(const kernel_cap_t c) { kernel_cap_t dest; do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = ~ c.cap[__capi]; } } while (0); return dest; } static inline __attribute__((always_inline)) int cap_isclear(const kernel_cap_t a) { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { if (a.cap[__capi] != 0) return 0; } return 1; } static inline __attribute__((always_inline)) int cap_issubset(const kernel_cap_t a, const kernel_cap_t set) { kernel_cap_t dest; dest = cap_drop(a, set); return cap_isclear(dest); } static inline __attribute__((always_inline)) int cap_is_fs_cap(int cap) { const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))), ((1 << ((32) & 31))) } }); return !!((1 << ((cap) & 31)) & __cap_fs_set.cap[((cap) >> 5)]); } static inline __attribute__((always_inline)) kernel_cap_t cap_drop_fs_set(const kernel_cap_t a) { const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))), ((1 << ((32) & 31))) } }); return cap_drop(a, __cap_fs_set); } static inline __attribute__((always_inline)) kernel_cap_t cap_raise_fs_set(const kernel_cap_t a, const kernel_cap_t permitted) { const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))), ((1 << ((32) & 31))) } }); return cap_combine(a, cap_intersect(permitted, __cap_fs_set)); } static inline __attribute__((always_inline)) kernel_cap_t 
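/*
 * Usage sketch for the kernel_cap_t set algebra defined above
 * (cap_combine(), cap_drop(), cap_issubset()).  The sets 'have' and
 * 'need' are hypothetical; capable() is declared a little further down,
 * and CAP_SYS_ADMIN / -EPERM are the usual constants.
 *
 *   kernel_cap_t have = ...;
 *   kernel_cap_t need = ...;
 *
 *   if (cap_issubset(need, have))
 *           ... every capability in 'need' is also present in 'have' ...
 *
 *   have = cap_drop(have, need);       clear the 'need' bits
 *   have = cap_combine(have, need);    union of the two sets
 *
 *   typical privilege check in driver code:
 *           if (!capable(CAP_SYS_ADMIN))
 *                   return -EPERM;
 */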
cap_drop_nfsd_set(const kernel_cap_t a) { const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31)))|(1 << ((24) & 31)), ((1 << ((32) & 31))) } }); return cap_drop(a, __cap_fs_set); } static inline __attribute__((always_inline)) kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a, const kernel_cap_t permitted) { const kernel_cap_t __cap_nfsd_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31)))|(1 << ((24) & 31)), ((1 << ((32) & 31))) } }); return cap_combine(a, cap_intersect(permitted, __cap_nfsd_set)); } extern const kernel_cap_t __cap_empty_set; extern const kernel_cap_t __cap_full_set; extern const kernel_cap_t __cap_init_eff_set; kernel_cap_t cap_set_effective(const kernel_cap_t pE_new); # 518 "include/linux/capability.h" extern int capable(int cap); # 303 "include/linux/fs.h" 2 # 1 "include/linux/semaphore.h" 1 # 16 "include/linux/semaphore.h" struct semaphore { spinlock_t lock; unsigned int count; struct list_head wait_list; }; # 32 "include/linux/semaphore.h" static inline __attribute__((always_inline)) void sema_init(struct semaphore *sem, int val) { static struct lock_class_key __key; *sem = (struct semaphore) { .lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, }, .count = val, .wait_list = { &((*sem).wait_list), &((*sem).wait_list) }, }; do { (void)("semaphore->lock"); (void)(&__key); } while (0); } extern void down(struct semaphore *sem); extern int __attribute__((warn_unused_result)) down_interruptible(struct semaphore *sem); extern int __attribute__((warn_unused_result)) down_killable(struct semaphore *sem); extern int __attribute__((warn_unused_result)) down_trylock(struct semaphore *sem); extern int __attribute__((warn_unused_result)) down_timeout(struct semaphore *sem, long jiffies); extern void up(struct semaphore *sem); # 304 "include/linux/fs.h" 2 # 1 "include/linux/fiemap.h" 1 # 14 "include/linux/fiemap.h" struct fiemap_extent { __u64 fe_logical; __u64 fe_physical; __u64 fe_length; __u64 fe_reserved64[2]; __u32 fe_flags; __u32 fe_reserved[3]; }; struct fiemap { __u64 fm_start; __u64 fm_length; __u32 fm_flags; __u32 fm_mapped_extents; __u32 fm_extent_count; __u32 fm_reserved; struct fiemap_extent fm_extents[0]; }; # 305 "include/linux/fs.h" 2 struct export_operations; struct hd_geometry; struct iovec; struct nameidata; struct kiocb; struct pipe_inode_info; struct poll_table_struct; struct kstatfs; struct vm_area_struct; struct vfsmount; extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init(void); extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init_early(void); extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) files_init(unsigned long); struct buffer_head; typedef int (get_block_t)(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, ssize_t bytes, void *private); # 361 "include/linux/fs.h" struct iattr { unsigned int ia_valid; umode_t ia_mode; uid_t ia_uid; gid_t ia_gid; loff_t ia_size; struct timespec ia_atime; struct timespec ia_mtime; struct timespec ia_ctime; struct file *ia_file; }; # 1 "include/linux/quota.h" 1 # 98 
"include/linux/quota.h" struct if_dqblk { __u64 dqb_bhardlimit; __u64 dqb_bsoftlimit; __u64 dqb_curspace; __u64 dqb_ihardlimit; __u64 dqb_isoftlimit; __u64 dqb_curinodes; __u64 dqb_btime; __u64 dqb_itime; __u32 dqb_valid; }; # 119 "include/linux/quota.h" struct if_dqinfo { __u64 dqi_bgrace; __u64 dqi_igrace; __u32 dqi_flags; __u32 dqi_valid; }; # 141 "include/linux/quota.h" enum { QUOTA_NL_C_UNSPEC, QUOTA_NL_C_WARNING, __QUOTA_NL_C_MAX, }; enum { QUOTA_NL_A_UNSPEC, QUOTA_NL_A_QTYPE, QUOTA_NL_A_EXCESS_ID, QUOTA_NL_A_WARNING, QUOTA_NL_A_DEV_MAJOR, QUOTA_NL_A_DEV_MINOR, QUOTA_NL_A_CAUSED_ID, __QUOTA_NL_A_MAX, }; # 168 "include/linux/quota.h" # 1 "include/linux/dqblk_xfs.h" 1 # 50 "include/linux/dqblk_xfs.h" typedef struct fs_disk_quota { __s8 d_version; __s8 d_flags; __u16 d_fieldmask; __u32 d_id; __u64 d_blk_hardlimit; __u64 d_blk_softlimit; __u64 d_ino_hardlimit; __u64 d_ino_softlimit; __u64 d_bcount; __u64 d_icount; __s32 d_itimer; __s32 d_btimer; __u16 d_iwarns; __u16 d_bwarns; __s32 d_padding2; __u64 d_rtb_hardlimit; __u64 d_rtb_softlimit; __u64 d_rtbcount; __s32 d_rtbtimer; __u16 d_rtbwarns; __s16 d_padding3; char d_padding4[8]; } fs_disk_quota_t; # 137 "include/linux/dqblk_xfs.h" typedef struct fs_qfilestat { __u64 qfs_ino; __u64 qfs_nblks; __u32 qfs_nextents; } fs_qfilestat_t; typedef struct fs_quota_stat { __s8 qs_version; __u16 qs_flags; __s8 qs_pad; fs_qfilestat_t qs_uquota; fs_qfilestat_t qs_gquota; __u32 qs_incoredqs; __s32 qs_btimelimit; __s32 qs_itimelimit; __s32 qs_rtbtimelimit; __u16 qs_bwarnlimit; __u16 qs_iwarnlimit; } fs_quota_stat_t; # 169 "include/linux/quota.h" 2 # 1 "include/linux/dqblk_v1.h" 1 # 21 "include/linux/dqblk_v1.h" struct v1_mem_dqinfo { }; # 170 "include/linux/quota.h" 2 # 1 "include/linux/dqblk_v2.h" 1 # 20 "include/linux/dqblk_v2.h" struct v2_mem_dqinfo { unsigned int dqi_blocks; unsigned int dqi_free_blk; unsigned int dqi_free_entry; }; # 171 "include/linux/quota.h" 2 typedef __kernel_uid32_t qid_t; typedef __u64 qsize_t; extern spinlock_t dq_data_lock; # 189 "include/linux/quota.h" struct mem_dqblk { __u32 dqb_bhardlimit; __u32 dqb_bsoftlimit; qsize_t dqb_curspace; __u32 dqb_ihardlimit; __u32 dqb_isoftlimit; __u32 dqb_curinodes; time_t dqb_btime; time_t dqb_itime; }; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format; int dqi_fmt_id; struct list_head dqi_dirty_list; unsigned long dqi_flags; unsigned int dqi_bgrace; unsigned int dqi_igrace; qsize_t dqi_maxblimit; qsize_t dqi_maxilimit; union { struct v1_mem_dqinfo v1_i; struct v2_mem_dqinfo v2_i; } u; }; struct super_block; extern void mark_info_dirty(struct super_block *sb, int type); static inline __attribute__((always_inline)) int info_dirty(struct mem_dqinfo *info) { return test_bit(16, &info->dqi_flags); } struct dqstats { int lookups; int drops; int reads; int writes; int cache_hits; int allocated_dquots; int free_dquots; int syncs; }; extern struct dqstats dqstats; # 253 "include/linux/quota.h" struct dquot { struct hlist_node dq_hash; struct list_head dq_inuse; struct list_head dq_free; struct list_head dq_dirty; struct mutex dq_lock; atomic_t dq_count; wait_queue_head_t dq_wait_unused; struct super_block *dq_sb; unsigned int dq_id; loff_t dq_off; unsigned long dq_flags; short dq_type; struct mem_dqblk dq_dqb; }; struct quota_format_ops { int (*check_quota_file)(struct super_block *sb, int type); int (*read_file_info)(struct super_block *sb, int type); int (*write_file_info)(struct super_block *sb, int type); int (*free_file_info)(struct super_block *sb, int type); 
int (*read_dqblk)(struct dquot *dquot); int (*commit_dqblk)(struct dquot *dquot); int (*release_dqblk)(struct dquot *dquot); }; struct dquot_operations { int (*initialize) (struct inode *, int); int (*drop) (struct inode *); int (*alloc_space) (struct inode *, qsize_t, int); int (*alloc_inode) (const struct inode *, unsigned long); int (*free_space) (struct inode *, qsize_t); int (*free_inode) (const struct inode *, unsigned long); int (*transfer) (struct inode *, struct iattr *); int (*write_dquot) (struct dquot *); int (*acquire_dquot) (struct dquot *); int (*release_dquot) (struct dquot *); int (*mark_dirty) (struct dquot *); int (*write_info) (struct super_block *, int); }; struct quotactl_ops { int (*quota_on)(struct super_block *, int, int, char *, int); int (*quota_off)(struct super_block *, int, int); int (*quota_sync)(struct super_block *, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); int (*get_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *); int (*set_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *); int (*get_xstate)(struct super_block *, struct fs_quota_stat *); int (*set_xstate)(struct super_block *, unsigned int, int); int (*get_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *); int (*set_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *); }; struct quota_format_type { int qf_fmt_id; struct quota_format_ops *qf_ops; struct module *qf_owner; struct quota_format_type *qf_next; }; # 330 "include/linux/quota.h" struct quota_info { unsigned int flags; struct mutex dqio_mutex; struct mutex dqonoff_mutex; struct rw_semaphore dqptr_sem; struct inode *files[2]; struct mem_dqinfo info[2]; struct quota_format_ops *ops[2]; }; int register_quota_format(struct quota_format_type *fmt); void unregister_quota_format(struct quota_format_type *fmt); struct quota_module_name { int qm_fmt_id; char *qm_mod_name; }; # 383 "include/linux/fs.h" 2 # 410 "include/linux/fs.h" enum positive_aop_returns { AOP_WRITEPAGE_ACTIVATE = 0x80000, AOP_TRUNCATED_PAGE = 0x80001, }; struct page; struct address_space; struct writeback_control; struct iov_iter { const struct iovec *iov; unsigned long nr_segs; size_t iov_offset; size_t count; }; size_t iov_iter_copy_from_user_atomic(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes); size_t iov_iter_copy_from_user(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes); void iov_iter_advance(struct iov_iter *i, size_t bytes); int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); size_t iov_iter_single_seg_count(struct iov_iter *i); static inline __attribute__((always_inline)) void iov_iter_init(struct iov_iter *i, const struct iovec *iov, unsigned long nr_segs, size_t count, size_t written) { i->iov = iov; i->nr_segs = nr_segs; i->iov_offset = 0; i->count = count + written; iov_iter_advance(i, written); } static inline __attribute__((always_inline)) size_t iov_iter_count(struct iov_iter *i) { return i->count; } # 466 "include/linux/fs.h" typedef struct { size_t written; size_t count; union { char *buf; void *data; } arg; int error; } read_descriptor_t; typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long); struct address_space_operations { int (*writepage)(struct page *page, struct writeback_control *wbc); int (*readpage)(struct file *, struct page *); void (*sync_page)(struct page *); int (*writepages)(struct address_space *, struct 
writeback_control *); int (*set_page_dirty)(struct page *page); int (*readpages)(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); int (*write_end)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); sector_t (*bmap)(struct address_space *, sector_t); void (*invalidatepage) (struct page *, unsigned long); int (*releasepage) (struct page *, gfp_t); ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, loff_t offset, unsigned long nr_segs); int (*get_xip_mem)(struct address_space *, unsigned long, int, void **, unsigned long *); int (*migratepage) (struct address_space *, struct page *, struct page *); int (*launder_page) (struct page *); int (*is_partially_uptodate) (struct page *, read_descriptor_t *, unsigned long); }; int pagecache_write_begin(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); int pagecache_write_end(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); struct backing_dev_info; struct address_space { struct inode *host; struct radix_tree_root page_tree; spinlock_t tree_lock; unsigned int i_mmap_writable; struct prio_tree_root i_mmap; struct list_head i_mmap_nonlinear; spinlock_t i_mmap_lock; unsigned int truncate_count; unsigned long nrpages; unsigned long writeback_index; const struct address_space_operations *a_ops; unsigned long flags; struct backing_dev_info *backing_dev_info; spinlock_t private_lock; struct list_head private_list; struct address_space *assoc_mapping; } __attribute__((aligned(sizeof(long)))); struct block_device { dev_t bd_dev; struct inode * bd_inode; int bd_openers; struct mutex bd_mutex; struct semaphore bd_mount_sem; struct list_head bd_inodes; void * bd_holder; int bd_holders; struct block_device * bd_contains; unsigned bd_block_size; struct hd_struct * bd_part; unsigned bd_part_count; int bd_invalidated; struct gendisk * bd_disk; struct list_head bd_list; struct backing_dev_info *bd_inode_backing_dev_info; unsigned long bd_private; }; # 590 "include/linux/fs.h" int mapping_tagged(struct address_space *mapping, int tag); static inline __attribute__((always_inline)) int mapping_mapped(struct address_space *mapping) { return !prio_tree_empty(&mapping->i_mmap) || !list_empty(&mapping->i_mmap_nonlinear); } static inline __attribute__((always_inline)) int mapping_writably_mapped(struct address_space *mapping) { return mapping->i_mmap_writable != 0; } # 623 "include/linux/fs.h" struct inode { struct hlist_node i_hash; struct list_head i_list; struct list_head i_sb_list; struct list_head i_dentry; unsigned long i_ino; atomic_t i_count; unsigned int i_nlink; uid_t i_uid; gid_t i_gid; dev_t i_rdev; u64 i_version; loff_t i_size; struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; unsigned int i_blkbits; blkcnt_t i_blocks; unsigned short i_bytes; umode_t i_mode; spinlock_t i_lock; struct mutex i_mutex; struct rw_semaphore i_alloc_sem; const struct inode_operations *i_op; const struct file_operations *i_fop; struct super_block *i_sb; struct file_lock *i_flock; struct address_space *i_mapping; struct address_space i_data; struct dquot *i_dquot[2]; struct list_head i_devices; union { struct pipe_inode_info *i_pipe; struct 
block_device *i_bdev; struct cdev *i_cdev; }; int i_cindex; __u32 i_generation; unsigned long i_dnotify_mask; struct dnotify_struct *i_dnotify; struct list_head inotify_watches; struct mutex inotify_mutex; unsigned long i_state; unsigned long dirtied_when; unsigned int i_flags; atomic_t i_writecount; void *i_private; }; # 701 "include/linux/fs.h" enum inode_i_mutex_lock_class { I_MUTEX_NORMAL, I_MUTEX_PARENT, I_MUTEX_CHILD, I_MUTEX_XATTR, I_MUTEX_QUOTA }; extern void inode_double_lock(struct inode *inode1, struct inode *inode2); extern void inode_double_unlock(struct inode *inode1, struct inode *inode2); # 723 "include/linux/fs.h" static inline __attribute__((always_inline)) loff_t i_size_read(const struct inode *inode) { # 742 "include/linux/fs.h" return inode->i_size; } static inline __attribute__((always_inline)) void i_size_write(struct inode *inode, loff_t i_size) { # 762 "include/linux/fs.h" inode->i_size = i_size; } static inline __attribute__((always_inline)) unsigned iminor(const struct inode *inode) { return ((unsigned int) ((inode->i_rdev) & ((1U << 20) - 1))); } static inline __attribute__((always_inline)) unsigned imajor(const struct inode *inode) { return ((unsigned int) ((inode->i_rdev) >> 20)); } extern struct block_device *I_BDEV(struct inode *inode); struct fown_struct { rwlock_t lock; struct pid *pid; enum pid_type pid_type; uid_t uid, euid; int signum; }; struct file_ra_state { unsigned long start; unsigned int size; unsigned int async_size; unsigned int ra_pages; int mmap_miss; loff_t prev_pos; }; static inline __attribute__((always_inline)) int ra_has_index(struct file_ra_state *ra, unsigned long index) { return (index >= ra->start && index < ra->start + ra->size); } struct file { union { struct list_head fu_list; struct rcu_head fu_rcuhead; } f_u; struct path f_path; const struct file_operations *f_op; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; loff_t f_pos; struct fown_struct f_owner; unsigned int f_uid, f_gid; struct file_ra_state f_ra; u64 f_version; void *private_data; struct list_head f_ep_links; spinlock_t f_ep_lock; struct address_space *f_mapping; unsigned long f_mnt_write_state; }; extern spinlock_t files_lock; static inline __attribute__((always_inline)) void file_take_write(struct file *f) { ({ int __ret_warn_on = !!(f->f_mnt_write_state != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("include/linux/fs.h", 860); __builtin_expect(!!(__ret_warn_on), 0); }); f->f_mnt_write_state = 1; } static inline __attribute__((always_inline)) void file_release_write(struct file *f) { f->f_mnt_write_state |= 2; } static inline __attribute__((always_inline)) void file_reset_write(struct file *f) { f->f_mnt_write_state = 0; } static inline __attribute__((always_inline)) void file_check_state(struct file *f) { ({ int __ret_warn_on = !!(f->f_mnt_write_state == 1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("include/linux/fs.h", 877); __builtin_expect(!!(__ret_warn_on), 0); }); ({ int __ret_warn_on = !!(f->f_mnt_write_state == 2); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("include/linux/fs.h", 878); __builtin_expect(!!(__ret_warn_on), 0); }); } static inline __attribute__((always_inline)) int file_check_writeable(struct file *f) { if (f->f_mnt_write_state == 1) return 0; printk("<4>" "writeable file with no " "mnt_want_write()\n"); ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("include/linux/fs.h", 886); __builtin_expect(!!(__ret_warn_on), 0); }); return -22; 
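/*
 * Usage sketch for the inode size and device-number accessors above.
 * i_size_read()/i_size_write() are trivial in this configuration but add
 * seqcount or preemption protection on 32-bit SMP/preempt builds, which
 * is why callers use them instead of touching i_size directly;
 * iminor()/imajor() decode i_rdev.  my_file_size() is hypothetical.
 *
 *   static loff_t my_file_size(struct file *filp)
 *   {
 *           struct inode *inode = filp->f_path.dentry->d_inode;
 *
 *           return i_size_read(inode);
 *   }
 *
 *   for a character device inode:
 *           unsigned int major = imajor(inode);
 *           unsigned int minor = iminor(inode);
 */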
} # 931 "include/linux/fs.h" typedef struct files_struct *fl_owner_t; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); }; struct lock_manager_operations { int (*fl_compare_owner)(struct file_lock *, struct file_lock *); void (*fl_notify)(struct file_lock *); int (*fl_grant)(struct file_lock *, struct file_lock *, int); void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); void (*fl_break)(struct file_lock *); int (*fl_mylease)(struct file_lock *, struct file_lock *); int (*fl_change)(struct file_lock **, int); }; struct lock_manager { struct list_head list; }; void locks_start_grace(struct lock_manager *); void locks_end_grace(struct lock_manager *); int locks_in_grace(void); # 1 "include/linux/nfs_fs_i.h" 1 # 1 "include/linux/nfs.h" 1 # 39 "include/linux/nfs.h" enum nfs_stat { NFS_OK = 0, NFSERR_PERM = 1, NFSERR_NOENT = 2, NFSERR_IO = 5, NFSERR_NXIO = 6, NFSERR_EAGAIN = 11, NFSERR_ACCES = 13, NFSERR_EXIST = 17, NFSERR_XDEV = 18, NFSERR_NODEV = 19, NFSERR_NOTDIR = 20, NFSERR_ISDIR = 21, NFSERR_INVAL = 22, NFSERR_FBIG = 27, NFSERR_NOSPC = 28, NFSERR_ROFS = 30, NFSERR_MLINK = 31, NFSERR_OPNOTSUPP = 45, NFSERR_NAMETOOLONG = 63, NFSERR_NOTEMPTY = 66, NFSERR_DQUOT = 69, NFSERR_STALE = 70, NFSERR_REMOTE = 71, NFSERR_WFLUSH = 99, NFSERR_BADHANDLE = 10001, NFSERR_NOT_SYNC = 10002, NFSERR_BAD_COOKIE = 10003, NFSERR_NOTSUPP = 10004, NFSERR_TOOSMALL = 10005, NFSERR_SERVERFAULT = 10006, NFSERR_BADTYPE = 10007, NFSERR_JUKEBOX = 10008, NFSERR_SAME = 10009, NFSERR_DENIED = 10010, NFSERR_EXPIRED = 10011, NFSERR_LOCKED = 10012, NFSERR_GRACE = 10013, NFSERR_FHEXPIRED = 10014, NFSERR_SHARE_DENIED = 10015, NFSERR_WRONGSEC = 10016, NFSERR_CLID_INUSE = 10017, NFSERR_RESOURCE = 10018, NFSERR_MOVED = 10019, NFSERR_NOFILEHANDLE = 10020, NFSERR_MINOR_VERS_MISMATCH = 10021, NFSERR_STALE_CLIENTID = 10022, NFSERR_STALE_STATEID = 10023, NFSERR_OLD_STATEID = 10024, NFSERR_BAD_STATEID = 10025, NFSERR_BAD_SEQID = 10026, NFSERR_NOT_SAME = 10027, NFSERR_LOCK_RANGE = 10028, NFSERR_SYMLINK = 10029, NFSERR_RESTOREFH = 10030, NFSERR_LEASE_MOVED = 10031, NFSERR_ATTRNOTSUPP = 10032, NFSERR_NO_GRACE = 10033, NFSERR_RECLAIM_BAD = 10034, NFSERR_RECLAIM_CONFLICT = 10035, NFSERR_BAD_XDR = 10036, NFSERR_LOCKS_HELD = 10037, NFSERR_OPENMODE = 10038, NFSERR_BADOWNER = 10039, NFSERR_BADCHAR = 10040, NFSERR_BADNAME = 10041, NFSERR_BAD_RANGE = 10042, NFSERR_LOCK_NOTSUPP = 10043, NFSERR_OP_ILLEGAL = 10044, NFSERR_DEADLOCK = 10045, NFSERR_FILE_OPEN = 10046, NFSERR_ADMIN_REVOKED = 10047, NFSERR_CB_PATH_DOWN = 10048, NFSERR_REPLAY_ME = 10049 }; enum nfs_ftype { NFNON = 0, NFREG = 1, NFDIR = 2, NFBLK = 3, NFCHR = 4, NFLNK = 5, NFSOCK = 6, NFBAD = 7, NFFIFO = 8 }; # 1 "include/linux/sunrpc/msg_prot.h" 1 # 18 "include/linux/sunrpc/msg_prot.h" typedef u32 rpc_authflavor_t; enum rpc_auth_flavors { RPC_AUTH_NULL = 0, RPC_AUTH_UNIX = 1, RPC_AUTH_SHORT = 2, RPC_AUTH_DES = 3, RPC_AUTH_KRB = 4, RPC_AUTH_GSS = 6, RPC_AUTH_MAXFLAVOR = 8, RPC_AUTH_GSS_KRB5 = 390003, RPC_AUTH_GSS_KRB5I = 390004, RPC_AUTH_GSS_KRB5P = 390005, RPC_AUTH_GSS_LKEY = 390006, RPC_AUTH_GSS_LKEYI = 390007, RPC_AUTH_GSS_LKEYP = 390008, RPC_AUTH_GSS_SPKM = 390009, RPC_AUTH_GSS_SPKMI = 390010, RPC_AUTH_GSS_SPKMP = 390011, }; enum rpc_msg_type { RPC_CALL = 0, RPC_REPLY = 1 }; enum rpc_reply_stat { RPC_MSG_ACCEPTED = 0, RPC_MSG_DENIED = 1 }; enum rpc_accept_stat { RPC_SUCCESS = 0, RPC_PROG_UNAVAIL = 1, RPC_PROG_MISMATCH = 2, RPC_PROC_UNAVAIL = 3, 
RPC_GARBAGE_ARGS = 4, RPC_SYSTEM_ERR = 5, RPC_DROP_REPLY = 60000, }; enum rpc_reject_stat { RPC_MISMATCH = 0, RPC_AUTH_ERROR = 1 }; enum rpc_auth_stat { RPC_AUTH_OK = 0, RPC_AUTH_BADCRED = 1, RPC_AUTH_REJECTEDCRED = 2, RPC_AUTH_BADVERF = 3, RPC_AUTH_REJECTEDVERF = 4, RPC_AUTH_TOOWEAK = 5, RPCSEC_GSS_CREDPROBLEM = 13, RPCSEC_GSS_CTXPROBLEM = 14 }; # 102 "include/linux/sunrpc/msg_prot.h" typedef __be32 rpc_fraghdr; # 131 "include/linux/nfs.h" 2 struct nfs_fh { unsigned short size; unsigned char data[128]; }; static inline __attribute__((always_inline)) int nfs_compare_fh(const struct nfs_fh *a, const struct nfs_fh *b) { return a->size != b->size || memcmp(a->data, b->data, a->size) != 0; } static inline __attribute__((always_inline)) void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source) { target->size = source->size; memcpy(target->data, source->data, source->size); } # 165 "include/linux/nfs.h" enum nfs3_stable_how { NFS_UNSTABLE = 0, NFS_DATA_SYNC = 1, NFS_FILE_SYNC = 2 }; # 7 "include/linux/nfs_fs_i.h" 2 struct nlm_lockowner; struct nfs_lock_info { u32 state; struct nlm_lockowner *owner; struct list_head list; }; struct nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner; }; # 959 "include/linux/fs.h" 2 struct file_lock { struct file_lock *fl_next; struct list_head fl_link; struct list_head fl_block; fl_owner_t fl_owner; unsigned char fl_flags; unsigned char fl_type; unsigned int fl_pid; struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; loff_t fl_start; loff_t fl_end; struct fasync_struct * fl_fasync; unsigned long fl_break_time; struct file_lock_operations *fl_ops; struct lock_manager_operations *fl_lmops; union { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct { struct list_head link; int state; } afs; } fl_u; }; # 996 "include/linux/fs.h" # 1 "include/linux/fcntl.h" 1 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/fcntl.h" 1 # 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/fcntl.h" # 1 "include/asm-generic/fcntl.h" 1 # 117 "include/asm-generic/fcntl.h" struct flock { short l_type; short l_whence; off_t l_start; off_t l_len; pid_t l_pid; }; # 140 "include/asm-generic/fcntl.h" struct flock64 { short l_type; short l_whence; loff_t l_start; loff_t l_len; pid_t l_pid; }; # 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/fcntl.h" 2 # 5 "include/linux/fcntl.h" 2 # 997 "include/linux/fs.h" 2 extern void send_sigio(struct fown_struct *fown, int fd, int band); extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset, loff_t endbyte, unsigned int flags); extern int fcntl_getlk(struct file *, struct flock *); extern int fcntl_setlk(unsigned int, struct file *, unsigned int, struct flock *); extern int fcntl_getlk64(struct file *, struct flock64 *); extern int fcntl_setlk64(unsigned int, struct file *, unsigned int, struct flock64 *); extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); extern int fcntl_getlease(struct file *filp); extern void locks_init_lock(struct file_lock *); extern void locks_copy_lock(struct file_lock *, struct file_lock *); extern void __locks_copy_lock(struct file_lock *, const struct file_lock *); extern void locks_remove_posix(struct file *, fl_owner_t); extern void locks_remove_flock(struct file *); extern void posix_test_lock(struct file *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); extern int posix_lock_file_wait(struct file 
*, struct file_lock *); extern int posix_unblock_lock(struct file *, struct file_lock *); extern int vfs_test_lock(struct file *, struct file_lock *); extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); extern int __break_lease(struct inode *inode, unsigned int flags); extern void lease_get_mtime(struct inode *, struct timespec *time); extern int generic_setlease(struct file *, long, struct file_lock **); extern int vfs_setlease(struct file *, long, struct file_lock **); extern int lease_modify(struct file_lock **, int); extern int lock_may_read(struct inode *, loff_t start, unsigned long count); extern int lock_may_write(struct inode *, loff_t start, unsigned long count); # 1071 "include/linux/fs.h" struct fasync_struct { int magic; int fa_fd; struct fasync_struct *fa_next; struct file *fa_file; }; extern int fasync_helper(int, struct file *, int, struct fasync_struct **); extern void kill_fasync(struct fasync_struct **, int, int); extern void __kill_fasync(struct fasync_struct *, int, int); extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force); extern int f_setown(struct file *filp, unsigned long arg, int force); extern void f_delown(struct file *filp); extern pid_t f_getown(struct file *filp); extern int send_sigurg(struct fown_struct *fown); # 1101 "include/linux/fs.h" extern struct list_head super_blocks; extern spinlock_t sb_lock; struct super_block { struct list_head s_list; dev_t s_dev; unsigned long s_blocksize; unsigned char s_blocksize_bits; unsigned char s_dirt; unsigned long long s_maxbytes; struct file_system_type *s_type; const struct super_operations *s_op; struct dquot_operations *dq_op; struct quotactl_ops *s_qcop; const struct export_operations *s_export_op; unsigned long s_flags; unsigned long s_magic; struct dentry *s_root; struct rw_semaphore s_umount; struct mutex s_lock; int s_count; int s_syncing; int s_need_sync_fs; atomic_t s_active; struct xattr_handler **s_xattr; struct list_head s_inodes; struct list_head s_dirty; struct list_head s_io; struct list_head s_more_io; struct hlist_head s_anon; struct list_head s_files; struct list_head s_dentry_lru; int s_nr_dentry_unused; struct block_device *s_bdev; struct mtd_info *s_mtd; struct list_head s_instances; struct quota_info s_dquot; int s_frozen; wait_queue_head_t s_wait_unfrozen; char s_id[32]; void *s_fs_info; fmode_t s_mode; struct mutex s_vfs_rename_mutex; u32 s_time_gran; char *s_subtype; char *s_options; }; extern struct timespec current_fs_time(struct super_block *sb); enum { SB_UNFROZEN = 0, SB_FREEZE_WRITE = 1, SB_FREEZE_TRANS = 2, }; # 1200 "include/linux/fs.h" extern void lock_super(struct super_block *); extern void unlock_super(struct super_block *); extern int vfs_permission(struct nameidata *, int); extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *); extern int vfs_mkdir(struct inode *, struct dentry *, int); extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t); extern int vfs_symlink(struct inode *, struct dentry *, const char *); extern int vfs_link(struct dentry *, struct inode *, struct dentry *); extern int vfs_rmdir(struct inode *, struct dentry *); extern int vfs_unlink(struct inode *, struct dentry *); extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); extern void dentry_unhash(struct dentry *dentry); 
extern int file_permission(struct file *, int); struct fiemap_extent_info { unsigned int fi_flags; unsigned int fi_extents_mapped; unsigned int fi_extents_max; struct fiemap_extent *fi_extents_start; }; int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, u64 phys, u64 len, u32 flags); int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); # 1259 "include/linux/fs.h" int generic_osync_inode(struct inode *, struct address_space *, int); typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); struct block_device_operations; # 1281 "include/linux/fs.h" struct file_operations { struct module *owner; loff_t (*llseek) (struct file *, loff_t, int); ssize_t (*read) (struct file *, char *, size_t, loff_t *); ssize_t (*write) (struct file *, const char *, size_t, loff_t *); ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t); ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t); int (*readdir) (struct file *, void *, filldir_t); unsigned int (*poll) (struct file *, struct poll_table_struct *); int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long); long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); int (*open) (struct inode *, struct file *); int (*flush) (struct file *, fl_owner_t id); int (*release) (struct inode *, struct file *); int (*fsync) (struct file *, struct dentry *, int datasync); int (*aio_fsync) (struct kiocb *, int datasync); int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*dir_notify)(struct file *filp, unsigned long arg); int (*flock) (struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); int (*setlease)(struct file *, long, struct file_lock **); }; struct inode_operations { int (*create) (struct inode *,struct dentry *,int, struct nameidata *); struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); int (*link) (struct dentry *,struct inode *,struct dentry *); int (*unlink) (struct inode *,struct dentry *); int (*symlink) (struct inode *,struct dentry *,const char *); int (*mkdir) (struct inode *,struct dentry *,int); int (*rmdir) (struct inode *,struct dentry *); int (*mknod) (struct inode *,struct dentry *,int,dev_t); int (*rename) (struct inode *, struct dentry *, struct inode *, struct dentry *); int (*readlink) (struct dentry *, char *,int); void * (*follow_link) (struct dentry *, struct nameidata *); void (*put_link) (struct dentry *, struct nameidata *, void *); void (*truncate) (struct inode *); int (*permission) (struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); ssize_t (*listxattr) (struct dentry *, char *, size_t); int (*removexattr) (struct dentry *, const char *); void (*truncate_range)(struct inode 
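/*
 * Illustrative sketch only: a minimal file_operations instance wired up from
 * the struct defined just above.  The names example_msg, example_read and
 * example_fops are hypothetical; simple_read_from_buffer, nonseekable_open
 * and no_llseek are helpers declared elsewhere in this translation unit.
 *
 *   static const char example_msg[] = "hello\n";
 *
 *   static ssize_t example_read(struct file *filp, char *buf,
 *                               size_t count, loff_t *ppos)
 *   {
 *           return simple_read_from_buffer(buf, count, ppos, example_msg,
 *                                          sizeof(example_msg) - 1);
 *   }
 *
 *   static const struct file_operations example_fops = {
 *           .open   = nonseekable_open,
 *           .read   = example_read,
 *           .llseek = no_llseek,
 *   };
 */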
*, loff_t, loff_t); long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len); int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); }; struct seq_file; ssize_t rw_copy_check_uvector(int type, const struct iovec * uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_pointer, struct iovec **ret_pointer); extern ssize_t vfs_read(struct file *, char *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char *, size_t, loff_t *); extern ssize_t vfs_readv(struct file *, const struct iovec *, unsigned long, loff_t *); extern ssize_t vfs_writev(struct file *, const struct iovec *, unsigned long, loff_t *); struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); void (*destroy_inode)(struct inode *); void (*dirty_inode) (struct inode *); int (*write_inode) (struct inode *, int); void (*drop_inode) (struct inode *); void (*delete_inode) (struct inode *); void (*put_super) (struct super_block *); void (*write_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); void (*write_super_lockfs) (struct super_block *); void (*unlockfs) (struct super_block *); int (*statfs) (struct dentry *, struct kstatfs *); int (*remount_fs) (struct super_block *, int *, char *); void (*clear_inode) (struct inode *); void (*umount_begin) (struct super_block *); int (*show_options)(struct seq_file *, struct vfsmount *); int (*show_stats)(struct seq_file *, struct vfsmount *); ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); }; # 1447 "include/linux/fs.h" extern void __mark_inode_dirty(struct inode *, int); static inline __attribute__((always_inline)) void mark_inode_dirty(struct inode *inode) { __mark_inode_dirty(inode, (1 | 2 | 4)); } static inline __attribute__((always_inline)) void mark_inode_dirty_sync(struct inode *inode) { __mark_inode_dirty(inode, 1); } # 1466 "include/linux/fs.h" static inline __attribute__((always_inline)) void inc_nlink(struct inode *inode) { inode->i_nlink++; } static inline __attribute__((always_inline)) void inode_inc_link_count(struct inode *inode) { inc_nlink(inode); mark_inode_dirty(inode); } # 1488 "include/linux/fs.h" static inline __attribute__((always_inline)) void drop_nlink(struct inode *inode) { inode->i_nlink--; } # 1501 "include/linux/fs.h" static inline __attribute__((always_inline)) void clear_nlink(struct inode *inode) { inode->i_nlink = 0; } static inline __attribute__((always_inline)) void inode_dec_link_count(struct inode *inode) { drop_nlink(inode); mark_inode_dirty(inode); } # 1520 "include/linux/fs.h" static inline __attribute__((always_inline)) void inode_inc_iversion(struct inode *inode) { _spin_lock(&inode->i_lock); inode->i_version++; _spin_unlock(&inode->i_lock); } extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry); static inline __attribute__((always_inline)) void file_accessed(struct file *file) { if (!(file->f_flags & 01000000)) touch_atime(file->f_path.mnt, file->f_path.dentry); } int sync_inode(struct inode *inode, struct writeback_control *wbc); struct file_system_type { const char *name; int fs_flags; int (*get_sb) (struct file_system_type *, int, const char *, void *, struct vfsmount *); void (*kill_sb) (struct super_block *); struct module *owner; struct file_system_type * next; struct list_head fs_supers; struct lock_class_key s_lock_key; struct lock_class_key s_umount_key; struct lock_class_key 
i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; struct lock_class_key i_alloc_sem_key; }; extern int get_sb_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int), struct vfsmount *mnt); extern int get_sb_single(struct file_system_type *fs_type, int flags, void *data, int (*fill_super)(struct super_block *, void *, int), struct vfsmount *mnt); extern int get_sb_nodev(struct file_system_type *fs_type, int flags, void *data, int (*fill_super)(struct super_block *, void *, int), struct vfsmount *mnt); void generic_shutdown_super(struct super_block *sb); void kill_block_super(struct super_block *sb); void kill_anon_super(struct super_block *sb); void kill_litter_super(struct super_block *sb); void deactivate_super(struct super_block *sb); int set_anon_super(struct super_block *s, void *data); struct super_block *sget(struct file_system_type *type, int (*test)(struct super_block *,void *), int (*set)(struct super_block *,void *), void *data); extern int get_sb_pseudo(struct file_system_type *, char *, const struct super_operations *ops, unsigned long, struct vfsmount *mnt); extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); int __put_super_and_need_restart(struct super_block *sb); extern int register_filesystem(struct file_system_type *); extern int unregister_filesystem(struct file_system_type *); extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); extern long do_mount(char *, char *, char *, unsigned long, void *); extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *); extern void drop_collected_mounts(struct vfsmount *); extern int vfs_statfs(struct dentry *, struct kstatfs *); extern struct kobject *fs_kobj; extern int rw_verify_area(int, struct file *, loff_t *, size_t); extern int locks_mandatory_locked(struct inode *); extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t); static inline __attribute__((always_inline)) int __mandatory_lock(struct inode *ino) { return (ino->i_mode & (0002000 | 00010)) == 0002000; } static inline __attribute__((always_inline)) int mandatory_lock(struct inode *ino) { return ((ino)->i_sb->s_flags & (64)) && __mandatory_lock(ino); } static inline __attribute__((always_inline)) int locks_verify_locked(struct inode *inode) { if (mandatory_lock(inode)) return locks_mandatory_locked(inode); return 0; } static inline __attribute__((always_inline)) int locks_verify_truncate(struct inode *inode, struct file *filp, loff_t size) { if (inode->i_flock && mandatory_lock(inode)) return locks_mandatory_area( 2, inode, filp, size < inode->i_size ? size : inode->i_size, (size < inode->i_size ? 
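/*
 * Sketch, assuming a hypothetical "examplefs": how file_system_type,
 * get_sb_nodev and register_filesystem (declared above) fit together.
 * examplefs_fill_super, examplefs_get_sb, examplefs_type and the magic
 * number are made-up; simple_fill_super and struct tree_descr are declared
 * later in this unit, kill_anon_super above.
 *
 *   static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
 *   {
 *           static struct tree_descr empty_files[] = { { "" } };
 *           return simple_fill_super(sb, 0x4558414d, empty_files);
 *   }
 *
 *   static int examplefs_get_sb(struct file_system_type *fs_type, int flags,
 *                               const char *dev_name, void *data,
 *                               struct vfsmount *mnt)
 *   {
 *           return get_sb_nodev(fs_type, flags, data, examplefs_fill_super, mnt);
 *   }
 *
 *   static struct file_system_type examplefs_type = {
 *           .name    = "examplefs",
 *           .get_sb  = examplefs_get_sb,
 *           .kill_sb = kill_anon_super,
 *   };
 *
 * A module init path would then call register_filesystem(&examplefs_type)
 * and later unregister_filesystem(&examplefs_type).
 */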
inode->i_size - size : size - inode->i_size) ); return 0; } static inline __attribute__((always_inline)) int break_lease(struct inode *inode, unsigned int mode) { if (inode->i_flock) return __break_lease(inode, mode); return 0; } # 1672 "include/linux/fs.h" extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, struct file *filp); extern long do_sys_open(int dfd, const char *filename, int flags, int mode); extern struct file *filp_open(const char *, int, int); extern struct file * dentry_open(struct dentry *, struct vfsmount *, int); extern int filp_close(struct file *, fl_owner_t id); extern char * getname(const char *); extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init_early(void); extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init(unsigned long); extern struct kmem_cache *names_cachep; # 1696 "include/linux/fs.h" extern int register_blkdev(unsigned int, const char *); extern void unregister_blkdev(unsigned int, const char *); extern struct block_device *bdget(dev_t); extern void bd_set_size(struct block_device *, loff_t size); extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); extern struct block_device *open_by_devnum(dev_t, fmode_t); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; extern const struct file_operations bad_sock_fops; extern const struct file_operations def_fifo_fops; extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); extern int blkdev_get(struct block_device *, fmode_t); extern int blkdev_put(struct block_device *, fmode_t); extern int bd_claim(struct block_device *, void *); extern void bd_release(struct block_device *); # 1729 "include/linux/fs.h" extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); extern int register_chrdev_region(dev_t, unsigned, const char *); extern int register_chrdev(unsigned int, const char *, const struct file_operations *); extern void unregister_chrdev(unsigned int, const char *); extern void unregister_chrdev_region(dev_t, unsigned); extern void chrdev_show(struct seq_file *,off_t); extern const char *__bdevname(dev_t, char *buffer); extern const char *bdevname(struct block_device *bdev, char *buffer); extern struct block_device *lookup_bdev(const char *); extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *); extern void close_bdev_exclusive(struct block_device *, fmode_t); extern void blkdev_show(struct seq_file *,off_t); extern void init_special_inode(struct inode *, umode_t, dev_t); extern void make_bad_inode(struct inode *); extern int is_bad_inode(struct inode *); extern const struct file_operations read_pipefifo_fops; extern const struct file_operations write_pipefifo_fops; extern const struct file_operations rdwr_pipefifo_fops; extern int fs_may_remount_ro(struct super_block *); # 1777 "include/linux/fs.h" extern void check_disk_size_change(struct gendisk *disk, struct block_device *bdev); extern int revalidate_disk(struct gendisk *); extern int check_disk_change(struct block_device *); extern int __invalidate_device(struct block_device *); extern int invalidate_partition(struct gendisk *, int); extern int invalidate_inodes(struct 
super_block *); unsigned long __invalidate_mapping_pages(struct address_space *mapping, unsigned long start, unsigned long end, bool be_atomic); unsigned long invalidate_mapping_pages(struct address_space *mapping, unsigned long start, unsigned long end); static inline __attribute__((always_inline)) unsigned long __attribute__((deprecated)) invalidate_inode_pages(struct address_space *mapping) { return invalidate_mapping_pages(mapping, 0, ~0UL); } static inline __attribute__((always_inline)) void invalidate_remote_inode(struct inode *inode) { if ((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) || (((inode->i_mode) & 00170000) == 0120000)) invalidate_mapping_pages(inode->i_mapping, 0, -1); } extern int invalidate_inode_pages2(struct address_space *mapping); extern int invalidate_inode_pages2_range(struct address_space *mapping, unsigned long start, unsigned long end); extern void generic_sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc); extern int write_inode_now(struct inode *, int); extern int filemap_fdatawrite(struct address_space *); extern int filemap_flush(struct address_space *); extern int filemap_fdatawait(struct address_space *); extern int filemap_write_and_wait(struct address_space *mapping); extern int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend); extern int wait_on_page_writeback_range(struct address_space *mapping, unsigned long start, unsigned long end); extern int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode); extern int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); extern long do_fsync(struct file *file, int datasync); extern void sync_supers(void); extern void sync_filesystems(int wait); extern void __fsync_super(struct super_block *sb); extern void emergency_sync(void); extern void emergency_remount(void); extern int do_remount_sb(struct super_block *sb, int flags, void *data, int force); extern sector_t bmap(struct inode *, sector_t); extern int notify_change(struct dentry *, struct iattr *); extern int inode_permission(struct inode *, int); extern int generic_permission(struct inode *, int, int (*check_acl)(struct inode *, int)); static inline __attribute__((always_inline)) bool execute_ok(struct inode *inode) { return (inode->i_mode & (00100|00010|00001)) || (((inode->i_mode) & 00170000) == 0040000); } extern int get_write_access(struct inode *); extern int deny_write_access(struct file *); static inline __attribute__((always_inline)) void put_write_access(struct inode * inode) { atomic_dec(&inode->i_writecount); } static inline __attribute__((always_inline)) void allow_write_access(struct file *file) { if (file) atomic_inc(&file->f_path.dentry->d_inode->i_writecount); } extern int do_pipe(int *); extern int do_pipe_flags(int *, int); extern struct file *create_read_pipe(struct file *f, int flags); extern struct file *create_write_pipe(int flags); extern void free_write_pipe(struct file *); extern struct file *do_filp_open(int dfd, const char *pathname, int open_flag, int mode); extern int may_open(struct nameidata *, int, int); extern int kernel_read(struct file *, unsigned long, char *, unsigned long); extern struct file * open_exec(const char *); extern int is_subdir(struct dentry *, struct dentry *); extern ino_t find_inode_number(struct dentry *, struct qstr *); # 1 "include/linux/err.h" 1 # 22 "include/linux/err.h" static inline __attribute__((always_inline)) void 
*ERR_PTR(long error) { return (void *) error; } static inline __attribute__((always_inline)) long PTR_ERR(const void *ptr) { return (long) ptr; } static inline __attribute__((always_inline)) long IS_ERR(const void *ptr) { return __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 0); } # 44 "include/linux/err.h" static inline __attribute__((always_inline)) void *ERR_CAST(const void *ptr) { return (void *) ptr; } # 1872 "include/linux/fs.h" 2 extern loff_t default_llseek(struct file *file, loff_t offset, int origin); extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); extern void inode_init_once(struct inode *); extern void iput(struct inode *); extern struct inode * igrab(struct inode *); extern ino_t iunique(struct super_block *, ino_t); extern int inode_needs_sync(struct inode *inode); extern void generic_delete_inode(struct inode *inode); extern void generic_drop_inode(struct inode *inode); extern struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, int (*test)(struct inode *, void *), void *data); extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval, int (*test)(struct inode *, void *), void *data); extern struct inode *ilookup(struct super_block *sb, unsigned long ino); extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); extern struct inode * iget_locked(struct super_block *, unsigned long); extern void unlock_new_inode(struct inode *); extern void __iget(struct inode * inode); extern void iget_failed(struct inode *); extern void clear_inode(struct inode *); extern void destroy_inode(struct inode *); extern struct inode *new_inode(struct super_block *); extern int should_remove_suid(struct dentry *); extern int file_remove_suid(struct file *); extern void __insert_inode_hash(struct inode *, unsigned long hashval); extern void remove_inode_hash(struct inode *); static inline __attribute__((always_inline)) void insert_inode_hash(struct inode *inode) { __insert_inode_hash(inode, inode->i_ino); } extern struct file * get_empty_filp(void); extern void file_move(struct file *f, struct list_head *list); extern void file_kill(struct file *f); struct bio; extern void submit_bio(int, struct bio *); extern int bdev_read_only(struct block_device *); extern int set_blocksize(struct block_device *, int); extern int sb_set_blocksize(struct super_block *, int); extern int sb_min_blocksize(struct super_block *, int); extern int sb_has_dirty_inodes(struct super_block *); extern int generic_file_mmap(struct file *, struct vm_area_struct *); extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size); int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t); extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t); extern ssize_t generic_file_aio_write_nolock(struct kiocb *, const struct iovec *, unsigned long, loff_t); extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *, unsigned long *, loff_t, loff_t *, size_t, size_t); extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *, unsigned long, loff_t, loff_t *, size_t, ssize_t); extern ssize_t do_sync_read(struct file *filp, char 
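/*
 * Sketch of the ERR_PTR / IS_ERR / PTR_ERR convention defined just above,
 * combined with filp_open()/filp_close() declared earlier in this unit.
 * example_probe_path is a hypothetical name; a failed filp_open() returns
 * a negative errno encoded in the pointer, which PTR_ERR() recovers.
 *
 *   static int example_probe_path(const char *path)
 *   {
 *           struct file *filp = filp_open(path, 0, 0);
 *
 *           if (IS_ERR(filp))
 *                   return PTR_ERR(filp);
 *           filp_close(filp, (fl_owner_t)0);
 *           return 0;
 *   }
 */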
*buf, size_t len, loff_t *ppos); extern ssize_t do_sync_write(struct file *filp, const char *buf, size_t len, loff_t *ppos); extern int generic_segment_checks(const struct iovec *iov, unsigned long *nr_segs, size_t *count, int access_flags); extern ssize_t generic_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); extern ssize_t generic_file_splice_write(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); extern ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, loff_t *, size_t len, unsigned int flags); extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, size_t len, unsigned int flags); extern void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); extern loff_t no_llseek(struct file *file, loff_t offset, int origin); extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset, int origin); extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); # 1970 "include/linux/fs.h" static inline __attribute__((always_inline)) int xip_truncate_page(struct address_space *mapping, loff_t from) { return 0; } ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, loff_t offset, unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, int lock_type); enum { DIO_LOCKING = 1, DIO_NO_LOCKING, DIO_OWN_LOCKING, }; static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, loff_t offset, unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io) { return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, nr_segs, get_block, end_io, DIO_LOCKING); } static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, loff_t offset, unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io) { return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, nr_segs, get_block, end_io, DIO_NO_LOCKING); } static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, loff_t offset, unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io) { return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, nr_segs, get_block, end_io, DIO_OWN_LOCKING); } extern const struct file_operations generic_ro_fops; extern int vfs_readlink(struct dentry *, char *, int, const char *); extern int vfs_follow_link(struct nameidata *, const char *); extern int page_readlink(struct dentry *, char *, int); extern void *page_follow_link_light(struct dentry *, struct nameidata *); extern void page_put_link(struct dentry *, struct nameidata *, void *); extern int __page_symlink(struct inode *inode, const char *symname, int len, gfp_t gfp_mask); extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern int generic_readlink(struct dentry *, char *, 
int); extern void generic_fillattr(struct inode *, struct kstat *); extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); void inode_add_bytes(struct inode *inode, loff_t bytes); void inode_sub_bytes(struct inode *inode, loff_t bytes); loff_t inode_get_bytes(struct inode *inode); void inode_set_bytes(struct inode *inode, loff_t bytes); extern int vfs_readdir(struct file *, filldir_t, void *); extern int vfs_stat(char *, struct kstat *); extern int vfs_lstat(char *, struct kstat *); extern int vfs_stat_fd(int dfd, char *, struct kstat *); extern int vfs_lstat_fd(int dfd, char *, struct kstat *); extern int vfs_fstat(unsigned int, struct kstat *); extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, unsigned long arg); extern int generic_block_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len, get_block_t *get_block); extern void get_filesystem(struct file_system_type *fs); extern void put_filesystem(struct file_system_type *fs); extern struct file_system_type *get_fs_type(const char *name); extern struct super_block *get_super(struct block_device *); extern struct super_block *user_get_super(dev_t); extern void drop_super(struct super_block *sb); extern int dcache_dir_open(struct inode *, struct file *); extern int dcache_dir_close(struct inode *, struct file *); extern loff_t dcache_dir_lseek(struct file *, loff_t, int); extern int dcache_readdir(struct file *, void *, filldir_t); extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern int simple_statfs(struct dentry *, struct kstatfs *); extern int simple_link(struct dentry *, struct inode *, struct dentry *); extern int simple_unlink(struct inode *, struct dentry *); extern int simple_rmdir(struct inode *, struct dentry *); extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); extern int simple_sync_file(struct file *, struct dentry *, int); extern int simple_empty(struct dentry *); extern int simple_readpage(struct file *file, struct page *page); extern int simple_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to); extern int simple_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata); extern int simple_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *); extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *); extern const struct file_operations simple_dir_operations; extern const struct inode_operations simple_dir_inode_operations; struct tree_descr { char *name; const struct file_operations *ops; int mode; }; struct dentry *d_alloc_name(struct dentry *, const char *); extern int simple_fill_super(struct super_block *, int, struct tree_descr *); extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); extern void simple_release_fs(struct vfsmount **mount, int *count); extern ssize_t simple_read_from_buffer(void *to, size_t count, loff_t *ppos, const void *from, size_t available); # 2100 "include/linux/fs.h" extern int inode_change_ok(struct inode *, struct iattr *); extern int __attribute__((warn_unused_result)) inode_setattr(struct inode *, struct iattr *); extern void file_update_time(struct file *file); extern int generic_show_options(struct seq_file 
*m, struct vfsmount *mnt); extern void save_mount_options(struct super_block *sb, char *options); static inline __attribute__((always_inline)) ino_t parent_ino(struct dentry *dentry) { ino_t res; _spin_lock(&dentry->d_lock); res = dentry->d_parent->d_inode->i_ino; _spin_unlock(&dentry->d_lock); return res; } struct simple_transaction_argresp { ssize_t size; char data[0]; }; char *simple_transaction_get(struct file *file, const char *buf, size_t size); ssize_t simple_transaction_read(struct file *file, char *buf, size_t size, loff_t *pos); int simple_transaction_release(struct inode *inode, struct file *file); static inline __attribute__((always_inline)) void simple_transaction_set(struct file *file, size_t n) { struct simple_transaction_argresp *ar = file->private_data; do { if (__builtin_expect(!!(n > ((1UL << 12) - sizeof(struct simple_transaction_argresp))), 0)) do { dump_bfin_trace_buffer(); printk("<0>" "BUG: failure at %s:%d/%s()!\n", "include/linux/fs.h", 2141, __func__); panic("BUG!"); } while (0); } while(0); __asm__ __volatile__("": : :"memory"); ar->size = n; } # 2181 "include/linux/fs.h" static inline __attribute__((always_inline)) void __attribute__((format(printf, 1, 2))) __simple_attr_check_format(const char *fmt, ...) { } int simple_attr_open(struct inode *inode, struct file *file, int (*get)(void *, u64 *), int (*set)(void *, u64), const char *fmt); int simple_attr_release(struct inode *inode, struct file *file); ssize_t simple_attr_read(struct file *file, char *buf, size_t len, loff_t *ppos); ssize_t simple_attr_write(struct file *file, const char *buf, size_t len, loff_t *ppos); # 2208 "include/linux/fs.h" static inline __attribute__((always_inline)) char *alloc_secdata(void) { return (char *)1; } static inline __attribute__((always_inline)) void free_secdata(void *secdata) { } struct ctl_table; int proc_nr_files(struct ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp, loff_t *ppos); int get_filesystem_list(char * buf); # 19 "include/linux/debugfs.h" 2 struct file_operations; struct debugfs_blob_wrapper { void *data; unsigned long size; }; extern struct dentry *arch_debugfs_dir; extern const struct file_operations debugfs_file_operations; extern const struct inode_operations debugfs_link_operations; struct dentry *debugfs_create_file(const char *name, mode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *dest); void debugfs_remove(struct dentry *dentry); void debugfs_remove_recursive(struct dentry *dentry); struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, struct dentry *new_dir, const char *new_name); struct dentry *debugfs_create_u8(const char *name, mode_t mode, struct dentry *parent, u8 *value); struct dentry *debugfs_create_u16(const char *name, mode_t mode, struct dentry *parent, u16 *value); struct dentry *debugfs_create_u32(const char *name, mode_t mode, struct dentry *parent, u32 *value); struct dentry *debugfs_create_u64(const char *name, mode_t mode, struct dentry *parent, u64 *value); struct dentry *debugfs_create_x8(const char *name, mode_t mode, struct dentry *parent, u8 *value); struct dentry *debugfs_create_x16(const char *name, mode_t mode, struct dentry *parent, u16 *value); struct dentry *debugfs_create_x32(const char *name, mode_t mode, struct dentry *parent, u32 *value); struct dentry 
*debugfs_create_bool(const char *name, mode_t mode, struct dentry *parent, u32 *value); struct dentry *debugfs_create_blob(const char *name, mode_t mode, struct dentry *parent, struct debugfs_blob_wrapper *blob); # 19 "kernel/trace/trace.c" 2 # 1 "include/linux/pagemap.h" 1 # 1 "include/linux/mm.h" 1 # 10 "include/linux/mm.h" # 1 "include/linux/mmdebug.h" 1 # 1 "include/linux/autoconf.h" 1 # 5 "include/linux/mmdebug.h" 2 # 11 "include/linux/mm.h" 2 # 1 "include/linux/rbtree.h" 1 # 100 "include/linux/rbtree.h" struct rb_node { unsigned long rb_parent_color; struct rb_node *rb_right; struct rb_node *rb_left; } __attribute__((aligned(sizeof(long)))); struct rb_root { struct rb_node *rb_node; }; # 123 "include/linux/rbtree.h" static inline __attribute__((always_inline)) void rb_set_parent(struct rb_node *rb, struct rb_node *p) { rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p; } static inline __attribute__((always_inline)) void rb_set_color(struct rb_node *rb, int color) { rb->rb_parent_color = (rb->rb_parent_color & ~1) | color; } # 139 "include/linux/rbtree.h" extern void rb_insert_color(struct rb_node *, struct rb_root *); extern void rb_erase(struct rb_node *, struct rb_root *); extern struct rb_node *rb_next(struct rb_node *); extern struct rb_node *rb_prev(struct rb_node *); extern struct rb_node *rb_first(struct rb_root *); extern struct rb_node *rb_last(struct rb_root *); extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); static inline __attribute__((always_inline)) void rb_link_node(struct rb_node * node, struct rb_node * parent, struct rb_node ** rb_link) { node->rb_parent_color = (unsigned long )parent; node->rb_left = node->rb_right = ((void *)0); *rb_link = node; } # 13 "include/linux/mm.h" 2 # 1 "include/linux/debug_locks.h" 1 struct task_struct; extern int debug_locks; extern int debug_locks_silent; extern int debug_locks_off(void); # 35 "include/linux/debug_locks.h" extern void locking_selftest(void); struct task_struct; # 49 "include/linux/debug_locks.h" static inline __attribute__((always_inline)) void debug_show_all_locks(void) { } static inline __attribute__((always_inline)) void __debug_show_held_locks(struct task_struct *task) { } static inline __attribute__((always_inline)) void debug_show_held_locks(struct task_struct *task) { } static inline __attribute__((always_inline)) void debug_check_no_locks_freed(const void *from, unsigned long len) { } static inline __attribute__((always_inline)) void debug_check_no_locks_held(struct task_struct *task) { } # 15 "include/linux/mm.h" 2 # 1 "include/linux/mm_types.h" 1 # 1 "include/linux/auxvec.h" 1 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/auxvec.h" 1 # 5 "include/linux/auxvec.h" 2 # 5 "include/linux/mm_types.h" 2 # 15 "include/linux/mm_types.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mmu.h" 1 struct sram_list_struct { struct sram_list_struct *next; void *addr; size_t length; }; typedef struct { struct vm_list_struct *vmlist; unsigned long end_brk; unsigned long stack_start; void *l1_stack_save; struct sram_list_struct *sram_list; # 30 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mmu.h" } mm_context_t; # 16 "include/linux/mm_types.h" 2 struct address_space; typedef unsigned long mm_counter_t; # 39 "include/linux/mm_types.h" struct page { unsigned long flags; atomic_t _count; union { atomic_t _mapcount; struct { u16 inuse; u16 objects; }; }; union { struct { unsigned long private; 
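/*
 * Sketch of the debugfs helpers declared above (debugfs_create_dir,
 * debugfs_create_u32, debugfs_remove).  example_counter, example_dir,
 * example_file and example_debugfs_init are hypothetical names.
 *
 *   static u32 example_counter;
 *   static struct dentry *example_dir, *example_file;
 *
 *   static int example_debugfs_init(void)
 *   {
 *           example_dir = debugfs_create_dir("example", (struct dentry *)0);
 *           if (!example_dir)
 *                   return -1;
 *           example_file = debugfs_create_u32("counter", 0644, example_dir,
 *                                             &example_counter);
 *           return 0;
 *   }
 *
 * Teardown would call debugfs_remove(example_file) and then
 * debugfs_remove(example_dir).
 */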
struct address_space *mapping; }; struct kmem_cache *slab; struct page *first_page; }; union { unsigned long index; void *freelist; }; struct list_head lru; # 97 "include/linux/mm_types.h" }; struct vm_area_struct { struct mm_struct * vm_mm; unsigned long vm_start; unsigned long vm_end; struct vm_area_struct *vm_next; pgprot_t vm_page_prot; unsigned long vm_flags; struct rb_node vm_rb; union { struct { struct list_head list; void *parent; struct vm_area_struct *head; } vm_set; struct raw_prio_tree_node prio_tree_node; } shared; struct list_head anon_vma_node; struct anon_vma *anon_vma; struct vm_operations_struct * vm_ops; unsigned long vm_pgoff; struct file * vm_file; void * vm_private_data; unsigned long vm_truncate_count; atomic_t vm_usage; }; struct core_thread { struct task_struct *task; struct core_thread *next; }; struct core_state { atomic_t nr_threads; struct core_thread dumper; struct completion startup; }; struct mm_struct { struct vm_area_struct * mmap; struct rb_root mm_rb; struct vm_area_struct * mmap_cache; unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); void (*unmap_area) (struct mm_struct *mm, unsigned long addr); unsigned long mmap_base; unsigned long task_size; unsigned long cached_hole_size; unsigned long free_area_cache; pgd_t * pgd; atomic_t mm_users; atomic_t mm_count; int map_count; struct rw_semaphore mmap_sem; spinlock_t page_table_lock; struct list_head mmlist; mm_counter_t _file_rss; mm_counter_t _anon_rss; unsigned long hiwater_rss; unsigned long hiwater_vm; unsigned long total_vm, locked_vm, shared_vm, exec_vm; unsigned long stack_vm, reserved_vm, def_flags, nr_ptes; unsigned long start_code, end_code, start_data, end_data; unsigned long start_brk, brk, start_stack; unsigned long arg_start, arg_end, env_start, env_end; unsigned long saved_auxv[(2*(0 + 18 + 1))]; cpumask_t cpu_vm_mask; mm_context_t context; # 226 "include/linux/mm_types.h" unsigned int faultstamp; unsigned int token_priority; unsigned int last_interval; unsigned long flags; struct core_state *core_state; rwlock_t ioctx_list_lock; struct kioctx *ioctx_list; # 253 "include/linux/mm_types.h" struct file *exe_file; unsigned long num_exe_file_vmas; }; # 16 "include/linux/mm.h" 2 struct mempolicy; struct anon_vma; struct file_ra_state; struct user_struct; struct writeback_control; extern unsigned long max_mapnr; extern unsigned long num_physpages; extern void * high_memory; extern int page_cluster; extern int sysctl_legacy_va_layout; extern unsigned long mmap_min_addr; # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h" 1 # 1 "include/asm-generic/4level-fixup.h" 1 # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h" 2 typedef pte_t *pte_addr_t; # 34 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h" extern void paging_init(void); static inline __attribute__((always_inline)) int pte_file(pte_t pte) { return 0; } # 62 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h" static inline __attribute__((always_inline)) pte_t pte_rdprotect(pte_t _pte) { _pte.pte &= ~(0x00000004); return _pte; }; static inline __attribute__((always_inline)) pte_t pte_mkread(pte_t _pte) { _pte.pte |= (0x00000004); return _pte; }; static inline __attribute__((always_inline)) pte_t pte_wrprotect(pte_t _pte) { _pte.pte &= ~(0x00000008); return _pte; }; static inline __attribute__((always_inline)) pte_t pte_mkwrite(pte_t _pte) { 
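/*
 * Sketch: how the rb_root embedded in mm_struct (mm->mm_rb, defined above)
 * is typically searched, in the style of find_vma().  example_find_vma is
 * a hypothetical name; rb_entry() is the container_of-style helper macro
 * from include/linux/rbtree.h.
 *
 *   static struct vm_area_struct *example_find_vma(struct mm_struct *mm,
 *                                                  unsigned long addr)
 *   {
 *           struct rb_node *node = mm->mm_rb.rb_node;
 *           struct vm_area_struct *found = (struct vm_area_struct *)0;
 *
 *           while (node) {
 *                   struct vm_area_struct *vma =
 *                           rb_entry(node, struct vm_area_struct, vm_rb);
 *
 *                   if (vma->vm_end > addr) {
 *                           found = vma;
 *                           if (vma->vm_start <= addr)
 *                                   break;
 *                           node = node->rb_left;
 *                   } else {
 *                           node = node->rb_right;
 *                   }
 *           }
 *           return found;
 *   }
 */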
_pte.pte |= (0x00000008); return _pte; }; static inline __attribute__((always_inline)) pte_t pte_exprotect(pte_t _pte) { _pte.pte &= ~(0x00000004 | 0x00000008); return _pte; }; static inline __attribute__((always_inline)) pte_t pte_mkexec(pte_t _pte) { _pte.pte |= (0x00000004 | 0x00000008); return _pte; }; static inline __attribute__((always_inline)) pte_t pte_mkclean(pte_t _pte) { _pte.pte &= ~(0x00000080); return _pte; }; static inline __attribute__((always_inline)) pte_t pte_mkdirty(pte_t _pte) { _pte.pte |= (0x00000080); return _pte; }; static inline __attribute__((always_inline)) pte_t pte_mkold(pte_t _pte) { _pte.pte &= ~0x00000010 | 0x00000004 | 0x00000008; return _pte; }; static inline __attribute__((always_inline)) pte_t pte_mkyoung(pte_t _pte) { _pte.pte |= 0x00000010 | 0x00000004 | 0x00000008; return _pte; }; extern unsigned int kobjsize(const void *objp); # 95 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h" # 1 "include/asm-generic/pgtable.h" 1 # 96 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h" 2 # 41 "include/linux/mm.h" 2 # 57 "include/linux/mm.h" extern struct kmem_cache *vm_area_cachep; struct vm_list_struct { struct vm_list_struct *next; struct vm_area_struct *vma; }; extern struct rb_root nommu_vma_tree; extern struct rw_semaphore nommu_vma_sem; extern unsigned int kobjsize(const void *objp); # 144 "include/linux/mm.h" extern pgprot_t protection_map[16]; # 159 "include/linux/mm.h" struct vm_fault { unsigned int flags; unsigned long pgoff; void *virtual_address; struct page *page; }; struct vm_operations_struct { void (*open)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area); int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page); int (*access)(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); # 215 "include/linux/mm.h" }; struct mmu_gather; struct inode; # 227 "include/linux/mm.h" # 1 "include/linux/page-flags.h" 1 # 72 "include/linux/page-flags.h" enum pageflags { PG_locked, PG_error, PG_referenced, PG_uptodate, PG_dirty, PG_lru, PG_active, PG_slab, PG_owner_priv_1, PG_arch_1, PG_reserved, PG_private, PG_writeback, PG_head, PG_tail, PG_swapcache, PG_mappedtodisk, PG_reclaim, PG_buddy, PG_swapbacked, __NR_PAGEFLAGS, PG_checked = PG_owner_priv_1, PG_pinned = PG_owner_priv_1, PG_savepinned = PG_dirty, PG_slob_page = PG_active, PG_slob_free = PG_private, PG_slub_frozen = PG_active, PG_slub_debug = PG_error, }; # 181 "include/linux/page-flags.h" struct page; static inline __attribute__((always_inline)) int PageLocked(struct page *page) { return test_bit(PG_locked, &page->flags); } static inline __attribute__((always_inline)) int PageError(struct page *page) { return test_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) void SetPageError(struct page *page) { set_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) void ClearPageError(struct page *page) { clear_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) int PageReferenced(struct page *page) { return test_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) void SetPageReferenced(struct page *page) { set_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReferenced(struct page *page) { clear_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) int 
TestClearPageReferenced(struct page *page) { return test_and_clear_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) int PageDirty(struct page *page) { return test_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void SetPageDirty(struct page *page) { set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void ClearPageDirty(struct page *page) { clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageDirty(struct page *page) { return test_and_set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageDirty(struct page *page) { return test_and_clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageDirty(struct page *page) { __clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int PageLRU(struct page *page) { return test_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void SetPageLRU(struct page *page) { set_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void ClearPageLRU(struct page *page) { clear_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageLRU(struct page *page) { __clear_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) int PageActive(struct page *page) { return test_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void SetPageActive(struct page *page) { set_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void ClearPageActive(struct page *page) { clear_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageActive(struct page *page) { __clear_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageActive(struct page *page) { return test_and_clear_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) int PageSlab(struct page *page) { return test_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlab(struct page *page) { __set_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlab(struct page *page) { __clear_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) int PageChecked(struct page *page) { return test_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) void SetPageChecked(struct page *page) { set_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageChecked(struct page *page) { clear_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) int PagePinned(struct page *page) { return test_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) void SetPagePinned(struct page *page) { set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePinned(struct page *page) { clear_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestSetPagePinned(struct page *page) { return test_and_set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestClearPagePinned(struct page *page) { return test_and_clear_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int PageSavePinned(struct page *page) { return test_bit(PG_savepinned, &page->flags); } static inline 
__attribute__((always_inline)) void SetPageSavePinned(struct page *page) { set_bit(PG_savepinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSavePinned(struct page *page) { clear_bit(PG_savepinned, &page->flags); }; static inline __attribute__((always_inline)) int PageReserved(struct page *page) { return test_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void SetPageReserved(struct page *page) { set_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReserved(struct page *page) { clear_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageReserved(struct page *page) { __clear_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) int PagePrivate(struct page *page) { return test_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void SetPagePrivate(struct page *page) { set_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate(struct page *page) { clear_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void __ClearPagePrivate(struct page *page) { __clear_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void __SetPagePrivate(struct page *page) { __set_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) int PageSwapBacked(struct page *page) { return test_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void SetPageSwapBacked(struct page *page) { set_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSwapBacked(struct page *page) { clear_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSwapBacked(struct page *page) { __clear_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) int PageSlobPage(struct page *page) { return test_bit(PG_slob_page, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlobPage(struct page *page) { __set_bit(PG_slob_page, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlobPage(struct page *page) { __clear_bit(PG_slob_page, &page->flags); } static inline __attribute__((always_inline)) int PageSlobFree(struct page *page) { return test_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlobFree(struct page *page) { __set_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlobFree(struct page *page) { __clear_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) int PageSlubFrozen(struct page *page) { return test_bit(PG_slub_frozen, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlubFrozen(struct page *page) { __set_bit(PG_slub_frozen, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlubFrozen(struct page *page) { __clear_bit(PG_slub_frozen, &page->flags); } static inline __attribute__((always_inline)) int PageSlubDebug(struct page *page) { return test_bit(PG_slub_debug, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlubDebug(struct page *page) { __set_bit(PG_slub_debug, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlubDebug(struct page *page) { __clear_bit(PG_slub_debug, &page->flags); } static inline 
__attribute__((always_inline)) int PageWriteback(struct page *page) { return test_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageWriteback(struct page *page) { return test_and_set_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageWriteback(struct page *page) { return test_and_clear_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int PageBuddy(struct page *page) { return test_bit(PG_buddy, &page->flags); } static inline __attribute__((always_inline)) void __SetPageBuddy(struct page *page) { __set_bit(PG_buddy, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageBuddy(struct page *page) { __clear_bit(PG_buddy, &page->flags); } static inline __attribute__((always_inline)) int PageMappedToDisk(struct page *page) { return test_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) void SetPageMappedToDisk(struct page *page) { set_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) void ClearPageMappedToDisk(struct page *page) { clear_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) int PageReclaim(struct page *page) { return test_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void SetPageReclaim(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReclaim(struct page *page) { clear_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReclaim(struct page *page) { return test_and_clear_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) int PageReadahead(struct page *page) { return test_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void SetPageReadahead(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReadahead(struct page *page) { clear_bit(PG_reclaim, &page->flags); } # 224 "include/linux/page-flags.h" static inline __attribute__((always_inline)) int PageHighMem(struct page *page) { return 0; } static inline __attribute__((always_inline)) int PageSwapCache(struct page *page) { return 0; } # 244 "include/linux/page-flags.h" static inline __attribute__((always_inline)) int PageMlocked(struct page *page) { return 0; } static inline __attribute__((always_inline)) void SetPageMlocked(struct page *page) { } static inline __attribute__((always_inline)) int TestClearPageMlocked(struct page *page) { return 0; } static inline __attribute__((always_inline)) int PageUnevictable(struct page *page) { return 0; } static inline __attribute__((always_inline)) int TestClearPageUnevictable(struct page *page) { return 0; } static inline __attribute__((always_inline)) void SetPageUnevictable(struct page *page) { } static inline __attribute__((always_inline)) void ClearPageUnevictable(struct page *page) { } static inline __attribute__((always_inline)) void __ClearPageUnevictable(struct page *page) { } static inline __attribute__((always_inline)) int PageUncached(struct page *page) { return 0; } static inline __attribute__((always_inline)) int PageUptodate(struct page *page) { int ret = test_bit(PG_uptodate, &(page)->flags); # 270 "include/linux/page-flags.h" if (ret) __asm__ __volatile__("": : :"memory"); return ret; } static inline __attribute__((always_inline)) void __SetPageUptodate(struct page *page) { __asm__ 
__volatile__("": : :"memory"); __set_bit(PG_uptodate, &(page)->flags); } static inline __attribute__((always_inline)) void SetPageUptodate(struct page *page) { # 296 "include/linux/page-flags.h" __asm__ __volatile__("": : :"memory"); set_bit(PG_uptodate, &(page)->flags); } static inline __attribute__((always_inline)) void ClearPageUptodate(struct page *page) { clear_bit(PG_uptodate, &page->flags); } extern void cancel_dirty_page(struct page *page, unsigned int account_size); int test_clear_page_writeback(struct page *page); int test_set_page_writeback(struct page *page); static inline __attribute__((always_inline)) void set_page_writeback(struct page *page) { test_set_page_writeback(page); } # 320 "include/linux/page-flags.h" static inline __attribute__((always_inline)) int PageHead(struct page *page) { return test_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __SetPageHead(struct page *page) { __set_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageHead(struct page *page) { __clear_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) int PageTail(struct page *page) { return test_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __SetPageTail(struct page *page) { __set_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageTail(struct page *page) { __clear_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) int PageCompound(struct page *page) { return page->flags & ((1L << PG_head) | (1L << PG_tail)); } # 228 "include/linux/mm.h" 2 # 245 "include/linux/mm.h" static inline __attribute__((always_inline)) int put_page_testzero(struct page *page) { do { } while (0); return (atomic_sub_return(1, (&page->_count)) == 0); } static inline __attribute__((always_inline)) int get_page_unless_zero(struct page *page) { do { } while (0); return ({ int c, old; c = (((&page->_count))->counter); while (c != (0) && (old = ((int)((__typeof__(*((&((((&page->_count)))->counter)))))__cmpxchg_local_generic(((&((((&page->_count)))->counter))), (unsigned long)(((c))), (unsigned long)(((c + (1)))), sizeof(*((&((((&page->_count)))->counter)))))))) != c) c = old; c != (0); }); } struct page *vmalloc_to_page(const void *addr); unsigned long vmalloc_to_pfn(const void *addr); static inline __attribute__((always_inline)) int is_vmalloc_addr(const void *x) { return 0; } static inline __attribute__((always_inline)) struct page *compound_head(struct page *page) { if (__builtin_expect(!!(PageTail(page)), 0)) return page->first_page; return page; } static inline __attribute__((always_inline)) int page_count(struct page *page) { return ((&compound_head(page)->_count)->counter); } static inline __attribute__((always_inline)) void get_page(struct page *page) { page = compound_head(page); do { } while (0); atomic_inc(&page->_count); } static inline __attribute__((always_inline)) struct page *virt_to_head_page(const void *x) { struct page *page = (mem_map + (((unsigned long)(x)-(0x00000000)) >> 12)); return compound_head(page); } static inline __attribute__((always_inline)) void init_page_count(struct page *page) { (((&page->_count)->counter) = 1); } void put_page(struct page *page); void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); void split_compound_page(struct page *page, unsigned int order); typedef void compound_page_dtor(struct page *); static inline __attribute__((always_inline)) void 
set_compound_page_dtor(struct page *page, compound_page_dtor *dtor) { page[1].lru.next = (void *)dtor; } static inline __attribute__((always_inline)) compound_page_dtor *get_compound_page_dtor(struct page *page) { return (compound_page_dtor *)page[1].lru.next; } static inline __attribute__((always_inline)) int compound_order(struct page *page) { if (!PageHead(page)) return 0; return (unsigned long)page[1].lru.prev; } static inline __attribute__((always_inline)) void set_compound_order(struct page *page, unsigned long order) { page[1].lru.prev = (void *)order; } # 497 "include/linux/mm.h" static inline __attribute__((always_inline)) enum zone_type page_zonenum(struct page *page) { return (page->flags >> (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0))) & ((1UL << 2) - 1); } # 510 "include/linux/mm.h" static inline __attribute__((always_inline)) int page_zone_id(struct page *page) { return (page->flags >> ((((((sizeof(unsigned long)*8) - 0) - 0) < ((((sizeof(unsigned long)*8) - 0) - 0) - 2))? (((sizeof(unsigned long)*8) - 0) - 0) : ((((sizeof(unsigned long)*8) - 0) - 0) - 2)) * ((0 + 2) != 0))) & ((1UL << (0 + 2)) - 1); } static inline __attribute__((always_inline)) int zone_to_nid(struct zone *zone) { return 0; } static inline __attribute__((always_inline)) int page_to_nid(struct page *page) { return (page->flags >> ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0))) & ((1UL << 0) - 1); } static inline __attribute__((always_inline)) struct zone *page_zone(struct page *page) { return &(&contig_page_data)->node_zones[page_zonenum(page)]; } # 545 "include/linux/mm.h" static inline __attribute__((always_inline)) void set_page_zone(struct page *page, enum zone_type zone) { page->flags &= ~(((1UL << 2) - 1) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0))); page->flags |= (zone & ((1UL << 2) - 1)) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0)); } static inline __attribute__((always_inline)) void set_page_node(struct page *page, unsigned long node) { page->flags &= ~(((1UL << 0) - 1) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0))); page->flags |= (node & ((1UL << 0) - 1)) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0)); } static inline __attribute__((always_inline)) void set_page_section(struct page *page, unsigned long section) { page->flags &= ~(((1UL << 0) - 1) << (((sizeof(unsigned long)*8) - 0) * (0 != 0))); page->flags |= (section & ((1UL << 0) - 1)) << (((sizeof(unsigned long)*8) - 0) * (0 != 0)); } static inline __attribute__((always_inline)) void set_page_links(struct page *page, enum zone_type zone, unsigned long node, unsigned long pfn) { set_page_zone(page, zone); set_page_node(page, node); set_page_section(page, ((pfn) >> 0)); } static inline __attribute__((always_inline)) unsigned long round_hint_to_min(unsigned long hint) { return hint; } # 1 "include/linux/vmstat.h" 1 # 1 "include/linux/mm.h" 1 # 7 "include/linux/vmstat.h" 2 # 31 "include/linux/vmstat.h" enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGALLOC_DMA, PGALLOC_NORMAL , PGALLOC_MOVABLE, PGFREE, PGACTIVATE, PGDEACTIVATE, PGFAULT, PGMAJFAULT, PGREFILL_DMA, PGREFILL_NORMAL , PGREFILL_MOVABLE, PGSTEAL_DMA, PGSTEAL_NORMAL , PGSTEAL_MOVABLE, PGSCAN_KSWAPD_DMA, PGSCAN_KSWAPD_NORMAL , PGSCAN_KSWAPD_MOVABLE, PGSCAN_DIRECT_DMA, PGSCAN_DIRECT_NORMAL , PGSCAN_DIRECT_MOVABLE, PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, PAGEOUTRUN, ALLOCSTALL, PGROTATED, # 54 "include/linux/vmstat.h" NR_VM_EVENT_ITEMS }; extern int sysctl_stat_interval; # 70 "include/linux/vmstat.h" 
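/*
 * Usage sketch (hypothetical helper, not part of the original translation
 * unit): the set_page_zone()/set_page_node()/set_page_section() helpers above
 * pack the zone, node and section identifiers into the upper bits of
 * page->flags, so a page initialised with set_page_links() can later be
 * mapped back to its zone with page_zone()/page_zonenum().  ZONE_NORMAL is
 * assumed to be a member of the zone_type enum expanded earlier in this dump.
 *
 *	static void init_one_page(struct page *page, unsigned long pfn)
 *	{
 *		set_page_links(page, ZONE_NORMAL, 0, pfn);
 *		init_page_count(page);
 *	}
 *
 *	// later, e.g. when freeing: struct zone *z = page_zone(page);
 */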
struct vm_event_state { unsigned long event[NR_VM_EVENT_ITEMS]; }; extern __typeof__(struct vm_event_state) per_cpu__vm_event_states; static inline __attribute__((always_inline)) void __count_vm_event(enum vm_event_item item) { per_cpu__vm_event_states.event[item]++; } static inline __attribute__((always_inline)) void count_vm_event(enum vm_event_item item) { (*({ extern int simple_identifier_vm_event_states(void); do { } while (0); &per_cpu__vm_event_states; })).event[item]++; do { } while (0); } static inline __attribute__((always_inline)) void __count_vm_events(enum vm_event_item item, long delta) { per_cpu__vm_event_states.event[item] += delta; } static inline __attribute__((always_inline)) void count_vm_events(enum vm_event_item item, long delta) { (*({ extern int simple_identifier_vm_event_states(void); do { } while (0); &per_cpu__vm_event_states; })).event[item] += delta; do { } while (0); } extern void all_vm_events(unsigned long *); static inline __attribute__((always_inline)) void vm_events_fold_cpu(int cpu) { } # 138 "include/linux/vmstat.h" extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; static inline __attribute__((always_inline)) void zone_page_state_add(long x, struct zone *zone, enum zone_stat_item item) { atomic_long_add(x, &zone->vm_stat[item]); atomic_long_add(x, &vm_stat[item]); } static inline __attribute__((always_inline)) unsigned long global_page_state(enum zone_stat_item item) { long x = atomic_long_read(&vm_stat[item]); return x; } static inline __attribute__((always_inline)) unsigned long zone_page_state(struct zone *zone, enum zone_stat_item item) { long x = atomic_long_read(&zone->vm_stat[item]); return x; } extern unsigned long global_lru_pages(void); static inline __attribute__((always_inline)) unsigned long zone_lru_pages(struct zone *zone) { return (zone_page_state(zone, NR_ACTIVE_ANON) + zone_page_state(zone, NR_ACTIVE_FILE) + zone_page_state(zone, NR_INACTIVE_ANON) + zone_page_state(zone, NR_INACTIVE_FILE)); } # 220 "include/linux/vmstat.h" static inline __attribute__((always_inline)) void zap_zone_vm_stats(struct zone *zone) { memset(zone->vm_stat, 0, sizeof(zone->vm_stat)); } extern void inc_zone_state(struct zone *, enum zone_stat_item); # 248 "include/linux/vmstat.h" static inline __attribute__((always_inline)) void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, int delta) { zone_page_state_add(delta, zone, item); } static inline __attribute__((always_inline)) void __inc_zone_state(struct zone *zone, enum zone_stat_item item) { atomic_long_inc(&zone->vm_stat[item]); atomic_long_inc(&vm_stat[item]); } static inline __attribute__((always_inline)) void __inc_zone_page_state(struct page *page, enum zone_stat_item item) { __inc_zone_state(page_zone(page), item); } static inline __attribute__((always_inline)) void __dec_zone_state(struct zone *zone, enum zone_stat_item item) { atomic_long_dec(&zone->vm_stat[item]); atomic_long_dec(&vm_stat[item]); } static inline __attribute__((always_inline)) void __dec_zone_page_state(struct page *page, enum zone_stat_item item) { __dec_zone_state(page_zone(page), item); } # 286 "include/linux/vmstat.h" static inline __attribute__((always_inline)) void refresh_cpu_vm_stats(int cpu) { } # 590 "include/linux/mm.h" 2 static inline __attribute__((always_inline)) __attribute__((always_inline)) void *lowmem_page_address(struct page *page) { return ((void *) ((unsigned long)((((unsigned long) ((void *)(((((page) - mem_map) << 12) + (0x00000000))))) >> 12) << 12))); } # 632 "include/linux/mm.h" extern 
struct address_space swapper_space; static inline __attribute__((always_inline)) struct address_space *page_mapping(struct page *page) { struct address_space *mapping = page->mapping; do { } while (0); if (__builtin_expect(!!((unsigned long)mapping & 1), 0)) mapping = ((void *)0); return mapping; } static inline __attribute__((always_inline)) int PageAnon(struct page *page) { return ((unsigned long)page->mapping & 1) != 0; } static inline __attribute__((always_inline)) unsigned long page_index(struct page *page) { if (__builtin_expect(!!(PageSwapCache(page)), 0)) return ((page)->private); return page->index; } static inline __attribute__((always_inline)) void reset_page_mapcount(struct page *page) { (((&(page)->_mapcount)->counter) = -1); } static inline __attribute__((always_inline)) int page_mapcount(struct page *page) { return ((&(page)->_mapcount)->counter) + 1; } static inline __attribute__((always_inline)) int page_mapped(struct page *page) { return ((&(page)->_mapcount)->counter) >= 0; } # 707 "include/linux/mm.h" extern void show_free_areas(void); static inline __attribute__((always_inline)) int shmem_lock(struct file *file, int lock, struct user_struct *user) { return 0; } struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags); int shmem_zero_setup(struct vm_area_struct *); extern unsigned long shmem_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); extern int can_do_mlock(void); extern int user_shm_lock(size_t, struct user_struct *); extern void user_shm_unlock(size_t, struct user_struct *); struct zap_details { struct vm_area_struct *nonlinear_vma; struct address_space *check_mapping; unsigned long first_index; unsigned long last_index; spinlock_t *i_mmap_lock; unsigned long truncate_count; }; struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size); unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *); unsigned long unmap_vmas(struct mmu_gather **tlb, struct vm_area_struct *start_vma, unsigned long start_addr, unsigned long end_addr, unsigned long *nr_accounted, struct zap_details *); # 768 "include/linux/mm.h" struct mm_walk { int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *); int (*pud_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *); int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); struct mm_struct *mm; void *private; }; int walk_page_range(unsigned long addr, unsigned long end, struct mm_walk *walk); void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); static inline __attribute__((always_inline)) void unmap_shared_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen) { unmap_mapping_range(mapping, holebegin, holelen, 0); } extern int vmtruncate(struct inode * inode, loff_t 
offset); extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); static inline __attribute__((always_inline)) int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access) { do { dump_bfin_trace_buffer(); printk("<0>" "BUG: failure at %s:%d/%s()!\n", "include/linux/mm.h", 807, __func__); panic("BUG!"); } while (0); return 0x0002; } extern int make_pages_present(unsigned long addr, unsigned long end); extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned long offset); int __set_page_dirty_nobuffers(struct page *page); int __set_page_dirty_no_writeback(struct page *page); int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page); int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len); extern unsigned long do_mremap(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr); extern int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags); # 849 "include/linux/mm.h" int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); # 867 "include/linux/mm.h" struct shrinker { int (*shrink)(int nr_to_scan, gfp_t gfp_mask); int seeks; struct list_head list; long nr; }; extern void register_shrinker(struct shrinker *); extern void unregister_shrinker(struct shrinker *); int vma_wants_writenotify(struct vm_area_struct *vma); extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl); static inline __attribute__((always_inline)) int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { return 0; } # 900 "include/linux/mm.h" int __pmd_alloc(struct mm_struct *mm, pgd_t *pud, unsigned long address); int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); # 946 "include/linux/mm.h" static inline __attribute__((always_inline)) void pgtable_page_ctor(struct page *page) { do {} while (0); __inc_zone_page_state(page, NR_PAGETABLE); } static inline __attribute__((always_inline)) void pgtable_page_dtor(struct page *page) { do {} while (0); __dec_zone_page_state(page, NR_PAGETABLE); } # 984 "include/linux/mm.h" extern void free_area_init(unsigned long * zones_size); extern void free_area_init_node(int nid, unsigned long * zones_size, unsigned long zone_start_pfn, unsigned long *zholes_size); # 1037 "include/linux/mm.h" extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, enum memmap_context); extern void setup_per_zone_pages_min(void); extern void mem_init(void); extern void show_mem(void); extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); extern int after_bootmem; static inline __attribute__((always_inline)) void 
setup_per_cpu_pageset(void) {} void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *); void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *); struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma, struct prio_tree_iter *iter); static inline __attribute__((always_inline)) void vma_nonlinear_insert(struct vm_area_struct *vma, struct list_head *list) { vma->shared.vm_set.parent = ((void *)0); list_add_tail(&vma->shared.vm_set.list, list); } extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); extern void vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long pgoff, struct vm_area_struct *insert); extern struct vm_area_struct *vma_merge(struct mm_struct *, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *, struct file *, unsigned long, struct mempolicy *); extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); extern int split_vma(struct mm_struct *, struct vm_area_struct *, unsigned long addr, int new_below); extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, struct rb_node **, struct rb_node *); extern void unlink_file_vma(struct vm_area_struct *); extern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, unsigned long pgoff); extern void exit_mmap(struct mm_struct *); extern int mm_take_all_locks(struct mm_struct *mm); extern void mm_drop_all_locks(struct mm_struct *mm); extern void added_exe_file_vma(struct mm_struct *mm); extern void removed_exe_file_vma(struct mm_struct *mm); # 1105 "include/linux/mm.h" extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); extern int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, struct page **pages); extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long pgoff); extern unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, unsigned long flags, unsigned int vm_flags, unsigned long pgoff, int accountable); static inline __attribute__((always_inline)) unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long offset) { unsigned long ret = -22; if ((offset + (((len)+((typeof(len))((1UL << 12))-1))&~((typeof(len))((1UL << 12))-1))) < offset) goto out; if (!(offset & ~(~((1UL << 12)-1)))) ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> 12); out: return ret; } extern int do_munmap(struct mm_struct *, unsigned long, size_t); extern unsigned long do_brk(unsigned long, unsigned long); extern unsigned long page_unuse(struct page *); extern void truncate_inode_pages(struct address_space *, loff_t); extern void truncate_inode_pages_range(struct address_space *, loff_t lstart, loff_t lend); extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); int write_one_page(struct page *page, int wait); int do_page_cache_readahead(struct address_space *mapping, struct file *filp, unsigned long offset, unsigned long nr_to_read); int 
force_page_cache_readahead(struct address_space *mapping, struct file *filp, unsigned long offset, unsigned long nr_to_read); void page_cache_sync_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *filp, unsigned long offset, unsigned long size); void page_cache_async_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *filp, struct page *pg, unsigned long offset, unsigned long size); unsigned long max_sane_readahead(unsigned long nr); extern int expand_stack(struct vm_area_struct *vma, unsigned long address); extern int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address); extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, struct vm_area_struct **pprev); static inline __attribute__((always_inline)) struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) { struct vm_area_struct * vma = find_vma(mm,start_addr); if (vma && end_addr <= vma->vm_start) vma = ((void *)0); return vma; } static inline __attribute__((always_inline)) unsigned long vma_pages(struct vm_area_struct *vma) { return (vma->vm_end - vma->vm_start) >> 12; } pgprot_t vm_get_page_prot(unsigned long vm_flags); struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t); int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); struct page *follow_page(struct vm_area_struct *, unsigned long address, unsigned int foll_flags); typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, void *data); extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data); void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long); # 1246 "include/linux/mm.h" static inline __attribute__((always_inline)) void kernel_map_pages(struct page *page, int numpages, int enable) {} static inline __attribute__((always_inline)) void enable_debug_pagealloc(void) { } extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk); int in_gate_area_no_task(unsigned long addr); int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *, void *, size_t *, loff_t *); unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages); void drop_pagecache(void); const char * arch_vma_name(struct vm_area_struct *vma); void print_vma_addr(char *prefix, unsigned long rip); struct page *sparse_mem_map_populate(unsigned long pnum, int nid); pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); pgd_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); pmd_t *vmemmap_pmd_populate(pgd_t *pud, unsigned long addr, int node); pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); void *vmemmap_alloc_block(unsigned long size, int node); void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); int vmemmap_populate_basepages(struct page *start_page, unsigned long pages, int node); int vmemmap_populate(struct page *start_page, unsigned long pages, int node); void vmemmap_populate_print_last(void); # 8 "include/linux/pagemap.h" 2 # 
1 "include/linux/highmem.h" 1 # 1 "include/linux/uaccess.h" 1 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" 1 # 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" # 1 "include/linux/sched.h" 1 # 44 "include/linux/sched.h" struct sched_param { int sched_priority; }; # 66 "include/linux/sched.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cputime.h" 1 # 1 "include/asm-generic/cputime.h" 1 typedef unsigned long cputime_t; # 24 "include/asm-generic/cputime.h" typedef u64 cputime64_t; # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cputime.h" 2 # 67 "include/linux/sched.h" 2 # 1 "include/linux/sem.h" 1 # 1 "include/linux/ipc.h" 1 # 9 "include/linux/ipc.h" struct ipc_perm { __kernel_key_t key; __kernel_uid_t uid; __kernel_gid_t gid; __kernel_uid_t cuid; __kernel_gid_t cgid; __kernel_mode_t mode; unsigned short seq; }; # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ipcbuf.h" 1 # 16 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ipcbuf.h" struct ipc64_perm { __kernel_key_t key; __kernel_uid32_t uid; __kernel_gid32_t gid; __kernel_uid32_t cuid; __kernel_gid32_t cgid; __kernel_mode_t mode; unsigned short __pad1; unsigned short seq; unsigned short __pad2; unsigned long __unused1; unsigned long __unused2; }; # 22 "include/linux/ipc.h" 2 # 57 "include/linux/ipc.h" struct ipc_kludge { struct msgbuf *msgp; long msgtyp; }; # 88 "include/linux/ipc.h" struct kern_ipc_perm { spinlock_t lock; int deleted; int id; key_t key; uid_t uid; gid_t gid; uid_t cuid; gid_t cgid; mode_t mode; unsigned long seq; void *security; }; # 5 "include/linux/sem.h" 2 # 23 "include/linux/sem.h" struct semid_ds { struct ipc_perm sem_perm; __kernel_time_t sem_otime; __kernel_time_t sem_ctime; struct sem *sem_base; struct sem_queue *sem_pending; struct sem_queue **sem_pending_last; struct sem_undo *undo; unsigned short sem_nsems; }; # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sembuf.h" 1 # 14 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sembuf.h" struct semid64_ds { struct ipc64_perm sem_perm; __kernel_time_t sem_otime; unsigned long __unused1; __kernel_time_t sem_ctime; unsigned long __unused2; unsigned long sem_nsems; unsigned long __unused3; unsigned long __unused4; }; # 36 "include/linux/sem.h" 2 struct sembuf { unsigned short sem_num; short sem_op; short sem_flg; }; union semun { int val; struct semid_ds *buf; unsigned short *array; struct seminfo *__buf; void *__pad; }; struct seminfo { int semmap; int semmni; int semmns; int semmnu; int semmsl; int semopm; int semume; int semusz; int semvmx; int semaem; }; # 83 "include/linux/sem.h" struct task_struct; struct sem { int semval; int sempid; }; struct sem_array { struct kern_ipc_perm sem_perm; time_t sem_otime; time_t sem_ctime; struct sem *sem_base; struct list_head sem_pending; struct list_head list_id; unsigned long sem_nsems; }; struct sem_queue { struct list_head list; struct task_struct *sleeper; struct sem_undo *undo; int pid; int status; struct sembuf *sops; int nsops; int alter; }; struct sem_undo { struct list_head list_proc; struct rcu_head rcu; struct sem_undo_list *ulp; struct list_head list_id; int semid; short * semadj; }; struct sem_undo_list { atomic_t refcnt; spinlock_t lock; struct list_head list_proc; }; struct sysv_sem { struct sem_undo_list *undo_list; }; extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk); extern void 
exit_sem(struct task_struct *tsk); # 70 "include/linux/sched.h" 2 # 1 "include/linux/signal.h" 1 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h" 1 struct siginfo; # 17 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h" typedef unsigned long old_sigset_t; typedef struct { unsigned long sig[(64 / 32)]; } sigset_t; # 107 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h" # 1 "include/asm-generic/signal.h" 1 # 17 "include/asm-generic/signal.h" typedef void __signalfn_t(int); typedef __signalfn_t *__sighandler_t; typedef void __restorefn_t(void); typedef __restorefn_t *__sigrestore_t; # 108 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h" 2 struct old_sigaction { __sighandler_t sa_handler; old_sigset_t sa_mask; unsigned long sa_flags; void (*sa_restorer) (void); }; struct sigaction { __sighandler_t sa_handler; unsigned long sa_flags; void (*sa_restorer) (void); sigset_t sa_mask; }; struct k_sigaction { struct sigaction sa; }; # 145 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h" typedef struct sigaltstack { void *ss_sp; int ss_flags; size_t ss_size; } stack_t; # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sigcontext.h" 1 struct sigcontext { unsigned long sc_r0; unsigned long sc_r1; unsigned long sc_r2; unsigned long sc_r3; unsigned long sc_r4; unsigned long sc_r5; unsigned long sc_r6; unsigned long sc_r7; unsigned long sc_p0; unsigned long sc_p1; unsigned long sc_p2; unsigned long sc_p3; unsigned long sc_p4; unsigned long sc_p5; unsigned long sc_usp; unsigned long sc_a0w; unsigned long sc_a1w; unsigned long sc_a0x; unsigned long sc_a1x; unsigned long sc_astat; unsigned long sc_rets; unsigned long sc_pc; unsigned long sc_retx; unsigned long sc_fp; unsigned long sc_i0; unsigned long sc_i1; unsigned long sc_i2; unsigned long sc_i3; unsigned long sc_m0; unsigned long sc_m1; unsigned long sc_m2; unsigned long sc_m3; unsigned long sc_l0; unsigned long sc_l1; unsigned long sc_l2; unsigned long sc_l3; unsigned long sc_b0; unsigned long sc_b1; unsigned long sc_b2; unsigned long sc_b3; unsigned long sc_lc0; unsigned long sc_lc1; unsigned long sc_lt0; unsigned long sc_lt1; unsigned long sc_lb0; unsigned long sc_lb1; unsigned long sc_seqstat; }; # 154 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h" 2 # 5 "include/linux/signal.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/siginfo.h" 1 # 1 "include/asm-generic/siginfo.h" 1 typedef union sigval { int sival_int; void *sival_ptr; } sigval_t; # 40 "include/asm-generic/siginfo.h" typedef struct siginfo { int si_signo; int si_errno; int si_code; union { int _pad[((128 - (3 * sizeof(int))) / sizeof(int))]; struct { pid_t _pid; uid_t _uid; } _kill; struct { timer_t _tid; int _overrun; char _pad[sizeof( uid_t) - sizeof(int)]; sigval_t _sigval; int _sys_private; } _timer; struct { pid_t _pid; uid_t _uid; sigval_t _sigval; } _rt; struct { pid_t _pid; uid_t _uid; int _status; clock_t _utime; clock_t _stime; } _sigchld; struct { void *_addr; } _sigfault; struct { long _band; int _fd; } _sigpoll; } _sifields; } siginfo_t; # 253 "include/asm-generic/siginfo.h" typedef struct sigevent { sigval_t sigev_value; int sigev_signo; int sigev_notify; union { int _pad[((64 - (sizeof(int) * 2 + sizeof(sigval_t))) / sizeof(int))]; int _tid; struct { void (*_function)(sigval_t); void *_attribute; } _sigev_thread; } _sigev_un; } sigevent_t; struct siginfo; 
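/*
 * Usage sketch (hypothetical helper, not part of the original translation
 * unit): for a plain kill-style signal only the _kill member of the
 * siginfo_t union defined above needs to be filled in; si_code 0 corresponds
 * to SI_USER in the unpreprocessed asm-generic/siginfo.h.
 *
 *	static void fill_kill_info(struct siginfo *info, int sig,
 *				   pid_t pid, uid_t uid)
 *	{
 *		memset(info, 0, sizeof(*info));
 *		info->si_signo = sig;
 *		info->si_errno = 0;
 *		info->si_code = 0;
 *		info->_sifields._kill._pid = pid;
 *		info->_sifields._kill._uid = uid;
 *	}
 */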
void do_schedule_next_timer(struct siginfo *info); static inline __attribute__((always_inline)) void copy_siginfo(struct siginfo *to, struct siginfo *from) { if (from->si_code < 0) memcpy(to, from, sizeof(*to)); else memcpy(to, from, (3 * sizeof(int)) + sizeof(from->_sifields._sigchld)); } extern int copy_siginfo_to_user(struct siginfo *to, struct siginfo *from); # 6 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/siginfo.h" 2 # 6 "include/linux/signal.h" 2 # 14 "include/linux/signal.h" struct sigqueue { struct list_head list; int flags; siginfo_t info; struct user_struct *user; }; struct sigpending { struct list_head list; sigset_t signal; }; # 38 "include/linux/signal.h" static inline __attribute__((always_inline)) void sigaddset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; if ((64 / 32) == 1) set->sig[0] |= 1UL << sig; else set->sig[sig / 32] |= 1UL << (sig % 32); } static inline __attribute__((always_inline)) void sigdelset(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; if ((64 / 32) == 1) set->sig[0] &= ~(1UL << sig); else set->sig[sig / 32] &= ~(1UL << (sig % 32)); } static inline __attribute__((always_inline)) int sigismember(sigset_t *set, int _sig) { unsigned long sig = _sig - 1; if ((64 / 32) == 1) return 1 & (set->sig[0] >> sig); else return 1 & (set->sig[sig / 32] >> (sig % 32)); } static inline __attribute__((always_inline)) int sigfindinword(unsigned long word) { return __ffs(~(~word)); } static inline __attribute__((always_inline)) int sigisemptyset(sigset_t *set) { extern void _NSIG_WORDS_is_unsupported_size(void); switch ((64 / 32)) { case 4: return (set->sig[3] | set->sig[2] | set->sig[1] | set->sig[0]) == 0; case 2: return (set->sig[1] | set->sig[0]) == 0; case 1: return set->sig[0] == 0; default: _NSIG_WORDS_is_unsupported_size(); return 0; } } # 119 "include/linux/signal.h" static inline __attribute__((always_inline)) void sigorsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) | (b3)); r->sig[2] = ((a2) | (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) | (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) | (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } } static inline __attribute__((always_inline)) void sigandsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & (b3)); r->sig[2] = ((a2) & (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } } static inline __attribute__((always_inline)) void signandsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & ~(b3)); r->sig[2] = ((a2) & ~(b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & ~(b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & ~(b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } } # 149 "include/linux/signal.h" static 
inline __attribute__((always_inline)) void signotset(sigset_t *set) { extern void _NSIG_WORDS_is_unsupported_size(void); switch ((64 / 32)) { case 4: set->sig[3] = (~(set->sig[3])); set->sig[2] = (~(set->sig[2])); case 2: set->sig[1] = (~(set->sig[1])); case 1: set->sig[0] = (~(set->sig[0])); break; default: _NSIG_WORDS_is_unsupported_size(); } } static inline __attribute__((always_inline)) void sigemptyset(sigset_t *set) { switch ((64 / 32)) { default: memset(set, 0, sizeof(sigset_t)); break; case 2: set->sig[1] = 0; case 1: set->sig[0] = 0; break; } } static inline __attribute__((always_inline)) void sigfillset(sigset_t *set) { switch ((64 / 32)) { default: memset(set, -1, sizeof(sigset_t)); break; case 2: set->sig[1] = -1; case 1: set->sig[0] = -1; break; } } static inline __attribute__((always_inline)) void sigaddsetmask(sigset_t *set, unsigned long mask) { set->sig[0] |= mask; } static inline __attribute__((always_inline)) void sigdelsetmask(sigset_t *set, unsigned long mask) { set->sig[0] &= ~mask; } static inline __attribute__((always_inline)) int sigtestsetmask(sigset_t *set, unsigned long mask) { return (set->sig[0] & mask) != 0; } static inline __attribute__((always_inline)) void siginitset(sigset_t *set, unsigned long mask) { set->sig[0] = mask; switch ((64 / 32)) { default: memset(&set->sig[1], 0, sizeof(long)*((64 / 32)-1)); break; case 2: set->sig[1] = 0; case 1: ; } } static inline __attribute__((always_inline)) void siginitsetinv(sigset_t *set, unsigned long mask) { set->sig[0] = ~mask; switch ((64 / 32)) { default: memset(&set->sig[1], -1, sizeof(long)*((64 / 32)-1)); break; case 2: set->sig[1] = -1; case 1: ; } } static inline __attribute__((always_inline)) void init_sigpending(struct sigpending *sig) { sigemptyset(&sig->signal); INIT_LIST_HEAD(&sig->list); } extern void flush_sigqueue(struct sigpending *queue); static inline __attribute__((always_inline)) int valid_signal(unsigned long sig) { return sig <= 64 ? 
1 : 0; } extern int next_signal(struct sigpending *pending, sigset_t *mask); extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); extern long do_sigpending(void *, unsigned long); extern int sigprocmask(int, sigset_t *, sigset_t *); extern int show_unhandled_signals; struct pt_regs; extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); extern void exit_signals(struct task_struct *tsk); extern struct kmem_cache *sighand_cachep; int unhandled_signal(struct task_struct *tsk, int sig); # 373 "include/linux/signal.h" void signals_init(void); # 71 "include/linux/sched.h" 2 # 1 "include/linux/fs_struct.h" 1 struct fs_struct { atomic_t count; rwlock_t lock; int umask; struct path root, pwd; }; extern struct kmem_cache *fs_cachep; extern void exit_fs(struct task_struct *); extern void set_fs_root(struct fs_struct *, struct path *); extern void set_fs_pwd(struct fs_struct *, struct path *); extern struct fs_struct *copy_fs_struct(struct fs_struct *); extern void put_fs_struct(struct fs_struct *); # 72 "include/linux/sched.h" 2 # 1 "include/linux/proportions.h" 1 # 12 "include/linux/proportions.h" # 1 "include/linux/percpu_counter.h" 1 # 78 "include/linux/percpu_counter.h" struct percpu_counter { s64 count; }; static inline __attribute__((always_inline)) int percpu_counter_init(struct percpu_counter *fbc, s64 amount) { fbc->count = amount; return 0; } static inline __attribute__((always_inline)) void percpu_counter_destroy(struct percpu_counter *fbc) { } static inline __attribute__((always_inline)) void percpu_counter_set(struct percpu_counter *fbc, s64 amount) { fbc->count = amount; } static inline __attribute__((always_inline)) void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { do { } while (0); fbc->count += amount; do { } while (0); } static inline __attribute__((always_inline)) s64 percpu_counter_read(struct percpu_counter *fbc) { return fbc->count; } static inline __attribute__((always_inline)) s64 percpu_counter_read_positive(struct percpu_counter *fbc) { return fbc->count; } static inline __attribute__((always_inline)) s64 percpu_counter_sum_positive(struct percpu_counter *fbc) { return percpu_counter_read_positive(fbc); } static inline __attribute__((always_inline)) s64 percpu_counter_sum(struct percpu_counter *fbc) { return percpu_counter_read(fbc); } static inline __attribute__((always_inline)) void percpu_counter_inc(struct percpu_counter *fbc) { percpu_counter_add(fbc, 1); } static inline __attribute__((always_inline)) void percpu_counter_dec(struct percpu_counter *fbc) { percpu_counter_add(fbc, -1); } static inline __attribute__((always_inline)) void percpu_counter_sub(struct percpu_counter *fbc, s64 amount) { percpu_counter_add(fbc, -amount); } # 13 "include/linux/proportions.h" 2 struct prop_global { int shift; struct percpu_counter events; }; struct prop_descriptor { int index; struct prop_global pg[2]; struct mutex mutex; }; int prop_descriptor_init(struct prop_descriptor *pd, int shift); void prop_change_shift(struct prop_descriptor *pd, int new_shift); struct prop_local_percpu { struct percpu_counter events; int shift; unsigned long period; spinlock_t lock; }; int prop_local_init_percpu(struct prop_local_percpu *pl); void prop_local_destroy_percpu(struct prop_local_percpu *pl); void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl); void prop_fraction_percpu(struct 
prop_descriptor *pd, struct prop_local_percpu *pl, long *numerator, long *denominator); static inline __attribute__((always_inline)) void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl) { unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); __prop_inc_percpu(pd, pl); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } # 89 "include/linux/proportions.h" void __prop_inc_percpu_max(struct prop_descriptor *pd, struct prop_local_percpu *pl, long frac); struct prop_local_single { unsigned long events; unsigned long period; int shift; spinlock_t lock; }; int prop_local_init_single(struct prop_local_single *pl); void prop_local_destroy_single(struct prop_local_single *pl); void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl); void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl, long *numerator, long *denominator); static inline __attribute__((always_inline)) void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl) { unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %1;" : "=&d" (flags) : "d" (0x3F) ); __prop_inc_single(pd, pl); do { if ((((flags) & ~0x3f) != 0)) __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); } while (0); } # 78 "include/linux/sched.h" 2 # 1 "include/linux/seccomp.h" 1 # 24 "include/linux/seccomp.h" typedef struct { } seccomp_t; static inline __attribute__((always_inline)) long prctl_get_seccomp(void) { return -22; } static inline __attribute__((always_inline)) long prctl_set_seccomp(unsigned long arg2) { return -22; } # 79 "include/linux/sched.h" 2 # 1 "include/linux/rtmutex.h" 1 # 16 "include/linux/rtmutex.h" # 1 "include/linux/plist.h" 1 # 80 "include/linux/plist.h" struct plist_head { struct list_head prio_list; struct list_head node_list; }; struct plist_node { int prio; struct plist_head plist; }; # 127 "include/linux/plist.h" static inline __attribute__((always_inline)) void plist_head_init(struct plist_head *head, spinlock_t *lock) { INIT_LIST_HEAD(&head->prio_list); INIT_LIST_HEAD(&head->node_list); } static inline __attribute__((always_inline)) void plist_node_init(struct plist_node *node, int prio) { node->prio = prio; plist_head_init(&node->plist, ((void *)0)); } extern void plist_add(struct plist_node *node, struct plist_head *head); extern void plist_del(struct plist_node *node, struct plist_head *head); # 195 "include/linux/plist.h" static inline __attribute__((always_inline)) int plist_head_empty(const struct plist_head *head) { return list_empty(&head->node_list); } static inline __attribute__((always_inline)) int plist_node_empty(const struct plist_node *node) { return plist_head_empty(&node->plist); } # 234 "include/linux/plist.h" static inline __attribute__((always_inline)) struct plist_node* plist_first(const struct plist_head *head) { return ({ const typeof( ((struct plist_node *)0)->plist.node_list ) *__mptr = (head->node_list.next); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct plist_node,plist.node_list) );}); } # 17 "include/linux/rtmutex.h" 2 # 26 "include/linux/rtmutex.h" struct rt_mutex { spinlock_t wait_lock; struct plist_head wait_list; struct task_struct *owner; }; struct rt_mutex_waiter; struct hrtimer_sleeper; static inline __attribute__((always_inline)) int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len) { return 0; } # 80 "include/linux/rtmutex.h" static inline 
__attribute__((always_inline)) int rt_mutex_is_locked(struct rt_mutex *lock) { return lock->owner != ((void *)0); } extern void __rt_mutex_init(struct rt_mutex *lock, const char *name); extern void rt_mutex_destroy(struct rt_mutex *lock); extern void rt_mutex_lock(struct rt_mutex *lock); extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, int detect_deadlock); extern int rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout, int detect_deadlock); extern int rt_mutex_trylock(struct rt_mutex *lock); extern void rt_mutex_unlock(struct rt_mutex *lock); # 81 "include/linux/sched.h" 2 # 1 "include/linux/resource.h" 1 struct task_struct; # 24 "include/linux/resource.h" struct rusage { struct timeval ru_utime; struct timeval ru_stime; long ru_maxrss; long ru_ixrss; long ru_idrss; long ru_isrss; long ru_minflt; long ru_majflt; long ru_nswap; long ru_inblock; long ru_oublock; long ru_msgsnd; long ru_msgrcv; long ru_nsignals; long ru_nvcsw; long ru_nivcsw; }; struct rlimit { unsigned long rlim_cur; unsigned long rlim_max; }; # 71 "include/linux/resource.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/resource.h" 1 # 1 "include/asm-generic/resource.h" 1 # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/resource.h" 2 # 72 "include/linux/resource.h" 2 int getrusage(struct task_struct *p, int who, struct rusage *ru); # 85 "include/linux/sched.h" 2 # 1 "include/linux/hrtimer.h" 1 # 26 "include/linux/hrtimer.h" struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_mode { HRTIMER_MODE_ABS, HRTIMER_MODE_REL, }; enum hrtimer_restart { HRTIMER_NORESTART, HRTIMER_RESTART, }; # 59 "include/linux/hrtimer.h" enum hrtimer_cb_mode { HRTIMER_CB_SOFTIRQ, HRTIMER_CB_IRQSAFE_PERCPU, HRTIMER_CB_IRQSAFE_UNLOCKED, }; # 124 "include/linux/hrtimer.h" struct hrtimer { struct rb_node node; ktime_t _expires; ktime_t _softexpires; enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; struct list_head cb_entry; enum hrtimer_cb_mode cb_mode; int start_pid; void *start_site; char start_comm[16]; }; # 147 "include/linux/hrtimer.h" struct hrtimer_sleeper { struct hrtimer timer; struct task_struct *task; }; # 164 "include/linux/hrtimer.h" struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; clockid_t index; struct rb_root active; struct rb_node *first; ktime_t resolution; ktime_t (*get_time)(void); ktime_t softirq_time; }; # 196 "include/linux/hrtimer.h" struct hrtimer_cpu_base { spinlock_t lock; struct hrtimer_clock_base clock_base[2]; struct list_head cb_pending; }; static inline __attribute__((always_inline)) void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) { timer->_expires = time; timer->_softexpires = time; } static inline __attribute__((always_inline)) void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta) { timer->_softexpires = time; timer->_expires = ktime_add_safe(time, delta); } static inline __attribute__((always_inline)) void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta) { timer->_softexpires = time; timer->_expires = ktime_add_safe(time, ns_to_ktime(delta)); } static inline __attribute__((always_inline)) void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) { timer->_expires.tv64 = tv64; timer->_softexpires.tv64 = tv64; } static inline __attribute__((always_inline)) void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) { timer->_expires = ktime_add_safe(timer->_expires, 
time); timer->_softexpires = ktime_add_safe(timer->_softexpires, time); } static inline __attribute__((always_inline)) void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns) { timer->_expires = ktime_add_ns(timer->_expires, ns); timer->_softexpires = ktime_add_ns(timer->_softexpires, ns); } static inline __attribute__((always_inline)) ktime_t hrtimer_get_expires(const struct hrtimer *timer) { return timer->_expires; } static inline __attribute__((always_inline)) ktime_t hrtimer_get_softexpires(const struct hrtimer *timer) { return timer->_softexpires; } static inline __attribute__((always_inline)) s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) { return timer->_expires.tv64; } static inline __attribute__((always_inline)) s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) { return timer->_softexpires.tv64; } static inline __attribute__((always_inline)) s64 hrtimer_get_expires_ns(const struct hrtimer *timer) { return ktime_to_ns(timer->_expires); } static inline __attribute__((always_inline)) ktime_t hrtimer_expires_remaining(const struct hrtimer *timer) { return ktime_sub(timer->_expires, timer->base->get_time()); } # 315 "include/linux/hrtimer.h" static inline __attribute__((always_inline)) void clock_was_set(void) { } static inline __attribute__((always_inline)) void hrtimer_peek_ahead_timers(void) { } static inline __attribute__((always_inline)) void hres_timers_resume(void) { } static inline __attribute__((always_inline)) ktime_t hrtimer_cb_get_time(struct hrtimer *timer) { return timer->base->softirq_time; } static inline __attribute__((always_inline)) int hrtimer_is_hres_active(struct hrtimer *timer) { return 0; } extern ktime_t ktime_get(void); extern ktime_t ktime_get_real(void); extern __typeof__(struct tick_device) per_cpu__tick_cpu_device; extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, enum hrtimer_mode mode); static inline __attribute__((always_inline)) void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock, enum hrtimer_mode mode) { hrtimer_init(timer, which_clock, mode); } static inline __attribute__((always_inline)) void destroy_hrtimer_on_stack(struct hrtimer *timer) { } extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode); extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long range_ns, const enum hrtimer_mode mode); extern int hrtimer_cancel(struct hrtimer *timer); extern int hrtimer_try_to_cancel(struct hrtimer *timer); static inline __attribute__((always_inline)) int hrtimer_start_expires(struct hrtimer *timer, enum hrtimer_mode mode) { unsigned long delta; ktime_t soft, hard; soft = hrtimer_get_softexpires(timer); hard = hrtimer_get_expires(timer); delta = ktime_to_ns(ktime_sub(hard, soft)); return hrtimer_start_range_ns(timer, soft, delta, mode); } static inline __attribute__((always_inline)) int hrtimer_restart(struct hrtimer *timer) { return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); extern ktime_t hrtimer_get_next_event(void); static inline __attribute__((always_inline)) int hrtimer_active(const struct hrtimer *timer) { return timer->state != 0x00; } static inline __attribute__((always_inline)) int hrtimer_is_queued(struct hrtimer *timer) { return timer->state & (0x01 | 0x04); } static inline __attribute__((always_inline)) int hrtimer_callback_running(struct hrtimer *timer) { return 
timer->state & 0x02; } extern u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); static inline __attribute__((always_inline)) u64 hrtimer_forward_now(struct hrtimer *timer, ktime_t interval) { return hrtimer_forward(timer, timer->base->get_time(), interval); } extern long hrtimer_nanosleep(struct timespec *rqtp, struct timespec *rmtp, const enum hrtimer_mode mode, const clockid_t clockid); extern long hrtimer_nanosleep_restart(struct restart_block *restart_block); extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *tsk); extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, const enum hrtimer_mode mode); extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); extern void hrtimer_run_queues(void); extern void hrtimer_run_pending(void); extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) hrtimers_init(void); extern u64 ktime_divns(const ktime_t kt, s64 div); extern void sysrq_timer_list_show(void); extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf, void *timerf, char *comm, unsigned int timer_flag); static inline __attribute__((always_inline)) void timer_stats_account_hrtimer(struct hrtimer *timer) { timer_stats_update_stats(timer, timer->start_pid, timer->start_site, timer->function, timer->start_comm, 0); } extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr); static inline __attribute__((always_inline)) void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) { __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0)); } static inline __attribute__((always_inline)) void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer) { timer->start_site = ((void *)0); } # 87 "include/linux/sched.h" 2 # 1 "include/linux/task_io_accounting.h" 1 # 11 "include/linux/task_io_accounting.h" struct task_io_accounting { # 45 "include/linux/task_io_accounting.h" }; # 88 "include/linux/sched.h" 2 # 1 "include/linux/latencytop.h" 1 # 33 "include/linux/latencytop.h" static inline __attribute__((always_inline)) void account_scheduler_latency(struct task_struct *task, int usecs, int inter) { } static inline __attribute__((always_inline)) void clear_all_latency_tracing(struct task_struct *p) { } # 90 "include/linux/sched.h" 2 # 1 "include/linux/cred.h" 1 # 91 "include/linux/sched.h" 2 struct mem_cgroup; struct exec_domain; struct futex_pi_state; struct robust_list_head; struct bio; # 116 "include/linux/sched.h" extern unsigned long avenrun[]; # 130 "include/linux/sched.h" extern unsigned long total_forks; extern int nr_threads; extern __typeof__(unsigned long) per_cpu__process_counts; extern int nr_processes(void); extern unsigned long nr_running(void); extern unsigned long nr_uninterruptible(void); extern unsigned long nr_active(void); extern unsigned long nr_iowait(void); struct seq_file; struct cfs_rq; struct task_group; extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); extern void proc_sched_set_task(struct task_struct *p); extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); # 161 "include/linux/sched.h" extern unsigned long long time_sync_thresh; # 238 "include/linux/sched.h" extern rwlock_t tasklist_lock; extern spinlock_t mmlist_lock; struct task_struct; extern void sched_init(void); extern void sched_init_smp(void); extern void schedule_tail(struct task_struct *prev); extern void init_idle(struct task_struct 
*idle, int cpu); extern void init_idle_bootup_task(struct task_struct *idle); extern int runqueue_is_locked(void); extern void task_rq_unlock_wait(struct task_struct *p); extern cpumask_t nohz_cpu_mask; static inline __attribute__((always_inline)) int select_nohz_load_balancer(int cpu) { return 0; } extern unsigned long rt_needs_cpu(int cpu); extern void show_state_filter(unsigned long state_filter); static inline __attribute__((always_inline)) void show_state(void) { show_state_filter(0); } extern void show_regs(struct pt_regs *); extern void show_stack(struct task_struct *task, unsigned long *sp); void io_schedule(void); long io_schedule_timeout(long timeout); extern void cpu_init (void); extern void trap_init(void); extern void account_process_tick(struct task_struct *task, int user); extern void update_process_times(int user); extern void scheduler_tick(void); extern void sched_show_task(struct task_struct *p); # 304 "include/linux/sched.h" static inline __attribute__((always_inline)) void softlockup_tick(void) { } static inline __attribute__((always_inline)) void spawn_softlockup_task(void) { } static inline __attribute__((always_inline)) void touch_softlockup_watchdog(void) { } static inline __attribute__((always_inline)) void touch_all_softlockup_watchdogs(void) { } extern char __sched_text_start[], __sched_text_end[]; extern int in_sched_functions(unsigned long addr); extern signed long schedule_timeout(signed long timeout); extern signed long schedule_timeout_interruptible(signed long timeout); extern signed long schedule_timeout_killable(signed long timeout); extern signed long schedule_timeout_uninterruptible(signed long timeout); void schedule(void); struct nsproxy; struct user_namespace; extern int sysctl_max_map_count; # 1 "include/linux/aio.h" 1 # 1 "include/linux/aio_abi.h" 1 # 32 "include/linux/aio_abi.h" typedef unsigned long aio_context_t; enum { IOCB_CMD_PREAD = 0, IOCB_CMD_PWRITE = 1, IOCB_CMD_FSYNC = 2, IOCB_CMD_FDSYNC = 3, IOCB_CMD_NOOP = 6, IOCB_CMD_PREADV = 7, IOCB_CMD_PWRITEV = 8, }; # 57 "include/linux/aio_abi.h" struct io_event { __u64 data; __u64 obj; __s64 res; __s64 res2; }; # 78 "include/linux/aio_abi.h" struct iocb { __u64 aio_data; __u32 aio_key, aio_reserved1; __u16 aio_lio_opcode; __s16 aio_reqprio; __u32 aio_fildes; __u64 aio_buf; __u64 aio_nbytes; __s64 aio_offset; __u64 aio_reserved2; __u32 aio_flags; __u32 aio_resfd; }; # 7 "include/linux/aio.h" 2 # 1 "include/linux/uio.h" 1 # 16 "include/linux/uio.h" struct iovec { void *iov_base; __kernel_size_t iov_len; }; struct kvec { void *iov_base; size_t iov_len; }; # 45 "include/linux/uio.h" static inline __attribute__((always_inline)) size_t iov_length(const struct iovec *iov, unsigned long nr_segs) { unsigned long seg; size_t ret = 0; for (seg = 0; seg < nr_segs; seg++) ret += iov[seg].iov_len; return ret; } unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); # 8 "include/linux/aio.h" 2 struct kioctx; # 86 "include/linux/aio.h" struct kiocb { struct list_head ki_run_list; unsigned long ki_flags; int ki_users; unsigned ki_key; struct file *ki_filp; struct kioctx *ki_ctx; int (*ki_cancel)(struct kiocb *, struct io_event *); ssize_t (*ki_retry)(struct kiocb *); void (*ki_dtor)(struct kiocb *); union { void *user; struct task_struct *tsk; } ki_obj; __u64 ki_user_data; wait_queue_t ki_wait; loff_t ki_pos; void *private; unsigned short ki_opcode; size_t ki_nbytes; char *ki_buf; size_t ki_left; struct iovec ki_inline_vec; struct iovec *ki_iovec; unsigned long ki_nr_segs; unsigned long 
ki_cur_seg; struct list_head ki_list; struct file *ki_eventfd; }; # 148 "include/linux/aio.h" struct aio_ring { unsigned id; unsigned nr; unsigned head; unsigned tail; unsigned magic; unsigned compat_features; unsigned incompat_features; unsigned header_length; struct io_event io_events[0]; }; struct aio_ring_info { unsigned long mmap_base; unsigned long mmap_size; struct page **ring_pages; spinlock_t ring_lock; long nr_pages; unsigned nr, tail; struct page *internal_pages[8]; }; struct kioctx { atomic_t users; int dead; struct mm_struct *mm; unsigned long user_id; struct kioctx *next; wait_queue_head_t wait; spinlock_t ctx_lock; int reqs_active; struct list_head active_reqs; struct list_head run_list; unsigned max_reqs; struct aio_ring_info ring_info; struct delayed_work wq; }; extern unsigned aio_max_size; # 215 "include/linux/aio.h" static inline __attribute__((always_inline)) ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; } static inline __attribute__((always_inline)) int aio_put_req(struct kiocb *iocb) { return 0; } static inline __attribute__((always_inline)) void kick_iocb(struct kiocb *iocb) { } static inline __attribute__((always_inline)) int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; } struct mm_struct; static inline __attribute__((always_inline)) void exit_aio(struct mm_struct *mm) { } static inline __attribute__((always_inline)) struct kiocb *list_kiocb(struct list_head *h) { return ({ const typeof( ((struct kiocb *)0)->ki_list ) *__mptr = (h); (struct kiocb *)( (char *)__mptr - __builtin_offsetof(struct kiocb,ki_list) );}); } extern unsigned long aio_nr; extern unsigned long aio_max_nr; # 344 "include/linux/sched.h" 2 extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); extern unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); extern void arch_unmap_area(struct mm_struct *, unsigned long); extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); # 391 "include/linux/sched.h" extern void set_dumpable(struct mm_struct *mm, int value); extern int get_dumpable(struct mm_struct *mm); # 422 "include/linux/sched.h" struct sighand_struct { atomic_t count; struct k_sigaction action[64]; spinlock_t siglock; wait_queue_head_t signalfd_wqh; }; struct pacct_struct { int ac_flag; long ac_exitcode; unsigned long ac_mem; cputime_t ac_utime, ac_stime; unsigned long ac_minflt, ac_majflt; }; # 448 "include/linux/sched.h" struct task_cputime { cputime_t utime; cputime_t stime; unsigned long long sum_exec_runtime; }; # 466 "include/linux/sched.h" struct thread_group_cputime { struct task_cputime *totals; }; # 477 "include/linux/sched.h" struct signal_struct { atomic_t count; atomic_t live; wait_queue_head_t wait_chldexit; struct task_struct *curr_target; struct sigpending shared_pending; int group_exit_code; int notify_count; struct task_struct *group_exit_task; int group_stop_count; unsigned int flags; struct list_head posix_timers; struct hrtimer real_timer; struct pid *leader_pid; ktime_t it_real_incr; cputime_t it_prof_expires, it_virt_expires; cputime_t it_prof_incr, it_virt_incr; struct thread_group_cputime cputime; struct task_cputime cputime_expires; struct list_head cpu_timers[3]; # 533 "include/linux/sched.h" union { pid_t pgrp __attribute__((deprecated)); pid_t __pgrp; }; struct pid *tty_old_pgrp; union { pid_t session __attribute__((deprecated)); pid_t __session; }; int leader; 
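/*
 * Illustrative sketch (editorial, not part of the preprocessed sources): the
 * open-coded "({ const typeof(...) *__mptr ... })" expression in list_kiocb()
 * above (and again in next_thread() further down) is the expansion of the
 * kernel's container_of() helper, which maps a pointer to a structure member
 * back to the enclosing structure by subtracting the member's offset. A
 * minimal, self-contained user-space rendition of the same idea; the names
 * my_container_of, struct node and struct item are hypothetical:
 *
 *   #include <stddef.h>
 *   #include <stdio.h>
 *
 *   #define my_container_of(ptr, type, member) \
 *           ((type *)((char *)(ptr) - offsetof(type, member)))
 *
 *   struct node { struct node *next; };
 *   struct item { int value; struct node link; };
 *
 *   int main(void)
 *   {
 *           struct item it = { .value = 42 };
 *           struct node *n = &it.link;          // e.g. obtained from a list head
 *           struct item *back = my_container_of(n, struct item, link);
 *           printf("%d\n", back->value);        // prints 42
 *           return 0;
 *   }
 */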
struct tty_struct *tty; cputime_t cutime, cstime; cputime_t gtime; cputime_t cgtime; unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; unsigned long inblock, oublock, cinblock, coublock; struct task_io_accounting ioac; # 573 "include/linux/sched.h" struct rlimit rlim[16]; # 582 "include/linux/sched.h" struct pacct_struct pacct; # 591 "include/linux/sched.h" }; # 615 "include/linux/sched.h" static inline __attribute__((always_inline)) int signal_group_exit(const struct signal_struct *sig) { return (sig->flags & 0x00000008) || (sig->group_exit_task != ((void *)0)); } struct user_struct { atomic_t __count; atomic_t processes; atomic_t files; atomic_t sigpending; atomic_t inotify_watches; atomic_t inotify_devs; atomic_t epoll_devs; atomic_t epoll_watches; unsigned long locked_shm; struct hlist_node uidhash_node; uid_t uid; # 659 "include/linux/sched.h" }; extern int uids_sysfs_init(void); extern struct user_struct *find_user(uid_t); extern struct user_struct root_user; struct backing_dev_info; struct reclaim_state; struct sched_info { unsigned long pcount; unsigned long long cpu_time, run_delay; unsigned long long last_arrival, last_queued; unsigned int bkl_count; }; # 722 "include/linux/sched.h" static inline __attribute__((always_inline)) int sched_info_on(void) { return 1; } enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE, CPU_MAX_IDLE_TYPES }; # 881 "include/linux/sched.h" struct sched_domain_attr; static inline __attribute__((always_inline)) void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, struct sched_domain_attr *dattr_new) { } struct io_context; struct group_info { int ngroups; atomic_t usage; gid_t small_block[32]; int nblocks; gid_t *blocks[0]; }; # 916 "include/linux/sched.h" extern struct group_info *groups_alloc(int gidsetsize); extern void groups_free(struct group_info *group_info); extern int set_current_groups(struct group_info *group_info); extern int groups_search(struct group_info *group_info, gid_t grp); static inline __attribute__((always_inline)) void prefetch_stack(struct task_struct *t) { } struct audit_context; struct mempolicy; struct pipe_inode_info; struct uts_namespace; struct rq; struct sched_domain; struct sched_class { const struct sched_class *next; void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup); void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); void (*yield_task) (struct rq *rq); void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync); struct task_struct * (*pick_next_task) (struct rq *rq); void (*put_prev_task) (struct rq *rq, struct task_struct *p); # 972 "include/linux/sched.h" void (*set_curr_task) (struct rq *rq); void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); void (*task_new) (struct rq *rq, struct task_struct *p); void (*switched_from) (struct rq *this_rq, struct task_struct *task, int running); void (*switched_to) (struct rq *this_rq, struct task_struct *task, int running); void (*prio_changed) (struct rq *this_rq, struct task_struct *task, int oldprio, int running); }; struct load_weight { unsigned long weight, inv_weight; }; # 1002 "include/linux/sched.h" struct sched_entity { struct load_weight load; struct rb_node run_node; struct list_head group_node; unsigned int on_rq; u64 exec_start; u64 sum_exec_runtime; u64 vruntime; u64 prev_sum_exec_runtime; u64 last_wakeup; u64 avg_overlap; u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; u64 sleep_start; u64 sleep_max; s64 
sum_sleep_runtime; u64 block_start; u64 block_max; u64 exec_max; u64 slice_max; u64 nr_migrations; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; u64 nr_failed_migrations_hot; u64 nr_forced_migrations; u64 nr_forced2_migrations; u64 nr_wakeups; u64 nr_wakeups_sync; u64 nr_wakeups_migrate; u64 nr_wakeups_local; u64 nr_wakeups_remote; u64 nr_wakeups_affine; u64 nr_wakeups_affine_attempts; u64 nr_wakeups_passive; u64 nr_wakeups_idle; # 1057 "include/linux/sched.h" }; struct sched_rt_entity { struct list_head run_list; unsigned long timeout; unsigned int time_slice; int nr_cpus_allowed; struct sched_rt_entity *back; }; struct task_struct { volatile long state; void *stack; atomic_t usage; unsigned int flags; unsigned int ptrace; int lock_depth; int prio, static_prio, normal_prio; unsigned int rt_priority; const struct sched_class *sched_class; struct sched_entity se; struct sched_rt_entity rt; # 1109 "include/linux/sched.h" unsigned char fpu_counter; s8 oomkilladj; unsigned int policy; cpumask_t cpus_allowed; struct sched_info sched_info; struct list_head tasks; struct mm_struct *mm, *active_mm; struct linux_binfmt *binfmt; int exit_state; int exit_code, exit_signal; int pdeath_signal; unsigned int personality; unsigned did_exec:1; pid_t pid; pid_t tgid; # 1151 "include/linux/sched.h" struct task_struct *real_parent; struct task_struct *parent; struct list_head children; struct list_head sibling; struct task_struct *group_leader; struct list_head ptraced; struct list_head ptrace_entry; struct pid_link pids[PIDTYPE_MAX]; struct list_head thread_group; struct completion *vfork_done; int *set_child_tid; int *clear_child_tid; cputime_t utime, stime, utimescaled, stimescaled; cputime_t gtime; cputime_t prev_utime, prev_stime; unsigned long nvcsw, nivcsw; struct timespec start_time; struct timespec real_start_time; unsigned long min_flt, maj_flt; struct task_cputime cputime_expires; struct list_head cpu_timers[3]; uid_t uid,euid,suid,fsuid; gid_t gid,egid,sgid,fsgid; struct group_info *group_info; kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset; struct user_struct *user; unsigned securebits; char comm[16]; int link_count, total_link_count; struct sysv_sem sysvsem; struct thread_struct thread; struct fs_struct *fs; struct files_struct *files; struct nsproxy *nsproxy; struct signal_struct *signal; struct sighand_struct *sighand; sigset_t blocked, real_blocked; sigset_t saved_sigmask; struct sigpending pending; unsigned long sas_ss_sp; size_t sas_ss_size; int (*notifier)(void *priv); void *notifier_data; sigset_t *notifier_mask; struct audit_context *audit_context; seccomp_t seccomp; u32 parent_exec_id; u32 self_exec_id; spinlock_t alloc_lock; spinlock_t pi_lock; struct plist_head pi_waiters; struct rt_mutex_waiter *pi_blocked_on; struct mutex_waiter *blocked_on; # 1290 "include/linux/sched.h" void *journal_info; struct bio *bio_list, **bio_tail; struct reclaim_state *reclaim_state; struct backing_dev_info *backing_dev_info; struct io_context *io_context; unsigned long ptrace_message; siginfo_t *last_siginfo; struct task_io_accounting ioac; # 1317 "include/linux/sched.h" struct css_set *cgroups; struct list_head cg_list; struct robust_list_head *robust_list; struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; atomic_t fs_excl; struct rcu_head rcu; struct pipe_inode_info *splice_pipe; int make_it_fail; struct prop_local_single dirties; # 1355 "include/linux/sched.h" unsigned long timer_slack_ns; unsigned long 
default_timer_slack_ns; struct list_head *scm_work_list; }; # 1380 "include/linux/sched.h" static inline __attribute__((always_inline)) int rt_prio(int prio) { if (__builtin_expect(!!(prio < 100), 0)) return 1; return 0; } static inline __attribute__((always_inline)) int rt_task(struct task_struct *p) { return rt_prio(p->prio); } static inline __attribute__((always_inline)) void set_task_session(struct task_struct *tsk, pid_t session) { tsk->signal->__session = session; } static inline __attribute__((always_inline)) void set_task_pgrp(struct task_struct *tsk, pid_t pgrp) { tsk->signal->__pgrp = pgrp; } static inline __attribute__((always_inline)) struct pid *task_pid(struct task_struct *task) { return task->pids[PIDTYPE_PID].pid; } static inline __attribute__((always_inline)) struct pid *task_tgid(struct task_struct *task) { return task->group_leader->pids[PIDTYPE_PID].pid; } static inline __attribute__((always_inline)) struct pid *task_pgrp(struct task_struct *task) { return task->group_leader->pids[PIDTYPE_PGID].pid; } static inline __attribute__((always_inline)) struct pid *task_session(struct task_struct *task) { return task->group_leader->pids[PIDTYPE_SID].pid; } struct pid_namespace; # 1438 "include/linux/sched.h" static inline __attribute__((always_inline)) pid_t task_pid_nr(struct task_struct *tsk) { return tsk->pid; } pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); static inline __attribute__((always_inline)) pid_t task_pid_vnr(struct task_struct *tsk) { return pid_vnr(task_pid(tsk)); } static inline __attribute__((always_inline)) pid_t task_tgid_nr(struct task_struct *tsk) { return tsk->tgid; } pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); static inline __attribute__((always_inline)) pid_t task_tgid_vnr(struct task_struct *tsk) { return pid_vnr(task_tgid(tsk)); } static inline __attribute__((always_inline)) pid_t task_pgrp_nr(struct task_struct *tsk) { return tsk->signal->__pgrp; } pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); static inline __attribute__((always_inline)) pid_t task_pgrp_vnr(struct task_struct *tsk) { return pid_vnr(task_pgrp(tsk)); } static inline __attribute__((always_inline)) pid_t task_session_nr(struct task_struct *tsk) { return tsk->signal->__session; } pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); static inline __attribute__((always_inline)) pid_t task_session_vnr(struct task_struct *tsk) { return pid_vnr(task_session(tsk)); } # 1498 "include/linux/sched.h" static inline __attribute__((always_inline)) int pid_alive(struct task_struct *p) { return p->pids[PIDTYPE_PID].pid != ((void *)0); } static inline __attribute__((always_inline)) int is_global_init(struct task_struct *tsk) { return tsk->pid == 1; } extern int is_container_init(struct task_struct *tsk); extern struct pid *cad_pid; extern void free_task(struct task_struct *tsk); extern void __put_task_struct(struct task_struct *t); static inline __attribute__((always_inline)) void put_task_struct(struct task_struct *t) { if ((atomic_sub_return(1, (&t->usage)) == 0)) __put_task_struct(t); } extern cputime_t task_utime(struct task_struct *p); extern cputime_t task_stime(struct task_struct *p); extern cputime_t task_gtime(struct task_struct *p); # 1599 "include/linux/sched.h" static inline __attribute__((always_inline)) int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) { if (!test_bit((0), (*new_mask).bits)) return -22; return 0; } static inline 
__attribute__((always_inline)) int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) { return set_cpus_allowed_ptr(p, &new_mask); } extern unsigned long long sched_clock(void); extern void sched_clock_init(void); extern u64 sched_clock_cpu(int cpu); static inline __attribute__((always_inline)) void sched_clock_tick(void) { } static inline __attribute__((always_inline)) void sched_clock_idle_sleep_event(void) { } static inline __attribute__((always_inline)) void sched_clock_idle_wakeup_event(u64 delta_ns) { } # 1639 "include/linux/sched.h" extern unsigned long long cpu_clock(int cpu); extern unsigned long long task_sched_runtime(struct task_struct *task); extern unsigned long long thread_group_sched_runtime(struct task_struct *task); # 1652 "include/linux/sched.h" extern void sched_clock_idle_sleep_event(void); extern void sched_clock_idle_wakeup_event(u64 delta_ns); static inline __attribute__((always_inline)) void idle_task_exit(void) {} extern void sched_idle_next(void); static inline __attribute__((always_inline)) void wake_up_idle_cpu(int cpu) { } extern unsigned int sysctl_sched_latency; extern unsigned int sysctl_sched_min_granularity; extern unsigned int sysctl_sched_wakeup_granularity; extern unsigned int sysctl_sched_child_runs_first; extern unsigned int sysctl_sched_features; extern unsigned int sysctl_sched_migration_cost; extern unsigned int sysctl_sched_nr_migrate; extern unsigned int sysctl_sched_shares_ratelimit; extern unsigned int sysctl_sched_shares_thresh; int sched_nr_latency_handler(struct ctl_table *table, int write, struct file *file, void *buffer, size_t *length, loff_t *ppos); extern unsigned int sysctl_sched_rt_period; extern int sysctl_sched_rt_runtime; int sched_rt_handler(struct ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp, loff_t *ppos); extern unsigned int sysctl_sched_compat_yield; extern int rt_mutex_getprio(struct task_struct *p); extern void rt_mutex_setprio(struct task_struct *p, int prio); extern void rt_mutex_adjust_pi(struct task_struct *p); # 1705 "include/linux/sched.h" extern void set_user_nice(struct task_struct *p, long nice); extern int task_prio(const struct task_struct *p); extern int task_nice(const struct task_struct *p); extern int can_nice(const struct task_struct *p, const int nice); extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); extern int sched_setscheduler_nocheck(struct task_struct *, int, struct sched_param *); extern struct task_struct *idle_task(int cpu); extern struct task_struct *curr_task(int cpu); extern void set_curr_task(int cpu, struct task_struct *p); void yield(void); extern struct exec_domain default_exec_domain; union thread_union { struct thread_info thread_info; unsigned long stack[8192/sizeof(long)]; }; static inline __attribute__((always_inline)) int kstack_end(void *addr) { return !(((unsigned long)addr+sizeof(void*)-1) & (8192 -sizeof(void*))); } extern union thread_union init_thread_union; extern struct task_struct init_task; extern struct mm_struct init_mm; extern struct pid_namespace init_pid_ns; # 1761 "include/linux/sched.h" extern struct task_struct *find_task_by_pid_type_ns(int type, int pid, struct pid_namespace *ns); extern struct task_struct *find_task_by_vpid(pid_t nr); extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); extern void __set_special_pids(struct pid *pid); extern struct user_struct * alloc_uid(struct 
user_namespace *, uid_t); static inline __attribute__((always_inline)) struct user_struct *get_uid(struct user_struct *u) { atomic_inc(&u->__count); return u; } extern void free_uid(struct user_struct *); extern void switch_uid(struct user_struct *); extern void release_uids(struct user_namespace *ns); extern void do_timer(unsigned long ticks); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); extern void wake_up_new_task(struct task_struct *tsk, unsigned long clone_flags); static inline __attribute__((always_inline)) void kick_process(struct task_struct *tsk) { } extern void sched_fork(struct task_struct *p, int clone_flags); extern void sched_dead(struct task_struct *p); extern int in_group_p(gid_t); extern int in_egroup_p(gid_t); extern void proc_caches_init(void); extern void flush_signals(struct task_struct *); extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); static inline __attribute__((always_inline)) int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) { unsigned long flags; int ret; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _spin_lock_irqsave(&tsk->sighand->siglock); } while (0); ret = dequeue_signal(tsk, mask, info); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _spin_unlock_irqrestore(&tsk->sighand->siglock, flags); } while (0); return ret; } extern void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask); extern void unblock_all_signals(void); extern void release_task(struct task_struct * p); extern int send_sig_info(int, struct siginfo *, struct task_struct *); extern int force_sigsegv(int, struct task_struct *); extern int force_sig_info(int, struct siginfo *, struct task_struct *); extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32); extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern int kill_proc_info(int, struct siginfo *, pid_t); extern int do_notify_parent(struct task_struct *, int); extern void force_sig(int, struct task_struct *); extern void force_sig_specific(int, struct task_struct *); extern int send_sig(int, struct task_struct *, int); extern void zap_other_threads(struct task_struct *p); extern struct sigqueue *sigqueue_alloc(void); extern void sigqueue_free(struct sigqueue *); extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long); static inline __attribute__((always_inline)) int kill_cad_pid(int sig, int priv) { return kill_pid(cad_pid, sig, priv); } static inline __attribute__((always_inline)) int is_si_special(const struct siginfo *info) { return info <= ((struct siginfo *) 2); } static inline __attribute__((always_inline)) int on_sig_stack(unsigned long sp) { return (sp - (get_current())->sas_ss_sp < (get_current())->sas_ss_size); } static inline __attribute__((always_inline)) int sas_ss_flags(unsigned long sp) { return ((get_current())->sas_ss_size == 0 
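/*
 * Editorial note (illustrative, not part of the preprocessed text): the
 * "do { ... _spin_lock_irqsave(...) ... } while (0)" blocks inside
 * dequeue_signal_lock() above are the expansion of an ordinary
 * spin_lock_irqsave()/spin_unlock_irqrestore() pair on ->sighand->siglock.
 * Before macro expansion the helper is essentially:
 *
 *   static inline int dequeue_signal_lock(struct task_struct *tsk,
 *                                         sigset_t *mask, siginfo_t *info)
 *   {
 *           unsigned long flags;
 *           int ret;
 *
 *           spin_lock_irqsave(&tsk->sighand->siglock, flags);
 *           ret = dequeue_signal(tsk, mask, info);
 *           spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 *
 *           return ret;
 *   }
 */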
? 2 : on_sig_stack(sp) ? 1 : 0); } extern struct mm_struct * mm_alloc(void); extern void __mmdrop(struct mm_struct *); static inline __attribute__((always_inline)) void mmdrop(struct mm_struct * mm) { if (__builtin_expect(!!((atomic_sub_return(1, (&mm->mm_count)) == 0)), 0)) __mmdrop(mm); } extern void mmput(struct mm_struct *); extern struct mm_struct *get_task_mm(struct task_struct *task); extern void mm_release(struct task_struct *, struct mm_struct *); extern struct mm_struct *dup_mm(struct task_struct *tsk); extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); extern void flush_thread(void); extern void exit_thread(void); extern void exit_files(struct task_struct *); extern void __cleanup_signal(struct signal_struct *); extern void __cleanup_sighand(struct sighand_struct *); extern void exit_itimers(struct signal_struct *); extern void flush_itimer_signals(void); extern void do_group_exit(int); extern void daemonize(const char *, ...); extern int allow_signal(int); extern int disallow_signal(int); extern int do_execve(char *, char * *, char * *, struct pt_regs *); extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int *, int *); struct task_struct *fork_idle(int); extern void set_task_comm(struct task_struct *tsk, char *from); extern char *get_task_comm(char *to, struct task_struct *tsk); static inline __attribute__((always_inline)) unsigned long wait_task_inactive(struct task_struct *p, long match_state) { return 1; } # 1950 "include/linux/sched.h" static inline __attribute__((always_inline)) int has_group_leader_pid(struct task_struct *p) { return p->pid == p->tgid; } static inline __attribute__((always_inline)) int same_thread_group(struct task_struct *p1, struct task_struct *p2) { return p1->tgid == p2->tgid; } static inline __attribute__((always_inline)) struct task_struct *next_thread(const struct task_struct *p) { return ({ const typeof( ((struct task_struct *)0)->thread_group ) *__mptr = (({ typeof(p->thread_group.next) _________p1 = (*(volatile typeof(p->thread_group.next) *)&(p->thread_group.next)); do { } while(0); (_________p1); })); (struct task_struct *)( (char *)__mptr - __builtin_offsetof(struct task_struct,thread_group) );}); } static inline __attribute__((always_inline)) int thread_group_empty(struct task_struct *p) { return list_empty(&p->thread_group); } # 1985 "include/linux/sched.h" static inline __attribute__((always_inline)) void task_lock(struct task_struct *p) { _spin_lock(&p->alloc_lock); } static inline __attribute__((always_inline)) void task_unlock(struct task_struct *p) { _spin_unlock(&p->alloc_lock); } extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags); static inline __attribute__((always_inline)) void unlock_task_sighand(struct task_struct *tsk, unsigned long *flags) { do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); } while (0); } static inline __attribute__((always_inline)) void setup_thread_stack(struct task_struct *p, struct task_struct *org) { *((struct thread_info *)(p)->stack) = *((struct thread_info *)(org)->stack); ((struct thread_info *)(p)->stack)->task = p; } static inline __attribute__((always_inline)) unsigned long *end_of_stack(struct task_struct *p) { return (unsigned long *)(((struct thread_info *)(p)->stack) + 1); } static inline __attribute__((always_inline)) int object_is_on_stack(void *obj) { void 
*stack = (((get_current()))->stack); return (obj >= stack) && (obj < (stack + 8192)); } extern void thread_info_cache_init(void); static inline __attribute__((always_inline)) void set_tsk_thread_flag(struct task_struct *tsk, int flag) { set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); } static inline __attribute__((always_inline)) void clear_tsk_thread_flag(struct task_struct *tsk, int flag) { clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); } static inline __attribute__((always_inline)) int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) { return test_and_set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); } static inline __attribute__((always_inline)) int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) { return test_and_clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); } static inline __attribute__((always_inline)) int test_tsk_thread_flag(struct task_struct *tsk, int flag) { return test_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); } static inline __attribute__((always_inline)) void set_tsk_need_resched(struct task_struct *tsk) { set_tsk_thread_flag(tsk,2); } static inline __attribute__((always_inline)) void clear_tsk_need_resched(struct task_struct *tsk) { clear_tsk_thread_flag(tsk,2); } static inline __attribute__((always_inline)) int test_tsk_need_resched(struct task_struct *tsk) { return __builtin_expect(!!(test_tsk_thread_flag(tsk,2)), 0); } static inline __attribute__((always_inline)) int signal_pending(struct task_struct *p) { return __builtin_expect(!!(test_tsk_thread_flag(p,1)), 0); } extern int __fatal_signal_pending(struct task_struct *p); static inline __attribute__((always_inline)) int fatal_signal_pending(struct task_struct *p) { return signal_pending(p) && __fatal_signal_pending(p); } static inline __attribute__((always_inline)) int signal_pending_state(long state, struct task_struct *p) { if (!(state & (1 | 128))) return 0; if (!signal_pending(p)) return 0; return (state & 1) || __fatal_signal_pending(p); } static inline __attribute__((always_inline)) int need_resched(void) { return __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 2)), 0); } # 2108 "include/linux/sched.h" extern int _cond_resched(void); static inline __attribute__((always_inline)) int cond_resched(void) { return _cond_resched(); } extern int cond_resched_lock(spinlock_t * lock); extern int cond_resched_softirq(void); static inline __attribute__((always_inline)) int cond_resched_bkl(void) { return _cond_resched(); } static inline __attribute__((always_inline)) int spin_needbreak(spinlock_t *lock) { return 0; } extern int thread_group_cputime_alloc(struct task_struct *); extern void thread_group_cputime(struct task_struct *, struct task_cputime *); static inline __attribute__((always_inline)) void thread_group_cputime_init(struct signal_struct *sig) { sig->cputime.totals = ((void *)0); } static inline __attribute__((always_inline)) int thread_group_cputime_clone_thread(struct task_struct *curr) { if (curr->signal->cputime.totals) return 0; return thread_group_cputime_alloc(curr); } static inline __attribute__((always_inline)) void thread_group_cputime_free(struct signal_struct *sig) { percpu_free((sig->cputime.totals)); } extern void recalc_sigpending_and_wake(struct task_struct *t); extern void recalc_sigpending(void); extern void signal_wake_up(struct task_struct *t, int resume_stopped); # 2190 "include/linux/sched.h" static inline __attribute__((always_inline)) unsigned int 
task_cpu(const struct task_struct *p) { return 0; } static inline __attribute__((always_inline)) void set_task_cpu(struct task_struct *p, unsigned int cpu) { } extern void arch_pick_mmap_layout(struct mm_struct *mm); extern void __trace_special(void *__tr, void *__data, unsigned long arg1, unsigned long arg2, unsigned long arg3); # 2215 "include/linux/sched.h" extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask); extern long sched_getaffinity(pid_t pid, cpumask_t *mask); extern int sched_mc_power_savings, sched_smt_power_savings; extern void normalize_rt_tasks(void); # 2267 "include/linux/sched.h" static inline __attribute__((always_inline)) void add_rchar(struct task_struct *tsk, ssize_t amt) { } static inline __attribute__((always_inline)) void add_wchar(struct task_struct *tsk, ssize_t amt) { } static inline __attribute__((always_inline)) void inc_syscr(struct task_struct *tsk) { } static inline __attribute__((always_inline)) void inc_syscw(struct task_struct *tsk) { } # 2292 "include/linux/sched.h" static inline __attribute__((always_inline)) void mm_update_next_owner(struct mm_struct *mm) { } static inline __attribute__((always_inline)) void mm_init_owner(struct mm_struct *mm, struct task_struct *p) { } # 13 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" 2 # 24 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" static inline __attribute__((always_inline)) void set_fs(mm_segment_t fs) { current_thread_info()->addr_limit = fs; } # 36 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" static inline __attribute__((always_inline)) int is_in_rom(unsigned long addr) { # 46 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" if ((addr < _ramstart) || (addr >= _ramend)) return (1); return (0); } # 60 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" static inline __attribute__((always_inline)) int _access_ok(unsigned long addr, unsigned long size) { return 1; } # 82 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" struct exception_table_entry { unsigned long insn, fixup; }; extern unsigned long search_exception_table(unsigned long); # 129 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" static inline __attribute__((always_inline)) int bad_user_access_length(void) { panic("bad_user_access_length"); return -1; } # 200 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result)) copy_from_user(void *to, const void *from, unsigned long n) { if (_access_ok((unsigned long)(from), (n))) memcpy(to, from, n); else return n; return 0; } static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result)) copy_to_user(void *to, const void *from, unsigned long n) { if (_access_ok((unsigned long)(to), (n))) memcpy(to, from, n); else return n; return 0; } static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) strncpy_from_user(char *dst, const char *src, long count) { char *tmp; if (!_access_ok((unsigned long)(src), (1))) return -14; strncpy(dst, src, count); for (tmp = dst; *tmp && count > 0; tmp++, count--) ; return (tmp - dst); } static inline __attribute__((always_inline)) long strnlen_user(const char *src, long n) { return (strlen(src) + 1); } static inline __attribute__((always_inline)) unsigned long 
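/*
 * Illustrative sketch (editorial, not part of the sources above): on this
 * no-MMU Blackfin configuration copy_from_user()/copy_to_user() reduce to an
 * _access_ok() range check plus memcpy(), but they keep the usual contract of
 * returning the number of bytes that could not be copied. A typical caller,
 * where foo_write and its buffer are hypothetical names used only for
 * illustration:
 *
 *   static ssize_t foo_write(struct file *file, const char *ubuf,
 *                            size_t count, loff_t *ppos)
 *   {
 *           char kbuf[64];
 *
 *           if (count > sizeof(kbuf))
 *                   count = sizeof(kbuf);
 *           if (copy_from_user(kbuf, ubuf, count))
 *                   return -EFAULT;       // some bytes were not copied
 *           // ... consume kbuf ...
 *           return count;
 *   }
 */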
__attribute__((warn_unused_result)) __clear_user(void *to, unsigned long n) { memset(to, 0, n); return 0; } # 6 "include/linux/uaccess.h" 2 # 16 "include/linux/uaccess.h" static inline __attribute__((always_inline)) void pagefault_disable(void) { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } static inline __attribute__((always_inline)) void pagefault_enable(void) { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); __asm__ __volatile__("": : :"memory"); do { } while (0); } static inline __attribute__((always_inline)) unsigned long __copy_from_user_inatomic_nocache(void *to, const void *from, unsigned long n) { return copy_from_user(to, from, n); } static inline __attribute__((always_inline)) unsigned long __copy_from_user_nocache(void *to, const void *from, unsigned long n) { return copy_from_user(to, from, n); } # 96 "include/linux/uaccess.h" extern long probe_kernel_read(void *dst, void *src, size_t size); # 107 "include/linux/uaccess.h" extern long probe_kernel_write(void *dst, void *src, size_t size); # 7 "include/linux/highmem.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h" 1 # 33 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h" extern void blackfin_icache_dcache_flush_range(unsigned long start_address, unsigned long end_address); extern void blackfin_icache_flush_range(unsigned long start_address, unsigned long end_address); extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address); extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address); extern void blackfin_dflush_page(void *page); extern void blackfin_invalidate_entire_dcache(void); # 55 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h" static inline __attribute__((always_inline)) void flush_icache_range(unsigned start, unsigned end) { blackfin_icache_dcache_flush_range((start), (end)); # 77 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h" } # 100 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h" extern unsigned long reserved_mem_dcache_on; extern unsigned long reserved_mem_icache_on; static inline __attribute__((always_inline)) int bfin_addr_dcachable(unsigned long addr) { if (addr < (_ramend - (1024 * 1024))) return 1; if (reserved_mem_dcache_on && addr >= _ramend && addr < physical_mem_end) return 1; return 0; } # 9 "include/linux/highmem.h" 2 static inline __attribute__((always_inline)) void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) { } static inline __attribute__((always_inline)) void flush_kernel_dcache_page(struct page *page) { } # 34 "include/linux/highmem.h" static inline __attribute__((always_inline)) unsigned int nr_free_highpages(void) { return 0; } static inline __attribute__((always_inline)) void *kmap(struct page *page) { do { do { } while (0); } while (0); return lowmem_page_address(page); } # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/kmap_types.h" 1 enum km_type { KM_BOUNCE_READ, KM_SKB_SUNRPC_DATA, KM_SKB_DATA_SOFTIRQ, KM_USER0, KM_USER1, KM_BIO_SRC_IRQ, KM_BIO_DST_IRQ, KM_PTE0, KM_PTE1, KM_IRQ0, KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, KM_TYPE_NR }; # 48 "include/linux/highmem.h" 2 static inline __attribute__((always_inline)) void *kmap_atomic(struct page *page, enum km_type idx) { 
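/*
 * Editorial note with an illustrative sketch (not from the sources above):
 * with no highmem in this configuration, kmap_atomic() below is simply
 * pagefault_disable() plus lowmem_page_address(), and the matching
 * "do { pagefault_enable(); } while (0)" blocks in the highpage helpers that
 * follow are the expansion of kunmap_atomic(). The usual calling pattern in
 * this kernel era (page, src and len are hypothetical) is:
 *
 *   void *vaddr = kmap_atomic(page, KM_USER0);
 *   memcpy(vaddr, src, len);
 *   kunmap_atomic(vaddr, KM_USER0);
 *
 * i.e. the mapping is short-lived, the caller must not sleep while it is
 * held, and nested mappings are released in reverse order.
 */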
pagefault_disable(); return lowmem_page_address(page); } # 67 "include/linux/highmem.h" static inline __attribute__((always_inline)) void clear_user_highpage(struct page *page, unsigned long vaddr) { void *addr = kmap_atomic(page, KM_USER0); memset((addr), 0, (1UL << 12)); do { pagefault_enable(); } while (0); } # 90 "include/linux/highmem.h" static inline __attribute__((always_inline)) struct page * __alloc_zeroed_user_highpage(gfp_t movableflags, struct vm_area_struct *vma, unsigned long vaddr) { struct page *page = alloc_pages_node((((void)(0),0)), ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( gfp_t)0x20000u) | (( gfp_t)0x02u)) | movableflags, 0); if (page) clear_user_highpage(page, vaddr); return page; } # 113 "include/linux/highmem.h" static inline __attribute__((always_inline)) struct page * alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, unsigned long vaddr) { return __alloc_zeroed_user_highpage((( gfp_t)0x100000u), vma, vaddr); } static inline __attribute__((always_inline)) void clear_highpage(struct page *page) { void *kaddr = kmap_atomic(page, KM_USER0); memset((kaddr), 0, (1UL << 12)); do { pagefault_enable(); } while (0); } static inline __attribute__((always_inline)) void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2) { void *kaddr = kmap_atomic(page, KM_USER0); do { if (__builtin_expect(!!(end1 > (1UL << 12) || end2 > (1UL << 12)), 0)) do { dump_bfin_trace_buffer(); printk("<0>" "BUG: failure at %s:%d/%s()!\n", "include/linux/highmem.h", 133, __func__); panic("BUG!"); } while (0); } while(0); if (end1 > start1) memset(kaddr + start1, 0, end1 - start1); if (end2 > start2) memset(kaddr + start2, 0, end2 - start2); do { pagefault_enable(); } while (0); blackfin_dflush_page(lowmem_page_address(page)); } static inline __attribute__((always_inline)) void zero_user_segment(struct page *page, unsigned start, unsigned end) { zero_user_segments(page, start, end, 0, 0); } static inline __attribute__((always_inline)) void zero_user(struct page *page, unsigned start, unsigned size) { zero_user_segments(page, start, start + size, 0, 0); } static inline __attribute__((always_inline)) void __attribute__((deprecated)) memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size) { zero_user(page, offset, size); } static inline __attribute__((always_inline)) void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { char *vfrom, *vto; vfrom = kmap_atomic(from, KM_USER0); vto = kmap_atomic(to, KM_USER1); memcpy((vto), (vfrom), (1UL << 12)); do { pagefault_enable(); } while (0); do { pagefault_enable(); } while (0); } static inline __attribute__((always_inline)) void copy_highpage(struct page *to, struct page *from) { char *vfrom, *vto; vfrom = kmap_atomic(from, KM_USER0); vto = kmap_atomic(to, KM_USER1); memcpy((vto), (vfrom), (1UL << 12)); do { pagefault_enable(); } while (0); do { pagefault_enable(); } while (0); } # 11 "include/linux/pagemap.h" 2 # 1 "include/linux/hardirq.h" 1 # 1 "include/linux/smp_lock.h" 1 # 6 "include/linux/hardirq.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/hardirq.h" 1 typedef struct { unsigned int __softirq_pending; unsigned int __syscall_count; struct task_struct *__ksoftirqd_task; } irq_cpustat_t; # 1 "include/linux/irq_cpustat.h" 1 # 20 "include/linux/irq_cpustat.h" extern irq_cpustat_t irq_stat[]; # 15 
"/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/hardirq.h" 2 # 45 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/hardirq.h" extern void ack_bad_irq(unsigned int irq); # 8 "include/linux/hardirq.h" 2 # 113 "include/linux/hardirq.h" struct task_struct; static inline __attribute__((always_inline)) void account_system_vtime(struct task_struct *tsk) { } # 146 "include/linux/hardirq.h" extern void irq_enter(void); # 162 "include/linux/hardirq.h" extern void irq_exit(void); # 16 "include/linux/pagemap.h" 2 # 25 "include/linux/pagemap.h" static inline __attribute__((always_inline)) void mapping_set_error(struct address_space *mapping, int error) { if (__builtin_expect(!!(error), 0)) { if (error == -28) set_bit((21 + 1), &mapping->flags); else set_bit((21 + 0), &mapping->flags); } } # 55 "include/linux/pagemap.h" static inline __attribute__((always_inline)) void mapping_set_unevictable(struct address_space *mapping) { } static inline __attribute__((always_inline)) void mapping_clear_unevictable(struct address_space *mapping) { } static inline __attribute__((always_inline)) int mapping_unevictable(struct address_space *mapping) { return 0; } static inline __attribute__((always_inline)) gfp_t mapping_gfp_mask(struct address_space * mapping) { return ( gfp_t)mapping->flags & (( gfp_t)((1 << 21) - 1)); } static inline __attribute__((always_inline)) void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) { m->flags = (m->flags & ~( unsigned long)(( gfp_t)((1 << 21) - 1))) | ( unsigned long)mask; } # 93 "include/linux/pagemap.h" void release_pages(struct page **pages, int nr, int cold); # 139 "include/linux/pagemap.h" static inline __attribute__((always_inline)) int page_cache_get_speculative(struct page *page) { do { } while (0); # 156 "include/linux/pagemap.h" do { } while (0); atomic_inc(&page->_count); # 169 "include/linux/pagemap.h" do { } while (0); return 1; } static inline __attribute__((always_inline)) int page_cache_add_speculative(struct page *page, int count) { do { } while (0); do { } while (0); atomic_add(count, &page->_count); do { } while (0); return 1; } static inline __attribute__((always_inline)) int page_freeze_refs(struct page *page, int count) { return __builtin_expect(!!(((int)((__typeof__(*((&((&page->_count)->counter)))))__cmpxchg_local_generic(((&((&page->_count)->counter))), (unsigned long)(((count))), (unsigned long)(((0))), sizeof(*((&((&page->_count)->counter))))))) == count), 1); } static inline __attribute__((always_inline)) void page_unfreeze_refs(struct page *page, int count) { do { } while (0); do { } while (0); (((&page->_count)->counter) = count); } static inline __attribute__((always_inline)) struct page *__page_cache_alloc(gfp_t gfp) { return alloc_pages_node((((void)(0),0)), gfp, 0); } static inline __attribute__((always_inline)) struct page *page_cache_alloc(struct address_space *x) { return __page_cache_alloc(mapping_gfp_mask(x)); } static inline __attribute__((always_inline)) struct page *page_cache_alloc_cold(struct address_space *x) { return __page_cache_alloc(mapping_gfp_mask(x)|(( gfp_t)0x100u)); } typedef int filler_t(void *, struct page *); extern struct page * find_get_page(struct address_space *mapping, unsigned long index); extern struct page * find_lock_page(struct address_space *mapping, unsigned long index); extern struct page * find_or_create_page(struct address_space *mapping, unsigned long index, gfp_t gfp_mask); unsigned find_get_pages(struct address_space *mapping, unsigned long start, unsigned int 
nr_pages, struct page **pages); unsigned find_get_pages_contig(struct address_space *mapping, unsigned long start, unsigned int nr_pages, struct page **pages); unsigned find_get_pages_tag(struct address_space *mapping, unsigned long *index, int tag, unsigned int nr_pages, struct page **pages); struct page *__grab_cache_page(struct address_space *mapping, unsigned long index); static inline __attribute__((always_inline)) struct page *grab_cache_page(struct address_space *mapping, unsigned long index) { return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); } extern struct page * grab_cache_page_nowait(struct address_space *mapping, unsigned long index); extern struct page * read_cache_page_async(struct address_space *mapping, unsigned long index, filler_t *filler, void *data); extern struct page * read_cache_page(struct address_space *mapping, unsigned long index, filler_t *filler, void *data); extern int read_cache_pages(struct address_space *mapping, struct list_head *pages, filler_t *filler, void *data); static inline __attribute__((always_inline)) struct page *read_mapping_page_async( struct address_space *mapping, unsigned long index, void *data) { filler_t *filler = (filler_t *)mapping->a_ops->readpage; return read_cache_page_async(mapping, index, filler, data); } static inline __attribute__((always_inline)) struct page *read_mapping_page(struct address_space *mapping, unsigned long index, void *data) { filler_t *filler = (filler_t *)mapping->a_ops->readpage; return read_cache_page(mapping, index, filler, data); } static inline __attribute__((always_inline)) loff_t page_offset(struct page *page) { return ((loff_t)page->index) << 12; } static inline __attribute__((always_inline)) unsigned long linear_page_index(struct vm_area_struct *vma, unsigned long address) { unsigned long pgoff = (address - vma->vm_start) >> 12; pgoff += vma->vm_pgoff; return pgoff >> (12 - 12); } extern void __lock_page(struct page *page); extern int __lock_page_killable(struct page *page); extern void __lock_page_nosync(struct page *page); extern void unlock_page(struct page *page); static inline __attribute__((always_inline)) void __set_page_locked(struct page *page) { __set_bit(PG_locked, &page->flags); } static inline __attribute__((always_inline)) void __clear_page_locked(struct page *page) { __clear_bit(PG_locked, &page->flags); } static inline __attribute__((always_inline)) int trylock_page(struct page *page) { return (__builtin_expect(!!(!test_and_set_bit(PG_locked, &page->flags)), 1)); } static inline __attribute__((always_inline)) void lock_page(struct page *page) { do { do { } while (0); } while (0); if (!trylock_page(page)) __lock_page(page); } static inline __attribute__((always_inline)) int lock_page_killable(struct page *page) { do { do { } while (0); } while (0); if (!trylock_page(page)) return __lock_page_killable(page); return 0; } static inline __attribute__((always_inline)) void lock_page_nosync(struct page *page) { do { do { } while (0); } while (0); if (!trylock_page(page)) __lock_page_nosync(page); } extern void wait_on_page_bit(struct page *page, int bit_nr); # 364 "include/linux/pagemap.h" static inline __attribute__((always_inline)) void wait_on_page_locked(struct page *page) { if (PageLocked(page)) wait_on_page_bit(page, PG_locked); } static inline __attribute__((always_inline)) void wait_on_page_writeback(struct page *page) { if (PageWriteback(page)) wait_on_page_bit(page, PG_writeback); } extern void end_page_writeback(struct page *page); static inline 
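/*
 * Illustrative sketch (editorial, not part of the sources above): the
 * find_*_page() and lock_page()/unlock_page() helpers declared here are
 * normally used together; a page taken from the page cache stays referenced
 * and is locked while it is inspected or modified. A minimal caller, with
 * mapping and index as hypothetical variables:
 *
 *   struct page *page = find_lock_page(mapping, index);
 *   if (page) {
 *           // page is locked and referenced here
 *           // ... inspect or dirty the page ...
 *           unlock_page(page);
 *           page_cache_release(page);     // drop the page cache reference
 *   }
 *
 * find_lock_page() returns NULL when the page is not present, so callers
 * that need the data to exist typically fall back to find_or_create_page()
 * or read_mapping_page().
 */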
__attribute__((always_inline)) int fault_in_pages_writeable(char *uaddr, int size) { int ret; if (__builtin_expect(!!(size == 0), 0)) return 0; ret = ({ int _err = 0; typeof(*(uaddr)) _x = (0); typeof(*(uaddr)) *_p = (uaddr); if (!_access_ok((unsigned long)(_p), (sizeof(*(_p))))) { _err = -14; } else { switch (sizeof (*(_p))) { case 1: __asm__ ("B""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 2: __asm__ ("W""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 4: __asm__ ("""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 8: { long _xl, _xh; _xl = ((long *)&_x)[0]; _xh = ((long *)&_x)[1]; __asm__ ("""[%1] = %0;\n\t" : :"d" (_xl),"a" (((unsigned long *)(((long *)_p)+0))) : "memory"); __asm__ ("""[%1] = %0;\n\t" : :"d" (_xh),"a" (((unsigned long *)(((long *)_p)+1))) : "memory"); } break; default: _err = (printk("<6>" "put_user_bad %s:%d %s\n", "include/linux/pagemap.h", 398, __func__), bad_user_access_length(), (-14)); break; } } _err; }); if (ret == 0) { char *end = uaddr + size - 1; if (((unsigned long)uaddr & (~((1UL << 12)-1))) != ((unsigned long)end & (~((1UL << 12)-1)))) ret = ({ int _err = 0; typeof(*(end)) _x = (0); typeof(*(end)) *_p = (end); if (!_access_ok((unsigned long)(_p), (sizeof(*(_p))))) { _err = -14; } else { switch (sizeof (*(_p))) { case 1: __asm__ ("B""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 2: __asm__ ("W""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 4: __asm__ ("""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 8: { long _xl, _xh; _xl = ((long *)&_x)[0]; _xh = ((long *)&_x)[1]; __asm__ ("""[%1] = %0;\n\t" : :"d" (_xl),"a" (((unsigned long *)(((long *)_p)+0))) : "memory"); __asm__ ("""[%1] = %0;\n\t" : :"d" (_xh),"a" (((unsigned long *)(((long *)_p)+1))) : "memory"); } break; default: _err = (printk("<6>" "put_user_bad %s:%d %s\n", "include/linux/pagemap.h", 408, __func__), bad_user_access_length(), (-14)); break; } } _err; }); } return ret; } static inline __attribute__((always_inline)) int fault_in_pages_readable(const char *uaddr, int size) { volatile char c; int ret; if (__builtin_expect(!!(size == 0), 0)) return 0; ret = ({ int _err = 0; unsigned long _val = 0; const typeof(*(uaddr)) *_p = (uaddr); const size_t ptr_size = sizeof(*(_p)); if (__builtin_expect(!!(_access_ok((unsigned long)(_p), (ptr_size))), 1)) { ((void)sizeof(char[1 - 2*!!(ptr_size >= 8)])); switch (ptr_size) { case 1: ({ __asm__ __volatile__ ( "%0 =" "B" "[%1]" "(Z)" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; case 2: ({ __asm__ __volatile__ ( "%0 =" "W" "[%1]" "(Z)" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; case 4: ({ __asm__ __volatile__ ( "%0 =" "" "[%1]" "" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; } } else _err = -14; c = (typeof(*(uaddr)))_val; _err; }); if (ret == 0) { const char *end = uaddr + size - 1; if (((unsigned long)uaddr & (~((1UL << 12)-1))) != ((unsigned long)end & (~((1UL << 12)-1)))) ret = ({ int _err = 0; unsigned long _val = 0; const typeof(*(end)) *_p = (end); const size_t ptr_size = sizeof(*(_p)); if (__builtin_expect(!!(_access_ok((unsigned long)(_p), (ptr_size))), 1)) { ((void)sizeof(char[1 - 2*!!(ptr_size >= 8)])); switch (ptr_size) { case 1: ({ __asm__ __volatile__ ( "%0 =" "B" "[%1]" "(Z)" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; case 2: ({ __asm__ __volatile__ ( "%0 =" 
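/*
 * Editorial note (illustrative, not part of the preprocessed text): the large
 * "({ int _err = 0; ... __asm__(...) ... })" statement expressions in
 * fault_in_pages_writeable() above and fault_in_pages_readable() below are
 * the Blackfin expansions of put_user() and get_user(); each helper touches
 * the first and the last byte of the user buffer so that any fault (reported
 * as -EFAULT, i.e. -14 here) is taken before the caller enters a region that
 * cannot handle faults. Written with the unexpanded macros, the readable
 * variant is roughly:
 *
 *   static inline int fault_in_pages_readable(const char *uaddr, int size)
 *   {
 *           volatile char c;
 *           int ret;
 *
 *           if (unlikely(size == 0))
 *                   return 0;
 *
 *           ret = __get_user(c, uaddr);               // touch the first byte
 *           if (ret == 0) {
 *                   const char *end = uaddr + size - 1;
 *
 *                   if (((unsigned long)uaddr & PAGE_MASK) !=
 *                       ((unsigned long)end & PAGE_MASK))
 *                           ret = __get_user(c, end); // and the last byte
 *           }
 *           return ret;
 *   }
 */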
"W" "[%1]" "(Z)" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; case 4: ({ __asm__ __volatile__ ( "%0 =" "" "[%1]" "" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; } } else _err = -14; c = (typeof(*(end)))_val; _err; }); } return ret; } int add_to_page_cache_locked(struct page *page, struct address_space *mapping, unsigned long index, gfp_t gfp_mask); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, unsigned long index, gfp_t gfp_mask); extern void remove_from_page_cache(struct page *page); extern void __remove_from_page_cache(struct page *page); static inline __attribute__((always_inline)) int add_to_page_cache(struct page *page, struct address_space *mapping, unsigned long offset, gfp_t gfp_mask) { int error; __set_page_locked(page); error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); if (__builtin_expect(!!(error), 0)) __clear_page_locked(page); return error; } # 20 "kernel/trace/trace.c" 2 # 1 "include/linux/ftrace.h" 1 # 9 "include/linux/ftrace.h" # 1 "include/linux/module.h" 1 # 13 "include/linux/module.h" # 1 "include/linux/kmod.h" 1 # 35 "include/linux/kmod.h" static inline __attribute__((always_inline)) int request_module(const char * name, ...) { return -38; } struct key; struct file; struct subprocess_info; struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, char **envp, gfp_t gfp_mask); void call_usermodehelper_setkeys(struct subprocess_info *info, struct key *session_keyring); int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info, struct file **filp); void call_usermodehelper_setcleanup(struct subprocess_info *info, void (*cleanup)(char **argv, char **envp)); enum umh_wait { UMH_NO_WAIT = -1, UMH_WAIT_EXEC = 0, UMH_WAIT_PROC = 1, }; int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait); void call_usermodehelper_freeinfo(struct subprocess_info *info); static inline __attribute__((always_inline)) int call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait) { struct subprocess_info *info; gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? ((( gfp_t)0x20u)) : ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)); info = call_usermodehelper_setup(path, argv, envp, gfp_mask); if (info == ((void *)0)) return -12; return call_usermodehelper_exec(info, wait); } static inline __attribute__((always_inline)) int call_usermodehelper_keys(char *path, char **argv, char **envp, struct key *session_keyring, enum umh_wait wait) { struct subprocess_info *info; gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? 
((( gfp_t)0x20u)) : ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)); info = call_usermodehelper_setup(path, argv, envp, gfp_mask); if (info == ((void *)0)) return -12; call_usermodehelper_setkeys(info, session_keyring); return call_usermodehelper_exec(info, wait); } extern void usermodehelper_init(void); struct file; extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[], struct file **filp); extern int usermodehelper_disable(void); extern void usermodehelper_enable(void); # 14 "include/linux/module.h" 2 # 1 "include/linux/elf.h" 1 # 1 "include/linux/elf-em.h" 1 # 6 "include/linux/elf.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/elf.h" 1 # 21 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/elf.h" typedef unsigned long elf_greg_t; typedef elf_greg_t elf_gregset_t[(sizeof(struct user_regs_struct) / sizeof(elf_greg_t))]; typedef struct user_bfinfp_struct elf_fpregset_t; # 8 "include/linux/elf.h" 2 struct file; # 20 "include/linux/elf.h" typedef __u32 Elf32_Addr; typedef __u16 Elf32_Half; typedef __u32 Elf32_Off; typedef __s32 Elf32_Sword; typedef __u32 Elf32_Word; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __s16 Elf64_SHalf; typedef __u64 Elf64_Off; typedef __s32 Elf64_Sword; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; typedef __s64 Elf64_Sxword; # 127 "include/linux/elf.h" typedef struct dynamic{ Elf32_Sword d_tag; union{ Elf32_Sword d_val; Elf32_Addr d_ptr; } d_un; } Elf32_Dyn; typedef struct { Elf64_Sxword d_tag; union { Elf64_Xword d_val; Elf64_Addr d_ptr; } d_un; } Elf64_Dyn; # 150 "include/linux/elf.h" typedef struct elf32_rel { Elf32_Addr r_offset; Elf32_Word r_info; } Elf32_Rel; typedef struct elf64_rel { Elf64_Addr r_offset; Elf64_Xword r_info; } Elf64_Rel; typedef struct elf32_rela{ Elf32_Addr r_offset; Elf32_Word r_info; Elf32_Sword r_addend; } Elf32_Rela; typedef struct elf64_rela { Elf64_Addr r_offset; Elf64_Xword r_info; Elf64_Sxword r_addend; } Elf64_Rela; typedef struct elf32_sym{ Elf32_Word st_name; Elf32_Addr st_value; Elf32_Word st_size; unsigned char st_info; unsigned char st_other; Elf32_Half st_shndx; } Elf32_Sym; typedef struct elf64_sym { Elf64_Word st_name; unsigned char st_info; unsigned char st_other; Elf64_Half st_shndx; Elf64_Addr st_value; Elf64_Xword st_size; } Elf64_Sym; typedef struct elf32_hdr{ unsigned char e_ident[16]; Elf32_Half e_type; Elf32_Half e_machine; Elf32_Word e_version; Elf32_Addr e_entry; Elf32_Off e_phoff; Elf32_Off e_shoff; Elf32_Word e_flags; Elf32_Half e_ehsize; Elf32_Half e_phentsize; Elf32_Half e_phnum; Elf32_Half e_shentsize; Elf32_Half e_shnum; Elf32_Half e_shstrndx; } Elf32_Ehdr; typedef struct elf64_hdr { unsigned char e_ident[16]; Elf64_Half e_type; Elf64_Half e_machine; Elf64_Word e_version; Elf64_Addr e_entry; Elf64_Off e_phoff; Elf64_Off e_shoff; Elf64_Word e_flags; Elf64_Half e_ehsize; Elf64_Half e_phentsize; Elf64_Half e_phnum; Elf64_Half e_shentsize; Elf64_Half e_shnum; Elf64_Half e_shstrndx; } Elf64_Ehdr; typedef struct elf32_phdr{ Elf32_Word p_type; Elf32_Off p_offset; Elf32_Addr p_vaddr; Elf32_Addr p_paddr; Elf32_Word p_filesz; Elf32_Word p_memsz; Elf32_Word p_flags; Elf32_Word p_align; } Elf32_Phdr; typedef struct elf64_phdr { Elf64_Word p_type; Elf64_Word p_flags; Elf64_Off p_offset; Elf64_Addr p_vaddr; Elf64_Addr p_paddr; Elf64_Xword p_filesz; Elf64_Xword p_memsz; Elf64_Xword p_align; } Elf64_Phdr; # 289 "include/linux/elf.h" typedef struct { Elf32_Word sh_name; Elf32_Word sh_type; Elf32_Word sh_flags; Elf32_Addr 
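/*
 * Illustrative sketch (editorial, not from the sources above): the
 * call_usermodehelper() wrapper defined a little earlier is the usual way a
 * driver launches a user-space helper; argv/envp are NULL-terminated arrays
 * and the umh_wait argument selects whether the caller blocks until exec or
 * until the helper exits. The helper path and arguments below are
 * hypothetical:
 *
 *   char *argv[] = { "/sbin/helper", "--event", "plugged", NULL };
 *   char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
 *   int ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
 *   if (ret)
 *           printk("helper exited with %d\n", ret);
 */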
sh_addr; Elf32_Off sh_offset; Elf32_Word sh_size; Elf32_Word sh_link; Elf32_Word sh_info; Elf32_Word sh_addralign; Elf32_Word sh_entsize; } Elf32_Shdr; typedef struct elf64_shdr { Elf64_Word sh_name; Elf64_Word sh_type; Elf64_Xword sh_flags; Elf64_Addr sh_addr; Elf64_Off sh_offset; Elf64_Xword sh_size; Elf64_Word sh_link; Elf64_Word sh_info; Elf64_Xword sh_addralign; Elf64_Xword sh_entsize; } Elf64_Shdr; # 367 "include/linux/elf.h" typedef struct elf32_note { Elf32_Word n_namesz; Elf32_Word n_descsz; Elf32_Word n_type; } Elf32_Nhdr; typedef struct elf64_note { Elf64_Word n_namesz; Elf64_Word n_descsz; Elf64_Word n_type; } Elf64_Nhdr; extern Elf32_Dyn _DYNAMIC []; # 400 "include/linux/elf.h" static inline __attribute__((always_inline)) int elf_coredump_extra_notes_size(void) { return 0; } static inline __attribute__((always_inline)) int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset) { return 0; } # 15 "include/linux/module.h" 2 # 1 "include/linux/moduleparam.h" 1 # 32 "include/linux/moduleparam.h" struct kernel_param; typedef int (*param_set_fn)(const char *val, struct kernel_param *kp); typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp); struct kernel_param { const char *name; unsigned int perm; param_set_fn set; param_get_fn get; union { void *arg; const struct kparam_string *str; const struct kparam_array *arr; }; }; struct kparam_string { unsigned int maxlen; char *string; }; struct kparam_array { unsigned int max; unsigned int *num; param_set_fn set; param_get_fn get; unsigned int elemsize; void *elem; }; # 135 "include/linux/moduleparam.h" extern int parse_args(const char *name, char *args, struct kernel_param *params, unsigned num, int (*unknown)(char *param, char *val)); extern int param_set_byte(const char *val, struct kernel_param *kp); extern int param_get_byte(char *buffer, struct kernel_param *kp); extern int param_set_short(const char *val, struct kernel_param *kp); extern int param_get_short(char *buffer, struct kernel_param *kp); extern int param_set_ushort(const char *val, struct kernel_param *kp); extern int param_get_ushort(char *buffer, struct kernel_param *kp); extern int param_set_int(const char *val, struct kernel_param *kp); extern int param_get_int(char *buffer, struct kernel_param *kp); extern int param_set_uint(const char *val, struct kernel_param *kp); extern int param_get_uint(char *buffer, struct kernel_param *kp); extern int param_set_long(const char *val, struct kernel_param *kp); extern int param_get_long(char *buffer, struct kernel_param *kp); extern int param_set_ulong(const char *val, struct kernel_param *kp); extern int param_get_ulong(char *buffer, struct kernel_param *kp); extern int param_set_charp(const char *val, struct kernel_param *kp); extern int param_get_charp(char *buffer, struct kernel_param *kp); extern int param_set_bool(const char *val, struct kernel_param *kp); extern int param_get_bool(char *buffer, struct kernel_param *kp); extern int param_set_invbool(const char *val, struct kernel_param *kp); extern int param_get_invbool(char *buffer, struct kernel_param *kp); # 199 "include/linux/moduleparam.h" extern int param_array_set(const char *val, struct kernel_param *kp); extern int param_array_get(char *buffer, struct kernel_param *kp); extern int param_set_copystring(const char *val, struct kernel_param *kp); extern int param_get_string(char *buffer, struct kernel_param *kp); struct module; # 216 "include/linux/moduleparam.h" static inline __attribute__((always_inline)) int module_param_sysfs_setup(struct 
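/*
 * Illustrative sketch (editorial, not from the sources above): the
 * kernel_param structure and the param_set_int()/param_get_int() handlers
 * declared in moduleparam.h above are what the module_param() macro wires
 * together. Typical module usage, where the "debug" parameter is a
 * hypothetical example:
 *
 *   static int debug;
 *   module_param(debug, int, 0644);
 *   MODULE_PARM_DESC(debug, "enable verbose diagnostics");
 *
 * which exposes /sys/module/<name>/parameters/debug, using param_set_int()
 * to parse writes and param_get_int() to format reads.
 */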
module *mod, struct kernel_param *kparam, unsigned int num_params) { return 0; } static inline __attribute__((always_inline)) void module_param_sysfs_remove(struct module *mod) { } # 18 "include/linux/module.h" 2 # 1 "include/linux/marker.h" 1 # 17 "include/linux/marker.h" struct module; struct marker; # 32 "include/linux/marker.h" typedef void marker_probe_func(void *probe_private, void *call_private, const char *fmt, va_list *args); struct marker_probe_closure { marker_probe_func *func; void *probe_private; }; struct marker { const char *name; const char *format; char state; char ptype; void (*call)(const struct marker *mdata, void *call_private, ...); struct marker_probe_closure single; struct marker_probe_closure *multi; } __attribute__((aligned(8))); # 83 "include/linux/marker.h" extern void marker_update_probe_range(struct marker *begin, struct marker *end); # 125 "include/linux/marker.h" static inline __attribute__((always_inline)) void __attribute__((format(printf,1,2))) ___mark_check_format(const char *fmt, ...) { } extern marker_probe_func __mark_empty_function; extern void marker_probe_cb(const struct marker *mdata, void *call_private, ...); extern void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...); extern int marker_probe_register(const char *name, const char *format, marker_probe_func *probe, void *probe_private); extern int marker_probe_unregister(const char *name, marker_probe_func *probe, void *probe_private); extern int marker_probe_unregister_private_data(marker_probe_func *probe, void *probe_private); extern void *marker_get_private_data(const char *name, marker_probe_func *probe, int num); # 19 "include/linux/module.h" 2 # 1 "include/linux/tracepoint.h" 1 # 20 "include/linux/tracepoint.h" struct module; struct tracepoint; struct tracepoint { const char *name; int state; void **funcs; } __attribute__((aligned(8))); # 82 "include/linux/tracepoint.h" extern void tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end); # 107 "include/linux/tracepoint.h" extern int tracepoint_probe_register(const char *name, void *probe); extern int tracepoint_probe_unregister(const char *name, void *probe); struct tracepoint_iter { struct module *module; struct tracepoint *tracepoint; }; extern void tracepoint_iter_start(struct tracepoint_iter *iter); extern void tracepoint_iter_next(struct tracepoint_iter *iter); extern void tracepoint_iter_stop(struct tracepoint_iter *iter); extern void tracepoint_iter_reset(struct tracepoint_iter *iter); extern int tracepoint_get_iter_range(struct tracepoint **tracepoint, struct tracepoint *begin, struct tracepoint *end); static inline __attribute__((always_inline)) void tracepoint_synchronize_unregister(void) { synchronize_rcu(); } # 20 "include/linux/module.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/local.h" 1 # 1 "include/asm-generic/local.h" 1 # 22 "include/asm-generic/local.h" typedef struct { atomic_long_t a; } local_t; # 5 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/local.h" 2 # 21 "include/linux/module.h" 2 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/module.h" 1 # 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/module.h" struct mod_arch_specific { Elf32_Shdr *text_l1; Elf32_Shdr *data_a_l1; Elf32_Shdr *bss_a_l1; Elf32_Shdr *data_b_l1; Elf32_Shdr *bss_b_l1; Elf32_Shdr *text_l2; Elf32_Shdr *data_l2; Elf32_Shdr *bss_l2; }; # 23 "include/linux/module.h" 2 # 34 "include/linux/module.h" struct 
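/* linux/marker.h and linux/tracepoint.h above: a marker pairs a name with a
   printf-style format string and probe closures, while a tracepoint is just
   name/state/funcs and is managed through tracepoint_probe_register() and
   tracepoint_probe_unregister(); the Blackfin mod_arch_specific struct keeps
   per-module Elf32_Shdr pointers for the on-chip L1/L2 text, data and bss
   sections. */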
kernel_symbol { unsigned long value; const char *name; }; struct modversion_info { unsigned long crc; char name[(64 - sizeof(unsigned long))]; }; struct module; struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module *, char *); ssize_t (*store)(struct module_attribute *, struct module *, const char *, size_t count); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); }; struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; }; extern int init_module(void); extern void cleanup_module(void); struct exception_table_entry; const struct exception_table_entry * search_extable(const struct exception_table_entry *first, const struct exception_table_entry *last, unsigned long value); void sort_extable(struct exception_table_entry *start, struct exception_table_entry *finish); void sort_main_extable(void); # 165 "include/linux/module.h" const struct exception_table_entry *search_exception_tables(unsigned long add); struct notifier_block; # 472 "include/linux/module.h" static inline __attribute__((always_inline)) const struct exception_table_entry * search_module_extables(unsigned long addr) { return ((void *)0); } static inline __attribute__((always_inline)) struct module *module_text_address(unsigned long addr) { return ((void *)0); } static inline __attribute__((always_inline)) struct module *__module_text_address(unsigned long addr) { return ((void *)0); } static inline __attribute__((always_inline)) int is_module_address(unsigned long addr) { return 0; } static inline __attribute__((always_inline)) void __module_get(struct module *module) { } static inline __attribute__((always_inline)) int try_module_get(struct module *module) { return 1; } static inline __attribute__((always_inline)) void module_put(struct module *module) { } static inline __attribute__((always_inline)) const char *module_address_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf) { return ((void *)0); } static inline __attribute__((always_inline)) int lookup_module_symbol_name(unsigned long addr, char *symname) { return -34; } static inline __attribute__((always_inline)) int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name) { return -34; } static inline __attribute__((always_inline)) int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *name, char *module_name, int *exported) { return -34; } static inline __attribute__((always_inline)) unsigned long module_kallsyms_lookup_name(const char *name) { return 0; } static inline __attribute__((always_inline)) int register_module_notifier(struct notifier_block * nb) { return 0; } static inline __attribute__((always_inline)) int unregister_module_notifier(struct notifier_block * nb) { return 0; } static inline __attribute__((always_inline)) void print_modules(void) { } static inline __attribute__((always_inline)) void module_update_markers(void) { } static inline __attribute__((always_inline)) void module_update_tracepoints(void) { } static inline __attribute__((always_inline)) int module_get_iter_tracepoints(struct tracepoint_iter *iter) { return 0; } struct device_driver; # 596 "include/linux/module.h" static inline __attribute__((always_inline)) int mod_sysfs_init(struct module *mod) { return 0; } static inline __attribute__((always_inline)) int 
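/* The always-inline stubs above come from the !CONFIG_MODULES branch of
   include/linux/module.h: address lookups return NULL or -ERANGE (-34),
   try_module_get() always succeeds and module_put() does nothing;
   mod_sysfs_init() above and mod_sysfs_setup() just below are the matching
   sysfs stubs. */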
mod_sysfs_setup(struct module *mod, struct kernel_param *kparam, unsigned int num_params) { return 0; } static inline __attribute__((always_inline)) int module_add_modinfo_attrs(struct module *mod) { return 0; } static inline __attribute__((always_inline)) void module_remove_modinfo_attrs(struct module *mod) { } # 10 "include/linux/ftrace.h" 2 # 44 "include/linux/ftrace.h" static inline __attribute__((always_inline)) void ftrace_kill(void) { } # 113 "include/linux/ftrace.h" static inline __attribute__((always_inline)) void ftrace_release(void *start, unsigned long size) { } void ftrace_kill(void); static inline __attribute__((always_inline)) void tracer_disable(void) { } static inline __attribute__((always_inline)) int __ftrace_enabled_save(void) { return 0; } static inline __attribute__((always_inline)) void __ftrace_enabled_restore(int enabled) { } # 185 "include/linux/ftrace.h" extern void ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); # 205 "include/linux/ftrace.h" extern int __ftrace_printk(unsigned long ip, const char *fmt, ...) __attribute__ ((format (printf, 2, 3))); extern void ftrace_dump(void); # 227 "include/linux/ftrace.h" static inline __attribute__((always_inline)) void ftrace_init(void) { } static inline __attribute__((always_inline)) void ftrace_init_module(unsigned long *start, unsigned long *end) { } struct boot_trace { pid_t caller; char func[(sizeof("%s+%#lx/%#lx [%s]") + (128 - 1) + 2*(32*3/10) + ((64 - sizeof(unsigned long)) - 1) + 1)]; int result; unsigned long long duration; ktime_t calltime; ktime_t rettime; }; extern void trace_boot(struct boot_trace *it, initcall_t fn); extern void start_boot_trace(void); extern void stop_boot_trace(void); # 24 "kernel/trace/trace.c" 2 # 1 "include/linux/kdebug.h" 1 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/kdebug.h" 1 # 1 "include/asm-generic/kdebug.h" 1 enum die_val { DIE_UNUSED, DIE_OOPS=1 }; # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/kdebug.h" 2 # 5 "include/linux/kdebug.h" 2 struct notifier_block; struct die_args { struct pt_regs *regs; const char *str; long err; int trapnr; int signr; }; int register_die_notifier(struct notifier_block *nb); int unregister_die_notifier(struct notifier_block *nb); int notify_die(enum die_val val, const char *str, struct pt_regs *regs, long err, int trap, int sig); # 27 "kernel/trace/trace.c" 2 # 1 "include/linux/ctype.h" 1 # 18 "include/linux/ctype.h" extern unsigned char _ctype[]; # 37 "include/linux/ctype.h" static inline __attribute__((always_inline)) unsigned char __tolower(unsigned char c) { if ((((_ctype[(int)(unsigned char)(c)])&(0x01)) != 0)) c -= 'A'-'a'; return c; } static inline __attribute__((always_inline)) unsigned char __toupper(unsigned char c) { if ((((_ctype[(int)(unsigned char)(c)])&(0x02)) != 0)) c -= 'a'-'A'; return c; } # 28 "kernel/trace/trace.c" 2 # 1 "include/linux/poll.h" 1 # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/poll.h" 1 # 18 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/poll.h" struct pollfd { int fd; short events; short revents; }; # 5 "include/linux/poll.h" 2 # 26 "include/linux/poll.h" struct poll_table_struct; typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); typedef struct poll_table_struct { poll_queue_proc qproc; } poll_table; static inline __attribute__((always_inline)) void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) { if (p && 
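/* poll_wait(): invoke the poll_table's qproc callback only when both a
   poll_table and a wait queue head were supplied. */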
wait_address) p->qproc(filp, wait_address, p); } static inline __attribute__((always_inline)) void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) { pt->qproc = qproc; } struct poll_table_entry { struct file * filp; wait_queue_t wait; wait_queue_head_t * wait_address; }; struct poll_wqueues { poll_table pt; struct poll_table_page * table; int error; int inline_index; struct poll_table_entry inline_entries[((832 - 256) / sizeof(struct poll_table_entry))]; }; extern void poll_initwait(struct poll_wqueues *pwq); extern void poll_freewait(struct poll_wqueues *pwq); typedef struct { unsigned long *in, *out, *ex; unsigned long *res_in, *res_out, *res_ex; } fd_set_bits; # 90 "include/linux/poll.h" static inline __attribute__((always_inline)) int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset) { nr = ((((nr)+(8*sizeof(long))-1)/(8*sizeof(long)))*sizeof(long)); if (ufdset) return copy_from_user(fdset, ufdset, nr) ? -14 : 0; memset(fdset, 0, nr); return 0; } static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result)) set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset) { if (ufdset) return copy_to_user(ufdset, fdset, ((((nr)+(8*sizeof(long))-1)/(8*sizeof(long)))*sizeof(long))); return 0; } static inline __attribute__((always_inline)) void zero_fd_set(unsigned long nr, unsigned long *fdset) { memset(fdset, 0, ((((nr)+(8*sizeof(long))-1)/(8*sizeof(long)))*sizeof(long))); } extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time); extern int do_sys_poll(struct pollfd * ufds, unsigned int nfds, struct timespec *end_time); extern int core_sys_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timespec *end_time); extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec); # 30 "kernel/trace/trace.c" 2 # 1 "include/linux/kprobes.h" 1 # 261 "include/linux/kprobes.h" struct jprobe; struct kretprobe; static inline __attribute__((always_inline)) struct kprobe *get_kprobe(void *addr) { return ((void *)0); } static inline __attribute__((always_inline)) struct kprobe *kprobe_running(void) { return ((void *)0); } static inline __attribute__((always_inline)) int register_kprobe(struct kprobe *p) { return -38; } static inline __attribute__((always_inline)) int register_kprobes(struct kprobe **kps, int num) { return -38; } static inline __attribute__((always_inline)) void unregister_kprobe(struct kprobe *p) { } static inline __attribute__((always_inline)) void unregister_kprobes(struct kprobe **kps, int num) { } static inline __attribute__((always_inline)) int register_jprobe(struct jprobe *p) { return -38; } static inline __attribute__((always_inline)) int register_jprobes(struct jprobe **jps, int num) { return -38; } static inline __attribute__((always_inline)) void unregister_jprobe(struct jprobe *p) { } static inline __attribute__((always_inline)) void unregister_jprobes(struct jprobe **jps, int num) { } static inline __attribute__((always_inline)) void jprobe_return(void) { } static inline __attribute__((always_inline)) int register_kretprobe(struct kretprobe *rp) { return -38; } static inline __attribute__((always_inline)) int register_kretprobes(struct kretprobe **rps, int num) { return -38; } static inline __attribute__((always_inline)) void unregister_kretprobe(struct kretprobe *rp) { } static inline __attribute__((always_inline)) void unregister_kretprobes(struct kretprobe **rps, int num) { } static inline __attribute__((always_inline)) void kprobe_flush_task(struct task_struct *tk) { } # 33 
"kernel/trace/trace.c" 2 # 1 "include/linux/writeback.h" 1 # 10 "include/linux/writeback.h" struct backing_dev_info; extern spinlock_t inode_lock; extern struct list_head inode_in_use; extern struct list_head inode_unused; static inline __attribute__((always_inline)) int task_is_pdflush(struct task_struct *task) { return task->flags & 0x00001000; } enum writeback_sync_modes { WB_SYNC_NONE, WB_SYNC_ALL, WB_SYNC_HOLD, }; struct writeback_control { struct backing_dev_info *bdi; enum writeback_sync_modes sync_mode; unsigned long *older_than_this; long nr_to_write; long pages_skipped; loff_t range_start; loff_t range_end; unsigned nonblocking:1; unsigned encountered_congestion:1; unsigned for_kupdate:1; unsigned for_reclaim:1; unsigned for_writepages:1; unsigned range_cyclic:1; unsigned more_io:1; # 74 "include/linux/writeback.h" unsigned no_nrwrite_index_update:1; }; void writeback_inodes(struct writeback_control *wbc); int inode_wait(void *); void sync_inodes_sb(struct super_block *, int wait); void sync_inodes(int wait); static inline __attribute__((always_inline)) void wait_on_inode(struct inode *inode) { do { do { } while (0); } while (0); wait_on_bit(&inode->i_state, 7, inode_wait, 2); } static inline __attribute__((always_inline)) void inode_sync_wait(struct inode *inode) { do { do { } while (0); } while (0); wait_on_bit(&inode->i_state, 8, inode_wait, 2); } int wakeup_pdflush(long nr_pages); void laptop_io_completion(void); void laptop_sync_completion(void); void throttle_vm_writeout(gfp_t gfp_mask); extern int dirty_background_ratio; extern int vm_dirty_ratio; extern int dirty_writeback_interval; extern int dirty_expire_interval; extern int vm_highmem_is_dirtyable; extern int block_dump; extern int laptop_mode; extern unsigned long determine_dirtyable_memory(void); extern int dirty_ratio_handler(struct ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp, loff_t *ppos); struct ctl_table; struct file; int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *, void *, size_t *, loff_t *); void get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty, struct backing_dev_info *bdi); void page_writeback_init(void); void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, unsigned long nr_pages_dirtied); static inline __attribute__((always_inline)) void balance_dirty_pages_ratelimited(struct address_space *mapping) { balance_dirty_pages_ratelimited_nr(mapping, 1); } typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, void *data); int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0); int generic_writepages(struct address_space *mapping, struct writeback_control *wbc); int write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, void *data); int do_writepages(struct address_space *mapping, struct writeback_control *wbc); int sync_page_range(struct inode *inode, struct address_space *mapping, loff_t pos, loff_t count); int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, loff_t pos, loff_t count); void set_page_dirty_balance(struct page *page, int page_mkwrite); void writeback_set_ratelimit(void); extern int nr_pdflush_threads; # 34 "kernel/trace/trace.c" 2 # 1 "include/linux/stacktrace.h" 1 struct task_struct; # 36 "kernel/trace/trace.c" 2 # 1 "include/linux/ring_buffer.h" 1 struct ring_buffer; struct ring_buffer_iter; struct ring_buffer_event { u32 type:2, len:3, time_delta:27; u32 array[]; }; # 43 
"include/linux/ring_buffer.h" enum ring_buffer_type { RINGBUF_TYPE_PADDING, RINGBUF_TYPE_TIME_EXTEND, RINGBUF_TYPE_TIME_STAMP, RINGBUF_TYPE_DATA, }; unsigned ring_buffer_event_length(struct ring_buffer_event *event); void *ring_buffer_event_data(struct ring_buffer_event *event); static inline __attribute__((always_inline)) unsigned ring_buffer_event_time_delta(struct ring_buffer_event *event) { return event->time_delta; } struct ring_buffer * ring_buffer_alloc(unsigned long size, unsigned flags); void ring_buffer_free(struct ring_buffer *buffer); int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); struct ring_buffer_event * ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length, unsigned long *flags); int ring_buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags); int ring_buffer_write(struct ring_buffer *buffer, unsigned long length, void *data); struct ring_buffer_event * ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts); struct ring_buffer_event * ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts); struct ring_buffer_iter * ring_buffer_read_start(struct ring_buffer *buffer, int cpu); void ring_buffer_read_finish(struct ring_buffer_iter *iter); struct ring_buffer_event * ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); struct ring_buffer_event * ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); void ring_buffer_iter_reset(struct ring_buffer_iter *iter); int ring_buffer_iter_empty(struct ring_buffer_iter *iter); unsigned long ring_buffer_size(struct ring_buffer *buffer); void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); void ring_buffer_reset(struct ring_buffer *buffer); int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, struct ring_buffer *buffer_b, int cpu); int ring_buffer_empty(struct ring_buffer *buffer); int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); void ring_buffer_record_disable(struct ring_buffer *buffer); void ring_buffer_record_enable(struct ring_buffer *buffer); void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); unsigned long ring_buffer_entries(struct ring_buffer *buffer); unsigned long ring_buffer_overruns(struct ring_buffer *buffer); u64 ring_buffer_time_stamp(int cpu); void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); void tracing_on(void); void tracing_off(void); enum ring_buffer_flags { RB_FL_OVERWRITE = 1 << 0, }; # 37 "kernel/trace/trace.c" 2 # 1 "include/linux/irqflags.h" 1 # 38 "kernel/trace/trace.c" 2 # 1 "kernel/trace/trace.h" 1 # 1 "include/linux/clocksource.h" 1 # 17 "include/linux/clocksource.h" # 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/div64.h" 1 # 18 "include/linux/clocksource.h" 2 typedef u64 cycle_t; struct clocksource; # 57 "include/linux/clocksource.h" struct clocksource { char *name; struct list_head list; int rating; cycle_t (*read)(void); cycle_t mask; u32 mult; u32 mult_orig; u32 shift; unsigned long flags; cycle_t (*vread)(void); void (*resume)(void); # 80 "include/linux/clocksource.h" cycle_t cycle_interval; u64 xtime_interval; u32 raw_interval; cycle_t cycle_last ; u64 xtime_nsec; s64 error; struct timespec raw_time; }; extern struct clocksource *clock; # 122 "include/linux/clocksource.h" static inline __attribute__((always_inline)) u32 clocksource_khz2mult(u32 khz, u32 shift_constant) { u64 tmp = ((u64)1000000) << shift_constant; tmp += khz/2; ({ 
uint32_t __base = (khz); uint32_t __rem; (void)(((typeof((tmp)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((tmp) >> 32) == 0), 1)) { __rem = (uint32_t)(tmp) % __base; (tmp) = (uint32_t)(tmp) / __base; } else __rem = __div64_32(&(tmp), __base); __rem; }); return (u32)tmp; } # 148 "include/linux/clocksource.h" static inline __attribute__((always_inline)) u32 clocksource_hz2mult(u32 hz, u32 shift_constant) { u64 tmp = ((u64)1000000000) << shift_constant; tmp += hz/2; ({ uint32_t __base = (hz); uint32_t __rem; (void)(((typeof((tmp)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((tmp) >> 32) == 0), 1)) { __rem = (uint32_t)(tmp) % __base; (tmp) = (uint32_t)(tmp) / __base; } else __rem = __div64_32(&(tmp), __base); __rem; }); return (u32)tmp; } static inline __attribute__((always_inline)) cycle_t clocksource_read(struct clocksource *cs) { return cs->read(); } # 185 "include/linux/clocksource.h" static inline __attribute__((always_inline)) s64 cyc2ns(struct clocksource *cs, cycle_t cycles) { u64 ret = (u64)cycles; ret = (ret * cs->mult) >> cs->shift; return ret; } # 203 "include/linux/clocksource.h" static inline __attribute__((always_inline)) void clocksource_calculate_interval(struct clocksource *c, unsigned long length_nsec) { u64 tmp; tmp = length_nsec; tmp <<= c->shift; tmp += c->mult_orig/2; ({ uint32_t __base = (c->mult_orig); uint32_t __rem; (void)(((typeof((tmp)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((tmp) >> 32) == 0), 1)) { __rem = (uint32_t)(tmp) % __base; (tmp) = (uint32_t)(tmp) / __base; } else __rem = __div64_32(&(tmp), __base); __rem; }); c->cycle_interval = (cycle_t)tmp; if (c->cycle_interval == 0) c->cycle_interval = 1; c->xtime_interval = (u64)c->cycle_interval * c->mult; c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift; } extern int clocksource_register(struct clocksource*); extern void clocksource_unregister(struct clocksource*); extern void clocksource_touch_watchdog(void); extern struct clocksource* clocksource_get_next(void); extern void clocksource_change_rating(struct clocksource *cs, int rating); extern void clocksource_resume(void); static inline __attribute__((always_inline)) void update_vsyscall(struct timespec *ts, struct clocksource *c) { } static inline __attribute__((always_inline)) void update_vsyscall_tz(void) { } # 8 "kernel/trace/trace.h" 2 # 1 "include/linux/mmiotrace.h" 1 struct kmmio_probe; struct pt_regs; typedef void (*kmmio_pre_handler_t)(struct kmmio_probe *, struct pt_regs *, unsigned long addr); typedef void (*kmmio_post_handler_t)(struct kmmio_probe *, unsigned long condition, struct pt_regs *); struct kmmio_probe { struct list_head list; unsigned long addr; unsigned long len; kmmio_pre_handler_t pre_handler; kmmio_post_handler_t post_handler; void *private; }; static inline __attribute__((always_inline)) int is_kmmio_active(void) { extern unsigned int kmmio_count; return kmmio_count; } extern int register_kmmio_probe(struct kmmio_probe *p); extern void unregister_kmmio_probe(struct kmmio_probe *p); extern int kmmio_handler(struct pt_regs *regs, unsigned long addr); # 47 "include/linux/mmiotrace.h" static inline __attribute__((always_inline)) void mmiotrace_ioremap(resource_size_t offset, unsigned long size, void *addr) { } static inline __attribute__((always_inline)) void mmiotrace_iounmap(volatile void *addr) { } static inline __attribute__((always_inline)) int mmiotrace_printk(const char *fmt, ...) 
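/* The ({ uint32_t __base ... }) statement expressions above are the expanded
   do_div() macro used by clocksource_khz2mult(), clocksource_hz2mult() and
   clocksource_calculate_interval(): a 64-by-32-bit divide that takes a fast
   path when the upper 32 bits are zero and otherwise calls __div64_32().
   Below, CONFIG_MMIOTRACE is not enabled, so mmiotrace_printk() is an inline
   stub that returns 0. */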
__attribute__ ((format (printf, 1, 0))); static inline __attribute__((always_inline)) int mmiotrace_printk(const char *fmt, ...) { return 0; } enum mm_io_opcode { MMIO_READ = 0x1, MMIO_WRITE = 0x2, MMIO_PROBE = 0x3, MMIO_UNPROBE = 0x4, MMIO_UNKNOWN_OP = 0x5, }; struct mmiotrace_rw { resource_size_t phys; unsigned long value; unsigned long pc; int map_id; unsigned char opcode; unsigned char width; }; struct mmiotrace_map { resource_size_t phys; unsigned long virt; unsigned long len; int map_id; unsigned char opcode; }; extern void enable_mmiotrace(void); extern void disable_mmiotrace(void); extern void mmio_trace_rw(struct mmiotrace_rw *rw); extern void mmio_trace_mapping(struct mmiotrace_map *map); extern int mmio_trace_printk(const char *fmt, va_list args); # 10 "kernel/trace/trace.h" 2 enum trace_type { __TRACE_FIRST_TYPE = 0, TRACE_FN, TRACE_CTX, TRACE_WAKE, TRACE_CONT, TRACE_STACK, TRACE_PRINT, TRACE_SPECIAL, TRACE_MMIO_RW, TRACE_MMIO_MAP, TRACE_BOOT, __TRACE_LAST_TYPE }; struct trace_entry { unsigned char type; unsigned char cpu; unsigned char flags; unsigned char preempt_count; int pid; }; struct ftrace_entry { struct trace_entry ent; unsigned long ip; unsigned long parent_ip; }; extern struct tracer boot_tracer; struct ctx_switch_entry { struct trace_entry ent; unsigned int prev_pid; unsigned char prev_prio; unsigned char prev_state; unsigned int next_pid; unsigned char next_prio; unsigned char next_state; unsigned int next_cpu; }; struct special_entry { struct trace_entry ent; unsigned long arg1; unsigned long arg2; unsigned long arg3; }; struct stack_entry { struct trace_entry ent; unsigned long caller[8]; }; struct print_entry { struct trace_entry ent; unsigned long ip; char buf[]; }; struct trace_field_cont { unsigned char type; char buf[88 - 1]; }; struct trace_mmiotrace_rw { struct trace_entry ent; struct mmiotrace_rw rw; }; struct trace_mmiotrace_map { struct trace_entry ent; struct mmiotrace_map map; }; struct trace_boot { struct trace_entry ent; struct boot_trace initcall; }; # 130 "kernel/trace/trace.h" enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 0x01, TRACE_FLAG_IRQS_NOSUPPORT = 0x02, TRACE_FLAG_NEED_RESCHED = 0x04, TRACE_FLAG_HARDIRQ = 0x08, TRACE_FLAG_SOFTIRQ = 0x10, TRACE_FLAG_CONT = 0x20, }; # 146 "kernel/trace/trace.h" struct trace_array_cpu { atomic_t disabled; unsigned long trace_idx; unsigned long overrun; unsigned long saved_latency; unsigned long critical_start; unsigned long critical_end; unsigned long critical_sequence; unsigned long nice; unsigned long policy; unsigned long rt_priority; cycle_t preempt_timestamp; pid_t pid; uid_t uid; char comm[16]; }; struct trace_iterator; struct trace_array { struct ring_buffer *buffer; unsigned long entries; long ctrl; int cpu; cycle_t time_start; struct task_struct *waiter; struct trace_array_cpu *data[1]; }; # 194 "kernel/trace/trace.h" extern void __ftrace_bad_type(void); # 226 "kernel/trace/trace.h" enum print_line_t { TRACE_TYPE_PARTIAL_LINE = 0, TRACE_TYPE_HANDLED = 1, TRACE_TYPE_UNHANDLED = 2 }; struct tracer { const char *name; void (*init)(struct trace_array *tr); void (*reset)(struct trace_array *tr); void (*open)(struct trace_iterator *iter); void (*pipe_open)(struct trace_iterator *iter); void (*close)(struct trace_iterator *iter); void (*start)(struct trace_iterator *iter); void (*stop)(struct trace_iterator *iter); ssize_t (*read)(struct trace_iterator *iter, struct file *filp, char *ubuf, size_t cnt, loff_t *ppos); void (*ctrl_update)(struct trace_array *tr); enum print_line_t (*print_line)(struct 
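/* struct tracer (kernel/trace/trace.h): the per-tracer operations table
   (init, reset, open, pipe_open, close, start, stop, read, ctrl_update,
   print_line), chained through ->next and registered with register_tracer(). */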
trace_iterator *iter); struct tracer *next; int print_max; }; struct trace_seq { unsigned char buffer[(1UL << 12)]; unsigned int len; unsigned int readpos; }; struct trace_iterator { struct trace_array *tr; struct tracer *trace; void *private; struct ring_buffer_iter *buffer_iter[1]; struct trace_seq seq; struct trace_entry *ent; int cpu; u64 ts; unsigned long iter_flags; loff_t pos; long idx; }; void trace_wake_up(void); void tracing_reset(struct trace_array *tr, int cpu); int tracing_open_generic(struct inode *inode, struct file *filp); struct dentry *tracing_init_dentry(void); void init_tracer_sysprof_debugfs(struct dentry *d_tracer); struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data); void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, int pc); void ftrace(struct trace_array *tr, struct trace_array_cpu *data, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc); void tracing_sched_switch_trace(struct trace_array *tr, struct trace_array_cpu *data, struct task_struct *prev, struct task_struct *next, unsigned long flags, int pc); void tracing_record_cmdline(struct task_struct *tsk); void tracing_sched_wakeup_trace(struct trace_array *tr, struct trace_array_cpu *data, struct task_struct *wakee, struct task_struct *cur, unsigned long flags, int pc); void trace_special(struct trace_array *tr, struct trace_array_cpu *data, unsigned long arg1, unsigned long arg2, unsigned long arg3, int pc); void trace_function(struct trace_array *tr, struct trace_array_cpu *data, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc); void tracing_start_cmdline_record(void); void tracing_stop_cmdline_record(void); int register_tracer(struct tracer *type); void unregister_tracer(struct tracer *type); extern unsigned long nsecs_to_usecs(unsigned long nsecs); extern unsigned long tracing_max_latency; extern unsigned long tracing_thresh; void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); void update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu); extern cycle_t ftrace_now(int cpu); # 349 "kernel/trace/trace.h" typedef void (*tracer_switch_func_t)(void *private, void *__rq, struct task_struct *prev, struct task_struct *next); struct tracer_switch_ops { tracer_switch_func_t func; void *private; struct tracer_switch_ops *next; }; # 388 "kernel/trace/trace.h" extern void *head_page(struct trace_array_cpu *data); extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); extern void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter); extern ssize_t trace_seq_to_user(struct trace_seq *s, char *ubuf, size_t cnt); extern long ns2usecs(cycle_t nsec); extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args); extern unsigned long trace_flags; # 406 "kernel/trace/trace.h" enum trace_iterator_flags { TRACE_ITER_PRINT_PARENT = 0x01, TRACE_ITER_SYM_OFFSET = 0x02, TRACE_ITER_SYM_ADDR = 0x04, TRACE_ITER_VERBOSE = 0x08, TRACE_ITER_RAW = 0x10, TRACE_ITER_HEX = 0x20, TRACE_ITER_BIN = 0x40, TRACE_ITER_BLOCK = 0x80, TRACE_ITER_STACKTRACE = 0x100, TRACE_ITER_SCHED_TREE = 0x200, TRACE_ITER_PRINTK = 0x400, }; extern struct tracer nop_trace; # 40 "kernel/trace/trace.c" 2 unsigned long tracing_max_latency = (cycle_t)(~0UL); unsigned long tracing_thresh; static __typeof__(local_t) per_cpu__ftrace_cpu_disabled; static inline __attribute__((always_inline)) void ftrace_disable_cpu(void) { do { } while (0); 
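/* ftrace_disable_cpu()/ftrace_enable_cpu(): bump and drop the per-CPU
   ftrace_cpu_disabled counter so ring-buffer accesses can be skipped while
   the buffers are reset or swapped; the empty do { } while (0) above is the
   expansion of preempt_disable(), a no-op without CONFIG_PREEMPT. */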
atomic_long_inc(&(&per_cpu__ftrace_cpu_disabled)->a); } static inline __attribute__((always_inline)) void ftrace_enable_cpu(void) { atomic_long_dec(&(&per_cpu__ftrace_cpu_disabled)->a); do { } while (0); } static cpumask_t tracing_buffer_mask; static int tracing_disabled = 1; long ns2usecs(cycle_t nsec) { nsec += 500; ({ uint32_t __base = (1000); uint32_t __rem; (void)(((typeof((nsec)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((nsec) >> 32) == 0), 1)) { __rem = (uint32_t)(nsec) % __base; (nsec) = (uint32_t)(nsec) / __base; } else __rem = __div64_32(&(nsec), __base); __rem; }); return nsec; } cycle_t ftrace_now(int cpu) { u64 ts = ring_buffer_time_stamp(cpu); ring_buffer_normalize_time_stamp(cpu, &ts); return ts; } # 94 "kernel/trace/trace.c" static struct trace_array global_trace; static __typeof__(struct trace_array_cpu) per_cpu__global_trace_cpu; # 108 "kernel/trace/trace.c" static struct trace_array max_tr; static __typeof__(struct trace_array_cpu) per_cpu__max_data; static int tracer_enabled = 1; int ftrace_function_enabled; # 130 "kernel/trace/trace.c" static unsigned long trace_buf_size = 1441792UL; static struct tracer *trace_types ; static struct tracer *current_trace ; static int max_tracer_type_len; static struct mutex trace_types_lock = { .count = { (1) } , .wait_lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, } , .wait_list = { &(trace_types_lock.wait_list), &(trace_types_lock.wait_list) } , .magic = &trace_types_lock }; static wait_queue_head_t trace_wait = { .lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, }, .task_list = { &(trace_wait).task_list, &(trace_wait).task_list } }; unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; void trace_wake_up(void) { if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked()) __wake_up(&trace_wait, (1 | 2), 1, ((void *)0)); } static int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) set_buf_size(char *str) { unsigned long buf_size; int ret; if (!str) return 0; ret = strict_strtoul(str, 0, &buf_size); if (ret < 0 || buf_size == 0) return 0; trace_buf_size = buf_size; return 1; } static char __setup_str_set_buf_size[] __attribute__ ((__section__(".init.data"))) __attribute__((aligned(1))) = "trace_buf_size="; static struct obs_kernel_param __setup_set_buf_size __attribute__((__used__)) __attribute__ ((__section__(".init.setup"))) __attribute__((aligned((sizeof(long))))) = { __setup_str_set_buf_size, set_buf_size, 0 }; unsigned long nsecs_to_usecs(unsigned long nsecs) { return nsecs / 1000; } # 204 "kernel/trace/trace.c" static const char *trace_options[] = { "print-parent", "sym-offset", "sym-addr", "verbose", "raw", "hex", "bin", "block", "stacktrace", "sched-tree", "ftrace_printk", ((void *)0) }; # 228 "kernel/trace/trace.c" static raw_spinlock_t ftrace_max_lock = (raw_spinlock_t){ 1 }; static void __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { struct trace_array_cpu *data = tr->data[cpu]; max_tr.cpu = cpu; max_tr.time_start = data->preempt_timestamp; data = max_tr.data[cpu]; data->saved_latency = tracing_max_latency; memcpy(data->comm, tsk->comm, 16); data->pid = tsk->pid; data->uid = tsk->uid; data->nice = tsk->static_prio - 20 - 100; data->policy = tsk->policy; data->rt_priority = tsk->rt_priority; tracing_record_cmdline((get_current())); } # 269 "kernel/trace/trace.c" int trace_seq_printf(struct trace_seq *s, const char 
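/* trace_seq_printf() (continuing below): appends formatted text to the
   page-sized (4 KiB) trace_seq buffer via vsnprintf() and returns 0 when the
   output would not fit. */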
*fmt, ...) { int len = ((1UL << 12) - 1) - s->len; va_list ap; int ret; if (!len) return 0; __builtin_va_start(ap,fmt); ret = vsnprintf(s->buffer + s->len, len, fmt, ap); __builtin_va_end(ap); if (ret >= len) return 0; s->len += ret; return len; } # 302 "kernel/trace/trace.c" static int trace_seq_puts(struct trace_seq *s, const char *str) { int len = strlen(str); if (len > (((1UL << 12) - 1) - s->len)) return 0; memcpy(s->buffer + s->len, str, len); s->len += len; return len; } static int trace_seq_putc(struct trace_seq *s, unsigned char c) { if (s->len >= ((1UL << 12) - 1)) return 0; s->buffer[s->len++] = c; return 1; } static int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len) { if (len > (((1UL << 12) - 1) - s->len)) return 0; memcpy(s->buffer + s->len, mem, len); s->len += len; return len; } static int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) { unsigned char hex[(8*2 + 1)]; unsigned char *data = mem; int i, j; for (i = len-1, j = 0; i >= 0; i--) { hex[j++] = hex_asc[((data[i]) & 0xf0) >> 4]; hex[j++] = hex_asc[((data[i]) & 0x0f)]; } hex[j++] = ' '; return trace_seq_putmem(s, hex, j); } static void trace_seq_reset(struct trace_seq *s) { s->len = 0; s->readpos = 0; } ssize_t trace_seq_to_user(struct trace_seq *s, char *ubuf, size_t cnt) { int len; int ret; if (s->len <= s->readpos) return -16; len = s->len - s->readpos; if (cnt > len) cnt = len; ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); if (ret) return -14; s->readpos += len; return cnt; } static void trace_print_seq(struct seq_file *m, struct trace_seq *s) { int len = s->len >= (1UL << 12) ? (1UL << 12) - 1 : s->len; s->buffer[len] = 0; seq_puts(m, s->buffer); trace_seq_reset(s); } # 408 "kernel/trace/trace.c" void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { struct ring_buffer *buf = tr->buffer; ({ static int __warned; int __ret_warn_once = !!(!({ unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %0;" : "=d" (flags) ); !(((flags) & ~0x3f) != 0); })); if (__builtin_expect(!!(__ret_warn_once), 0)) if (({ int __ret_warn_on = !!(!__warned); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 413); __builtin_expect(!!(__ret_warn_on), 0); })) __warned = 1; __builtin_expect(!!(__ret_warn_once), 0); }); __raw_spin_lock(&ftrace_max_lock); tr->buffer = max_tr.buffer; max_tr.buffer = buf; ftrace_disable_cpu(); ring_buffer_reset(tr->buffer); ftrace_enable_cpu(); __update_max_tr(tr, tsk, cpu); __raw_spin_unlock(&ftrace_max_lock); } # 435 "kernel/trace/trace.c" void update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) { int ret; ({ static int __warned; int __ret_warn_once = !!(!({ unsigned long flags; __asm__ __volatile__( "cli %0;" "sti %0;" : "=d" (flags) ); !(((flags) & ~0x3f) != 0); })); if (__builtin_expect(!!(__ret_warn_once), 0)) if (({ int __ret_warn_on = !!(!__warned); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 440); __builtin_expect(!!(__ret_warn_on), 0); })) __warned = 1; __builtin_expect(!!(__ret_warn_once), 0); }); __raw_spin_lock(&ftrace_max_lock); ftrace_disable_cpu(); ring_buffer_reset(max_tr.buffer); ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu); ftrace_enable_cpu(); ({ static int __warned; int __ret_warn_once = !!(ret); if (__builtin_expect(!!(__ret_warn_once), 0)) if (({ int __ret_warn_on = !!(!__warned); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 450); __builtin_expect(!!(__ret_warn_on), 0); 
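/* Tail of the expanded WARN_ON_ONCE(ret) in update_max_tr_single(): warn
   once if ring_buffer_swap_cpu() failed while snapshotting the max-latency
   trace for a single CPU. */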
})) __warned = 1; __builtin_expect(!!(__ret_warn_once), 0); }); __update_max_tr(tr, tsk, cpu); __raw_spin_unlock(&ftrace_max_lock); } int register_tracer(struct tracer *type) { struct tracer *t; int len; int ret = 0; if (!type->name) { printk("<6>" "Tracer must have a name\n"); return -1; } mutex_lock(&trace_types_lock); for (t = trace_types; t; t = t->next) { if (strcmp(type->name, t->name) == 0) { printk("<6>" "Trace %s already registered\n", type->name); ret = -1; goto out; } } # 520 "kernel/trace/trace.c" type->next = trace_types; trace_types = type; len = strlen(type->name); if (len > max_tracer_type_len) max_tracer_type_len = len; out: mutex_unlock(&trace_types_lock); return ret; } void unregister_tracer(struct tracer *type) { struct tracer **t; int len; mutex_lock(&trace_types_lock); for (t = &trace_types; *t; t = &(*t)->next) { if (*t == type) goto found; } printk("<6>" "Trace %s not registered\n", type->name); goto out; found: *t = (*t)->next; if (strlen(type->name) != max_tracer_type_len) goto out; max_tracer_type_len = 0; for (t = &trace_types; *t; t = &(*t)->next) { len = strlen((*t)->name); if (len > max_tracer_type_len) max_tracer_type_len = len; } out: mutex_unlock(&trace_types_lock); } void tracing_reset(struct trace_array *tr, int cpu) { ftrace_disable_cpu(); ring_buffer_reset_cpu(tr->buffer, cpu); ftrace_enable_cpu(); } static unsigned map_pid_to_cmdline[(1 ? 0x1000 : 0x8000)+1]; static unsigned map_cmdline_to_pid[128]; static char saved_cmdlines[128][16]; static int cmdline_idx; static spinlock_t trace_cmdline_lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, }; atomic_t trace_record_cmdline_disabled ; static void trace_init_cmdlines(void) { memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline)); memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid)); cmdline_idx = 0; } void trace_stop_cmdline_recording(void); static void trace_save_cmdline(struct task_struct *tsk) { unsigned map; unsigned idx; if (!tsk->pid || __builtin_expect(!!(tsk->pid > (1 ? 0x1000 : 0x8000)), 0)) return; if (!(_spin_trylock(&trace_cmdline_lock))) return; idx = map_pid_to_cmdline[tsk->pid]; if (idx >= 128) { idx = (cmdline_idx + 1) % 128; map = map_cmdline_to_pid[idx]; if (map <= (1 ? 0x1000 : 0x8000)) map_pid_to_cmdline[map] = (unsigned)-1; map_pid_to_cmdline[tsk->pid] = idx; cmdline_idx = idx; } memcpy(&saved_cmdlines[idx], tsk->comm, 16); _spin_unlock(&trace_cmdline_lock); } static char *trace_find_cmdline(int pid) { char *cmdline = "<...>"; unsigned map; if (!pid) return ""; if (pid > (1 ? 0x1000 : 0x8000)) goto out; map = map_pid_to_cmdline[pid]; if (map >= 128) goto out; cmdline = saved_cmdlines[map]; out: return cmdline; } void tracing_record_cmdline(struct task_struct *tsk) { if (((&trace_record_cmdline_disabled)->counter)) return; trace_save_cmdline(tsk); } void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, int pc) { struct task_struct *tsk = (get_current()); entry->preempt_count = pc & 0xff; entry->pid = (tsk) ? tsk->pid : 0; entry->flags = TRACE_FLAG_IRQS_NOSUPPORT | ((pc & (((1UL << (8))-1) << ((0 + 8) + 8))) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & (((1UL << (8))-1) << (0 + 8))) ? TRACE_FLAG_SOFTIRQ : 0) | (need_resched() ? 
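/* tracing_generic_entry_update() stamps each entry with pid, preempt count
   and status flags; irq state is always reported as TRACE_FLAG_IRQS_NOSUPPORT
   in this configuration, hardirq/softirq context comes from the preempt
   count, and TRACE_FLAG_NEED_RESCHED (continued below) from need_resched().
   trace_function() and the other trace_* writers that follow all use the same
   ring_buffer_lock_reserve(), fill-the-entry, ring_buffer_unlock_commit()
   sequence. */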
TRACE_FLAG_NEED_RESCHED : 0); } void trace_function(struct trace_array *tr, struct trace_array_cpu *data, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { struct ring_buffer_event *event; struct ftrace_entry *entry; unsigned long irq_flags; if (__builtin_expect(!!(atomic_long_read(&(&per_cpu__ftrace_cpu_disabled)->a)), 0)) return; event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq_flags); if (!event) return; entry = ring_buffer_event_data(event); tracing_generic_entry_update(&entry->ent, flags, pc); entry->ent.type = TRACE_FN; entry->ip = ip; entry->parent_ip = parent_ip; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); } void ftrace(struct trace_array *tr, struct trace_array_cpu *data, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { if (__builtin_expect(!!(!((&data->disabled)->counter)), 1)) trace_function(tr, data, ip, parent_ip, flags, pc); } static void ftrace_trace_stack(struct trace_array *tr, struct trace_array_cpu *data, unsigned long flags, int skip, int pc) { # 735 "kernel/trace/trace.c" } void __trace_stack(struct trace_array *tr, struct trace_array_cpu *data, unsigned long flags, int skip) { ftrace_trace_stack(tr, data, flags, skip, (current_thread_info()->preempt_count)); } static void ftrace_trace_special(void *__tr, void *__data, unsigned long arg1, unsigned long arg2, unsigned long arg3, int pc) { struct ring_buffer_event *event; struct trace_array_cpu *data = __data; struct trace_array *tr = __tr; struct special_entry *entry; unsigned long irq_flags; event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq_flags); if (!event) return; entry = ring_buffer_event_data(event); tracing_generic_entry_update(&entry->ent, 0, pc); entry->ent.type = TRACE_SPECIAL; entry->arg1 = arg1; entry->arg2 = arg2; entry->arg3 = arg3; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); ftrace_trace_stack(tr, data, irq_flags, 4, pc); trace_wake_up(); } void __trace_special(void *__tr, void *__data, unsigned long arg1, unsigned long arg2, unsigned long arg3) { ftrace_trace_special(__tr, __data, arg1, arg2, arg3, (current_thread_info()->preempt_count)); } void tracing_sched_switch_trace(struct trace_array *tr, struct trace_array_cpu *data, struct task_struct *prev, struct task_struct *next, unsigned long flags, int pc) { struct ring_buffer_event *event; struct ctx_switch_entry *entry; unsigned long irq_flags; event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq_flags); if (!event) return; entry = ring_buffer_event_data(event); tracing_generic_entry_update(&entry->ent, flags, pc); entry->ent.type = TRACE_CTX; entry->prev_pid = prev->pid; entry->prev_prio = prev->prio; entry->prev_state = prev->state; entry->next_pid = next->pid; entry->next_prio = next->prio; entry->next_state = next->state; entry->next_cpu = task_cpu(next); ring_buffer_unlock_commit(tr->buffer, event, irq_flags); ftrace_trace_stack(tr, data, flags, 5, pc); } void tracing_sched_wakeup_trace(struct trace_array *tr, struct trace_array_cpu *data, struct task_struct *wakee, struct task_struct *curr, unsigned long flags, int pc) { struct ring_buffer_event *event; struct ctx_switch_entry *entry; unsigned long irq_flags; event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq_flags); if (!event) return; entry = ring_buffer_event_data(event); tracing_generic_entry_update(&entry->ent, flags, pc); entry->ent.type = TRACE_WAKE; entry->prev_pid = curr->pid; entry->prev_prio = curr->prio; entry->prev_state = curr->state; entry->next_pid 
= wakee->pid; entry->next_prio = wakee->prio; entry->next_state = wakee->state; entry->next_cpu = task_cpu(wakee); ring_buffer_unlock_commit(tr->buffer, event, irq_flags); ftrace_trace_stack(tr, data, flags, 6, pc); trace_wake_up(); } void ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { struct trace_array *tr = &global_trace; struct trace_array_cpu *data; int cpu; int pc; if (tracing_disabled || !tr->ctrl) return; pc = (current_thread_info()->preempt_count); do { } while (0); cpu = 0; data = tr->data[cpu]; if (__builtin_expect(!!(!((&data->disabled)->counter)), 1)) ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); do { } while (0); } # 913 "kernel/trace/trace.c" enum trace_file_type { TRACE_FILE_LAT_FMT = 1, }; static void trace_iterator_increment(struct trace_iterator *iter, int cpu) { ftrace_disable_cpu(); iter->idx++; if (iter->buffer_iter[iter->cpu]) ring_buffer_read(iter->buffer_iter[iter->cpu], ((void *)0)); ftrace_enable_cpu(); } static struct trace_entry * peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; ftrace_disable_cpu(); if (buf_iter) event = ring_buffer_iter_peek(buf_iter, ts); else event = ring_buffer_peek(iter->tr->buffer, cpu, ts); ftrace_enable_cpu(); return event ? ring_buffer_event_data(event) : ((void *)0); } static struct trace_entry * __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { struct ring_buffer *buffer = iter->tr->buffer; struct trace_entry *ent, *next = ((void *)0); u64 next_ts = 0, ts; int next_cpu = -1; int cpu; for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) { if (ring_buffer_empty_cpu(buffer, cpu)) continue; ent = peek_next_entry(iter, cpu, &ts); if (ent && (!next || ts < next_ts)) { next = ent; next_cpu = cpu; next_ts = ts; } } if (ent_cpu) *ent_cpu = next_cpu; if (ent_ts) *ent_ts = next_ts; return next; } static struct trace_entry * find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { return __find_next_entry(iter, ent_cpu, ent_ts); } static void *find_next_entry_inc(struct trace_iterator *iter) { iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); if (iter->ent) trace_iterator_increment(iter, iter->cpu); return iter->ent ? 
iter : ((void *)0); } static void trace_consume(struct trace_iterator *iter) { ftrace_disable_cpu(); ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts); ftrace_enable_cpu(); } static void *s_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_iterator *iter = m->private; int i = (int)*pos; void *ent; (*pos)++; if (iter->idx > i) return ((void *)0); if (iter->idx < 0) ent = find_next_entry_inc(iter); else ent = iter; while (ent && iter->idx < i) ent = find_next_entry_inc(iter); iter->pos = *pos; return ent; } static void *s_start(struct seq_file *m, loff_t *pos) { struct trace_iterator *iter = m->private; void *p = ((void *)0); loff_t l = 0; int cpu; mutex_lock(&trace_types_lock); if (!current_trace || current_trace != iter->trace) { mutex_unlock(&trace_types_lock); return ((void *)0); } atomic_inc(&trace_record_cmdline_disabled); if (current_trace->start) current_trace->start(iter); if (*pos != iter->pos) { iter->ent = ((void *)0); iter->cpu = 0; iter->idx = -1; ftrace_disable_cpu(); for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) { ring_buffer_iter_reset(iter->buffer_iter[cpu]); } ftrace_enable_cpu(); for (p = iter; p && l < *pos; p = s_next(m, p, &l)) ; } else { l = *pos - 1; p = s_next(m, p, &l); } return p; } static void s_stop(struct seq_file *m, void *p) { struct trace_iterator *iter = m->private; atomic_dec(&trace_record_cmdline_disabled); if (current_trace && current_trace == iter->trace && iter->trace->stop) iter->trace->stop(iter); mutex_unlock(&trace_types_lock); } # 1102 "kernel/trace/trace.c" static inline __attribute__((always_inline)) const char *kretprobed(const char *name) { return name; } static int seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) { # 1121 "kernel/trace/trace.c" return 1; } static int seq_print_sym_offset(struct trace_seq *s, const char *fmt, unsigned long address) { # 1137 "kernel/trace/trace.c" return 1; } static int seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) { int ret; if (!ip) return trace_seq_printf(s, "0"); if (sym_flags & TRACE_ITER_SYM_OFFSET) ret = seq_print_sym_offset(s, "%s", ip); else ret = seq_print_sym_short(s, "%s", ip); if (!ret) return 0; if (sym_flags & TRACE_ITER_SYM_ADDR) ret = trace_seq_printf(s, " <" "%08lx" ">", ip); return ret; } static void print_lat_help_header(struct seq_file *m) { seq_puts(m, "# _------=> CPU# \n"); seq_puts(m, "# / _-----=> irqs-off \n"); seq_puts(m, "# | / _----=> need-resched \n"); seq_puts(m, "# || / _---=> hardirq/softirq \n"); seq_puts(m, "# ||| / _--=> preempt-depth \n"); seq_puts(m, "# |||| / \n"); seq_puts(m, "# ||||| delay \n"); seq_puts(m, "# cmd pid ||||| time | caller \n"); seq_puts(m, "# \\ / ||||| \\ | / \n"); } static void print_func_help_header(struct seq_file *m) { seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); seq_puts(m, "# | | | | |\n"); } static void print_trace_header(struct seq_file *m, struct trace_iterator *iter) { unsigned long sym_flags = (trace_flags & (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)); struct trace_array *tr = iter->tr; struct trace_array_cpu *data = tr->data[tr->cpu]; struct tracer *type = current_trace; unsigned long total; unsigned long entries; const char *name = "preemption"; if (type) name = type->name; entries = ring_buffer_entries(iter->tr->buffer); total = entries + ring_buffer_overruns(iter->tr->buffer); seq_printf(m, "%s latency trace v1.1.5 on %s\n", name, "2.6.28-ADI-2009R1-pre-g0033e75-dirty"); seq_puts(m, 
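/* print_trace_header(): the latency-trace banner; the release string above
   was substituted from UTS_RELEASE at preprocessing time. */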
"-----------------------------------" "---------------------------------\n"); seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |" " (M:%s VP:%d, KP:%d, SP:%d HP:%d", nsecs_to_usecs(data->saved_latency), entries, total, tr->cpu, "server", # 1225 "kernel/trace/trace.c" 0, 0, 0, 0); seq_puts(m, ")\n"); seq_puts(m, " -----------------\n"); seq_printf(m, " | task: %.16s-%d " "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", data->comm, data->pid, data->uid, data->nice, data->policy, data->rt_priority); seq_puts(m, " -----------------\n"); if (data->critical_start) { seq_puts(m, " => started at: "); seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n => ended at: "); seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n"); } seq_puts(m, "\n"); } static void lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) { int hardirq, softirq; char *comm; comm = trace_find_cmdline(entry->pid); trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); trace_seq_printf(s, "%3d", cpu); trace_seq_printf(s, "%c%c", (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.', ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); hardirq = entry->flags & TRACE_FLAG_HARDIRQ; softirq = entry->flags & TRACE_FLAG_SOFTIRQ; if (hardirq && softirq) { trace_seq_putc(s, 'H'); } else { if (hardirq) { trace_seq_putc(s, 'h'); } else { if (softirq) trace_seq_putc(s, 's'); else trace_seq_putc(s, '.'); } } if (entry->preempt_count) trace_seq_printf(s, "%x", entry->preempt_count); else trace_seq_puts(s, "."); } unsigned long preempt_mark_thresh = 100; static void lat_print_timestamp(struct trace_seq *s, u64 abs_usecs, unsigned long rel_usecs) { trace_seq_printf(s, " %4lldus", abs_usecs); if (rel_usecs > preempt_mark_thresh) trace_seq_puts(s, "!: "); else if (rel_usecs > 1) trace_seq_puts(s, "+: "); else trace_seq_puts(s, " : "); } static const char state_to_char[] = "RSDTtZX"; void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) { struct trace_entry *ent; struct trace_field_cont *cont; bool ok = true; ent = peek_next_entry(iter, iter->cpu, ((void *)0)); if (!ent || ent->type != TRACE_CONT) { trace_seq_putc(s, '\n'); return; } do { cont = (struct trace_field_cont *)ent; if (ok) ok = (trace_seq_printf(s, "%s", cont->buf) > 0); ftrace_disable_cpu(); if (iter->buffer_iter[iter->cpu]) ring_buffer_read(iter->buffer_iter[iter->cpu], ((void *)0)); else ring_buffer_consume(iter->tr->buffer, iter->cpu, ((void *)0)); ftrace_enable_cpu(); ent = peek_next_entry(iter, iter->cpu, ((void *)0)); } while (ent && ent->type == TRACE_CONT); if (!ok) trace_seq_putc(s, '\n'); } static enum print_line_t print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) { struct trace_seq *s = &iter->seq; unsigned long sym_flags = (trace_flags & (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)); struct trace_entry *next_entry; unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); struct trace_entry *entry = iter->ent; unsigned long abs_usecs; unsigned long rel_usecs; u64 next_ts; char *comm; int S, T; int i; unsigned state; if (entry->type == TRACE_CONT) return TRACE_TYPE_HANDLED; next_entry = find_next_entry(iter, ((void *)0), &next_ts); if (!next_entry) next_ts = iter->ts; rel_usecs = ns2usecs(next_ts - iter->ts); abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); if (verbose) { comm = trace_find_cmdline(entry->pid); 
trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]" " %ld.%03ldms (+%ld.%03ldms): ", comm, entry->pid, cpu, entry->flags, entry->preempt_count, trace_idx, ns2usecs(iter->ts), abs_usecs/1000, abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000); } else { lat_print_generic(s, entry, cpu); lat_print_timestamp(s, abs_usecs, rel_usecs); } switch (entry->type) { case TRACE_FN: { struct ftrace_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1385); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); seq_print_ip_sym(s, field->ip, sym_flags); trace_seq_puts(s, " ("); seq_print_ip_sym(s, field->parent_ip, sym_flags); trace_seq_puts(s, ")\n"); break; } case TRACE_CTX: case TRACE_WAKE: { struct ctx_switch_entry 
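/* TRACE_CTX / TRACE_WAKE: the do { if (__builtin_types_compatible_p ...) }
   while (0) blocks here are the expanded trace_assign_type() macro from
   kernel/trace/trace.h, which verifies that the destination pointer type
   matches the entry type and falls through to __ftrace_bad_type() otherwise;
   the previous and next task states are then decoded via state_to_char[] and
   printed as one context-switch or wakeup line. */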
*field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1397); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); T = field->next_state < sizeof(state_to_char) ? state_to_char[field->next_state] : 'X'; state = field->prev_state ? __ffs(field->prev_state) + 1 : 0; S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X'; comm = trace_find_cmdline(field->next_pid); trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", field->prev_pid, field->prev_prio, S, entry->type == TRACE_CTX ? 
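/* "==>" marks a context switch, " +" a wakeup */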
"==>" : " +", field->next_cpu, field->next_pid, field->next_prio, T, comm); break; } case TRACE_SPECIAL: { struct special_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1419); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); trace_seq_printf(s, "# %ld %ld %ld\n", field->arg1, field->arg2, field->arg3); break; } case TRACE_STACK: { struct stack_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if 
(__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1430); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); for (i = 0; i < 8; i++) { if (i) trace_seq_puts(s, " <= "); seq_print_ip_sym(s, field->caller[i], sym_flags); } trace_seq_puts(s, "\n"); break; } case TRACE_PRINT: { struct print_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int 
__ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1443); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); seq_print_ip_sym(s, field->ip, sym_flags); trace_seq_printf(s, ": %s", field->buf); if (entry->flags & TRACE_FLAG_CONT) trace_seq_print_cont(s, iter); break; } default: trace_seq_printf(s, "Unknown type %d\n", entry->type); } return TRACE_TYPE_HANDLED; } static enum print_line_t print_trace_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; unsigned long sym_flags = (trace_flags & (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)); struct trace_entry *entry; unsigned long usec_rem; unsigned long long t; unsigned long secs; char *comm; int ret; int S, T; int i; entry = iter->ent; if (entry->type == TRACE_CONT) return TRACE_TYPE_HANDLED; comm = trace_find_cmdline(iter->ent->pid); t = ns2usecs(iter->ts); usec_rem = ({ uint32_t __base = (1000000ULL); uint32_t __rem; (void)(((typeof((t)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((t) >> 32) == 0), 1)) { __rem = (uint32_t)(t) % __base; (t) = (uint32_t)(t) / __base; } else __rem = __div64_32(&(t), __base); __rem; }); secs = (unsigned long)t; ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); if (!ret) return TRACE_TYPE_PARTIAL_LINE; ret = trace_seq_printf(s, "[%03d] ", iter->cpu); if (!ret) return TRACE_TYPE_PARTIAL_LINE; ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem); if (!ret) return TRACE_TYPE_PARTIAL_LINE; switch (entry->type) { case 
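/* Default format: the "comm-pid [cpu] secs.usecs:" prefix was emitted above; the switch below formats the per-type payload. Hypothetical example line: "            bash-1234  [000]   123.456789: do_sys_open <-sys_open" */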
TRACE_FN: { struct ftrace_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1495); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); ret = seq_print_ip_sym(s, field->ip, sym_flags); if (!ret) return TRACE_TYPE_PARTIAL_LINE; if ((sym_flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { ret = trace_seq_printf(s, " <-"); if (!ret) return TRACE_TYPE_PARTIAL_LINE; ret = seq_print_ip_sym(s, field->parent_ip, sym_flags); if (!ret) return TRACE_TYPE_PARTIAL_LINE; } ret = trace_seq_printf(s, "\n"); if (!ret) return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_CTX: case TRACE_WAKE: { struct ctx_switch_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = 
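/* TRACE_CTX / TRACE_WAKE, default format: same prev/next line layout as the latency output, but without resolving the next task's comm. */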
(typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1520); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); S = field->prev_state < sizeof(state_to_char) ? state_to_char[field->prev_state] : 'X'; T = field->next_state < sizeof(state_to_char) ? state_to_char[field->next_state] : 'X'; ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", field->prev_pid, field->prev_prio, S, entry->type == TRACE_CTX ? 
"==>" : " +", field->next_cpu, field->next_pid, field->next_prio, T); if (!ret) return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_SPECIAL: { struct special_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1542); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); ret = trace_seq_printf(s, "# %ld %ld %ld\n", field->arg1, field->arg2, field->arg3); if (!ret) return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_STACK: { struct stack_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 
1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1555); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); for (i = 0; i < 8; i++) { if (i) { ret = trace_seq_puts(s, " <= "); if (!ret) return TRACE_TYPE_PARTIAL_LINE; } ret = seq_print_ip_sym(s, field->caller[i], sym_flags); if (!ret) return TRACE_TYPE_PARTIAL_LINE; } ret = trace_seq_puts(s, "\n"); if (!ret) return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_PRINT: { struct print_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) 
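/* TRACE_PRINT: resolve the recorded ip, print ": <buf>", and append any TRACE_CONT continuation records that follow. */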
warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1576); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); seq_print_ip_sym(s, field->ip, sym_flags); trace_seq_printf(s, ": %s", field->buf); if (entry->flags & TRACE_FLAG_CONT) trace_seq_print_cont(s, iter); break; } } return TRACE_TYPE_HANDLED; } static enum print_line_t print_raw_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_entry *entry; int ret; int S, T; entry = iter->ent; if (entry->type == TRACE_CONT) return TRACE_TYPE_HANDLED; ret = trace_seq_printf(s, "%d %d %llu ", entry->pid, iter->cpu, iter->ts); if (!ret) return TRACE_TYPE_PARTIAL_LINE; switch (entry->type) { case TRACE_FN: { struct ftrace_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) 
warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1609); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); ret = trace_seq_printf(s, "%x %x\n", field->ip, field->parent_ip); if (!ret) return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_CTX: case TRACE_WAKE: { struct ctx_switch_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if 
(__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1622); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); S = field->prev_state < sizeof(state_to_char) ? state_to_char[field->prev_state] : 'X'; T = field->next_state < sizeof(state_to_char) ? 
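/* map prev/next task states onto "RSDTtZX"; out-of-range states print as 'X' */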
state_to_char[field->next_state] : 'X'; if (entry->type == TRACE_WAKE) S = '+'; ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", field->prev_pid, field->prev_prio, S, field->next_cpu, field->next_pid, field->next_prio, T); if (!ret) return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_SPECIAL: case TRACE_STACK: { struct special_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1646); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); ret = trace_seq_printf(s, "# %ld %ld %ld\n", field->arg1, field->arg2, field->arg3); if (!ret) return TRACE_TYPE_PARTIAL_LINE; break; } case TRACE_PRINT: { struct print_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = 
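/* TRACE_SPECIAL and TRACE_STACK share the raw "# arg1 arg2 arg3" layout. */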
(typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1659); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); trace_seq_printf(s, "# %lx %s", field->ip, field->buf); if (entry->flags & TRACE_FLAG_CONT) trace_seq_print_cont(s, iter); break; } } return TRACE_TYPE_HANDLED; } # 1683 "kernel/trace/trace.c" static enum print_line_t print_hex_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; unsigned char newline = '\n'; struct trace_entry *entry; int S, T; entry = iter->ent; if (entry->type == TRACE_CONT) return TRACE_TYPE_HANDLED; do { ((void)sizeof(char[1 - 2*!!(sizeof(entry->pid) > 8)])); if (!trace_seq_putmem_hex(s, &(entry->pid), sizeof(entry->pid))) return 0; } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(iter->cpu) > 8)])); if (!trace_seq_putmem_hex(s, &(iter->cpu), 
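/* print_hex_fmt: pid, cpu, timestamp and the per-type payload are all emitted with trace_seq_putmem_hex, returning 0 as soon as the seq buffer fills. */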
sizeof(iter->cpu))) return 0; } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(iter->ts) > 8)])); if (!trace_seq_putmem_hex(s, &(iter->ts), sizeof(iter->ts))) return 0; } while (0); switch (entry->type) { case TRACE_FN: { struct ftrace_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1703); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(field->ip) > 8)])); if (!trace_seq_putmem_hex(s, &(field->ip), sizeof(field->ip))) return 0; } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(field->parent_ip) > 8)])); if (!trace_seq_putmem_hex(s, &(field->parent_ip), sizeof(field->parent_ip))) return 0; } while (0); break; } case TRACE_CTX: case TRACE_WAKE: { struct 
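/* TRACE_CTX / TRACE_WAKE in hex: prev pid/prio/state, then next cpu/pid/prio/state, with '+' substituted for the state character on wakeups. */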
ctx_switch_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1713); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); S = field->prev_state < sizeof(state_to_char) ? state_to_char[field->prev_state] : 'X'; T = field->next_state < sizeof(state_to_char) ? 
state_to_char[field->next_state] : 'X'; if (entry->type == TRACE_WAKE) S = '+'; do { ((void)sizeof(char[1 - 2*!!(sizeof(field->prev_pid) > 8)])); if (!trace_seq_putmem_hex(s, &(field->prev_pid), sizeof(field->prev_pid))) return 0; } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(field->prev_prio) > 8)])); if (!trace_seq_putmem_hex(s, &(field->prev_prio), sizeof(field->prev_prio))) return 0; } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(S) > 8)])); if (!trace_seq_putmem_hex(s, &(S), sizeof(S))) return 0; } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(field->next_cpu) > 8)])); if (!trace_seq_putmem_hex(s, &(field->next_cpu), sizeof(field->next_cpu))) return 0; } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(field->next_pid) > 8)])); if (!trace_seq_putmem_hex(s, &(field->next_pid), sizeof(field->next_pid))) return 0; } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(field->next_prio) > 8)])); if (!trace_seq_putmem_hex(s, &(field->next_prio), sizeof(field->next_prio))) return 0; } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(T) > 8)])); if (!trace_seq_putmem_hex(s, &(T), sizeof(T))) return 0; } while (0); break; } case TRACE_SPECIAL: case TRACE_STACK: { struct special_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { 
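/* TRACE_SPECIAL / TRACE_STACK in hex: arg1..arg3 go through the same hex helper; a trailing newline byte is appended after the switch. */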
field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1734); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(field->arg1) > 8)])); if (!trace_seq_putmem_hex(s, &(field->arg1), sizeof(field->arg1))) return 0; } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(field->arg2) > 8)])); if (!trace_seq_putmem_hex(s, &(field->arg2), sizeof(field->arg2))) return 0; } while (0); do { ((void)sizeof(char[1 - 2*!!(sizeof(field->arg3) > 8)])); if (!trace_seq_putmem_hex(s, &(field->arg3), sizeof(field->arg3))) return 0; } while (0); break; } } do { if (!trace_seq_putmem(s, &(newline), sizeof(newline))) return 0; } while (0); return TRACE_TYPE_HANDLED; } static enum print_line_t print_bin_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_entry *entry; entry = iter->ent; if (entry->type == TRACE_CONT) return TRACE_TYPE_HANDLED; do { if (!trace_seq_putmem(s, &(entry->pid), sizeof(entry->pid))) return 0; } while (0); do { if (!trace_seq_putmem(s, &(entry->cpu), sizeof(entry->cpu))) return 0; } while (0); do { if (!trace_seq_putmem(s, &(iter->ts), sizeof(iter->ts))) return 0; } while (0); switch (entry->type) { case TRACE_FN: { struct ftrace_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) 
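/* print_bin_fmt: pid, cpu, timestamp and the per-type payload are copied into the seq buffer as raw binary via trace_seq_putmem. */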
warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1765); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); do { if (!trace_seq_putmem(s, &(field->ip), sizeof(field->ip))) return 0; } while (0); do { if (!trace_seq_putmem(s, &(field->parent_ip), sizeof(field->parent_ip))) return 0; } while (0); break; } case TRACE_CTX: { struct ctx_switch_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 
1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1774); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); do { if (!trace_seq_putmem(s, &(field->prev_pid), sizeof(field->prev_pid))) return 0; } while (0); do { if (!trace_seq_putmem(s, &(field->prev_prio), sizeof(field->prev_prio))) return 0; } while (0); do { if (!trace_seq_putmem(s, &(field->prev_state), sizeof(field->prev_state))) return 0; } while (0); do { if (!trace_seq_putmem(s, &(field->next_pid), sizeof(field->next_pid))) return 0; } while (0); do { if (!trace_seq_putmem(s, &(field->next_prio), sizeof(field->next_prio))) return 0; } while (0); do { if (!trace_seq_putmem(s, &(field->next_state), sizeof(field->next_state))) return 0; } while (0); break; } case TRACE_SPECIAL: case TRACE_STACK: { struct special_entry *field; do { if (__builtin_types_compatible_p(typeof(field), struct ftrace_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_FN && (entry)->type != TRACE_FN); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct ctx_switch_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_field_cont *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_CONT && (entry)->type != TRACE_CONT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct stack_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_STACK && (entry)->type != TRACE_STACK); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct print_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_PRINT && (entry)->type != TRACE_PRINT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct special_entry *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(0 && (entry)->type != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_rw *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_RW && (entry)->type != TRACE_MMIO_RW); if 
(__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_mmiotrace_map *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_MMIO_MAP && (entry)->type != TRACE_MMIO_MAP); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; if (__builtin_types_compatible_p(typeof(field), struct trace_boot *)) { field = (typeof(field))(entry); ({ int __ret_warn_on = !!(TRACE_BOOT && (entry)->type != TRACE_BOOT); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 1788); __builtin_expect(!!(__ret_warn_on), 0); }); break; }; __ftrace_bad_type(); } while (0); do { if (!trace_seq_putmem(s, &(field->arg1), sizeof(field->arg1))) return 0; } while (0); do { if (!trace_seq_putmem(s, &(field->arg2), sizeof(field->arg2))) return 0; } while (0); do { if (!trace_seq_putmem(s, &(field->arg3), sizeof(field->arg3))) return 0; } while (0); break; } } return 1; } static int trace_empty(struct trace_iterator *iter) { int cpu; for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) { if (iter->buffer_iter[cpu]) { if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) return 0; } else { if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) return 0; } } return 1; } static enum print_line_t print_trace_line(struct trace_iterator *iter) { enum print_line_t ret; if (iter->trace && iter->trace->print_line) { ret = iter->trace->print_line(iter); if (ret != TRACE_TYPE_UNHANDLED) return ret; } if (trace_flags & TRACE_ITER_BIN) return print_bin_fmt(iter); if (trace_flags & TRACE_ITER_HEX) return print_hex_fmt(iter); if (trace_flags & TRACE_ITER_RAW) return print_raw_fmt(iter); if (iter->iter_flags & TRACE_FILE_LAT_FMT) return print_lat_fmt(iter, iter->idx, iter->cpu); return print_trace_fmt(iter); } static int s_show(struct seq_file *m, void *v) { struct trace_iterator *iter = v; if (iter->ent == ((void *)0)) { if (iter->tr) { seq_printf(m, "# tracer: %s\n", iter->trace->name); seq_puts(m, "#\n"); } if (iter->iter_flags & TRACE_FILE_LAT_FMT) { if (trace_empty(iter)) return 0; print_trace_header(m, iter); if (!(trace_flags & TRACE_ITER_VERBOSE)) print_lat_help_header(m); } else { if (!(trace_flags & TRACE_ITER_VERBOSE)) print_func_help_header(m); } } else { print_trace_line(iter); trace_print_seq(m, &iter->seq); } return 0; } static struct seq_operations tracer_seq_ops = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static struct trace_iterator * __tracing_open(struct inode *inode, struct file *file, int *ret) { struct trace_iterator *iter; struct seq_file *m; int cpu; if (tracing_disabled) { *ret = -19; return ((void *)0); } iter = kzalloc(sizeof(*iter), ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); if (!iter) { *ret = -12; goto out; } mutex_lock(&trace_types_lock); if (current_trace && current_trace->print_max) iter->tr = &max_tr; else iter->tr = inode->i_private; iter->trace = current_trace; iter->pos = -1; for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) { iter->buffer_iter[cpu] = ring_buffer_read_start(iter->tr->buffer, cpu); if (!iter->buffer_iter[cpu]) goto fail_buffer; } *ret = seq_open(file, &tracer_seq_ops); if (*ret) goto fail_buffer; m = file->private_data; m->private = iter; if (iter->tr->ctrl) { tracer_enabled = 0; ftrace_function_enabled = 0; } if (iter->trace && iter->trace->open) 
iter->trace->open(iter); mutex_unlock(&trace_types_lock); out: return iter; fail_buffer: for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) { if (iter->buffer_iter[cpu]) ring_buffer_read_finish(iter->buffer_iter[cpu]); } mutex_unlock(&trace_types_lock); kfree(iter); return ERR_PTR(-12); } int tracing_open_generic(struct inode *inode, struct file *filp) { if (tracing_disabled) return -19; filp->private_data = inode->i_private; return 0; } int tracing_release(struct inode *inode, struct file *file) { struct seq_file *m = (struct seq_file *)file->private_data; struct trace_iterator *iter = m->private; int cpu; mutex_lock(&trace_types_lock); for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) { if (iter->buffer_iter[cpu]) ring_buffer_read_finish(iter->buffer_iter[cpu]); } if (iter->trace && iter->trace->close) iter->trace->close(iter); if (iter->tr->ctrl) { tracer_enabled = 1; ftrace_function_enabled = 1; } mutex_unlock(&trace_types_lock); seq_release(inode, file); kfree(iter); return 0; } static int tracing_open(struct inode *inode, struct file *file) { int ret; __tracing_open(inode, file, &ret); return ret; } static int tracing_lt_open(struct inode *inode, struct file *file) { struct trace_iterator *iter; int ret; iter = __tracing_open(inode, file, &ret); if (!ret) iter->iter_flags |= TRACE_FILE_LAT_FMT; return ret; } static void * t_next(struct seq_file *m, void *v, loff_t *pos) { struct tracer *t = m->private; (*pos)++; if (t) t = t->next; m->private = t; return t; } static void *t_start(struct seq_file *m, loff_t *pos) { struct tracer *t = m->private; loff_t l = 0; mutex_lock(&trace_types_lock); for (; t && l < *pos; t = t_next(m, t, &l)) ; return t; } static void t_stop(struct seq_file *m, void *p) { mutex_unlock(&trace_types_lock); } static int t_show(struct seq_file *m, void *v) { struct tracer *t = v; if (!t) return 0; seq_printf(m, "%s", t->name); if (t->next) seq_putc(m, ' '); else seq_putc(m, '\n'); return 0; } static struct seq_operations show_traces_seq_ops = { .start = t_start, .next = t_next, .stop = t_stop, .show = t_show, }; static int show_traces_open(struct inode *inode, struct file *file) { int ret; if (tracing_disabled) return -19; ret = seq_open(file, &show_traces_seq_ops); if (!ret) { struct seq_file *m = file->private_data; m->private = trace_types; } return ret; } static struct file_operations tracing_fops = { .open = tracing_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_release, }; static struct file_operations tracing_lt_fops = { .open = tracing_lt_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_release, }; static struct file_operations show_traces_fops = { .open = show_traces_open, .read = seq_read, .release = seq_release, }; static cpumask_t tracing_cpumask = (cpumask_t) { { [(((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))-1] = ( ((1) % 32) ? 
(1UL<<((1) % 32))-1 : ~0UL ) } }; static cpumask_t tracing_cpumask_new; static struct mutex tracing_cpumask_update_lock = { .count = { (1) } , .wait_lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, } , .wait_list = { &(tracing_cpumask_update_lock.wait_list), &(tracing_cpumask_update_lock.wait_list) } , .magic = &tracing_cpumask_update_lock }; static char mask_str[1 + 1]; static ssize_t tracing_cpumask_read(struct file *filp, char *ubuf, size_t count, loff_t *ppos) { int len; mutex_lock(&tracing_cpumask_update_lock); len = __cpumask_scnprintf((mask_str), (count), &(tracing_cpumask), 1); if (count - len < 2) { count = -22; goto out_err; } len += sprintf(mask_str + len, "\n"); count = simple_read_from_buffer(ubuf, count, ppos, mask_str, 1 +1); out_err: mutex_unlock(&tracing_cpumask_update_lock); return count; } static ssize_t tracing_cpumask_write(struct file *filp, const char *ubuf, size_t count, loff_t *ppos) { int err, cpu; mutex_lock(&tracing_cpumask_update_lock); err = __cpumask_parse_user((ubuf), (count), &(tracing_cpumask_new), 1); if (err) goto err_unlock; do { int __tmp_dummy; __asm__ __volatile__( "cli %0;" : "=d" (__tmp_dummy) ); } while (0); __raw_spin_lock(&ftrace_max_lock); for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) { if (test_bit((cpu), (tracing_cpumask).bits) && !test_bit((cpu), (tracing_cpumask_new).bits)) { atomic_inc(&global_trace.data[cpu]->disabled); } if (!test_bit((cpu), (tracing_cpumask).bits) && test_bit((cpu), (tracing_cpumask_new).bits)) { atomic_dec(&global_trace.data[cpu]->disabled); } } __raw_spin_unlock(&ftrace_max_lock); __asm__ __volatile__( "sti %0;" : : "d" (bfin_irq_flags) ); tracing_cpumask = tracing_cpumask_new; mutex_unlock(&tracing_cpumask_update_lock); return count; err_unlock: mutex_unlock(&tracing_cpumask_update_lock); return err; } static struct file_operations tracing_cpumask_fops = { .open = tracing_open_generic, .read = tracing_cpumask_read, .write = tracing_cpumask_write, }; static ssize_t tracing_iter_ctrl_read(struct file *filp, char *ubuf, size_t cnt, loff_t *ppos) { char *buf; int r = 0; int len = 0; int i; for (i = 0; trace_options[i]; i++) { len += strlen(trace_options[i]); len += 3; } buf = kmalloc(len + 2, ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); if (!buf) return -12; for (i = 0; trace_options[i]; i++) { if (trace_flags & (1 << i)) r += sprintf(buf + r, "%s ", trace_options[i]); else r += sprintf(buf + r, "no%s ", trace_options[i]); } r += sprintf(buf + r, "\n"); ({ int __ret_warn_on = !!(r >= len + 2); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 2219); __builtin_expect(!!(__ret_warn_on), 0); }); r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); kfree(buf); return r; } static ssize_t tracing_iter_ctrl_write(struct file *filp, const char *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; char *cmp = buf; int neg = 0; int i; if (cnt >= sizeof(buf)) return -22; if (copy_from_user(&buf, ubuf, cnt)) return -14; buf[cnt] = 0; if (strncmp(buf, "no", 2) == 0) { neg = 1; cmp += 2; } for (i = 0; trace_options[i]; i++) { int len = strlen(trace_options[i]); if (strncmp(cmp, trace_options[i], len) == 0) { if (neg) trace_flags &= ~(1 << i); else trace_flags |= (1 << i); break; } } if (!trace_options[i]) return -22; filp->f_pos += cnt; return cnt; } static struct file_operations tracing_iter_fops = { .open = tracing_open_generic, .read = tracing_iter_ctrl_read, .write = tracing_iter_ctrl_write, }; static const char 
readme_msg[] = "tracing mini-HOWTO:\n\n" "# mkdir /debug\n" "# mount -t debugfs nodev /debug\n\n" "# cat /debug/tracing/available_tracers\n" "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n" "# cat /debug/tracing/current_tracer\n" "none\n" "# echo sched_switch > /debug/tracing/current_tracer\n" "# cat /debug/tracing/current_tracer\n" "sched_switch\n" "# cat /debug/tracing/iter_ctrl\n" "noprint-parent nosym-offset nosym-addr noverbose\n" "# echo print-parent > /debug/tracing/iter_ctrl\n" "# echo 1 > /debug/tracing/tracing_enabled\n" "# cat /debug/tracing/trace > /tmp/trace.txt\n" "echo 0 > /debug/tracing/tracing_enabled\n" ; static ssize_t tracing_readme_read(struct file *filp, char *ubuf, size_t cnt, loff_t *ppos) { return simple_read_from_buffer(ubuf, cnt, ppos, readme_msg, strlen(readme_msg)); } static struct file_operations tracing_readme_fops = { .open = tracing_open_generic, .read = tracing_readme_read, }; static ssize_t tracing_ctrl_read(struct file *filp, char *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = sprintf(buf, "%ld\n", tr->ctrl); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_ctrl_write(struct file *filp, const char *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; long val; int ret; if (cnt >= sizeof(buf)) return -22; if (copy_from_user(&buf, ubuf, cnt)) return -14; buf[cnt] = 0; ret = strict_strtoul(buf, 10, &val); if (ret < 0) return ret; val = !!val; mutex_lock(&trace_types_lock); if (tr->ctrl ^ val) { if (val) tracer_enabled = 1; else tracer_enabled = 0; tr->ctrl = val; if (current_trace && current_trace->ctrl_update) current_trace->ctrl_update(tr); } mutex_unlock(&trace_types_lock); filp->f_pos += cnt; return cnt; } static ssize_t tracing_set_trace_read(struct file *filp, char *ubuf, size_t cnt, loff_t *ppos) { char buf[max_tracer_type_len+2]; int r; mutex_lock(&trace_types_lock); if (current_trace) r = sprintf(buf, "%s\n", current_trace->name); else r = sprintf(buf, "\n"); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_set_trace_write(struct file *filp, const char *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = &global_trace; struct tracer *t; char buf[max_tracer_type_len+1]; int i; size_t ret; ret = cnt; if (cnt > max_tracer_type_len) cnt = max_tracer_type_len; if (copy_from_user(&buf, ubuf, cnt)) return -14; buf[cnt] = 0; for (i = cnt - 1; i > 0 && (((_ctype[(int)(unsigned char)(buf[i])])&(0x20)) != 0); i--) buf[i] = 0; mutex_lock(&trace_types_lock); for (t = trace_types; t; t = t->next) { if (strcmp(t->name, buf) == 0) break; } if (!t) { ret = -22; goto out; } if (t == current_trace) goto out; if (current_trace && current_trace->reset) current_trace->reset(tr); current_trace = t; if (t->init) t->init(tr); out: mutex_unlock(&trace_types_lock); if (ret > 0) filp->f_pos += ret; return ret; } static ssize_t tracing_max_lat_read(struct file *filp, char *ubuf, size_t cnt, loff_t *ppos) { unsigned long *ptr = filp->private_data; char buf[64]; int r; r = snprintf(buf, sizeof(buf), "%ld\n", *ptr == (unsigned long)-1 ? 
-1 : nsecs_to_usecs(*ptr)); if (r > sizeof(buf)) r = sizeof(buf); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_max_lat_write(struct file *filp, const char *ubuf, size_t cnt, loff_t *ppos) { long *ptr = filp->private_data; char buf[64]; long val; int ret; if (cnt >= sizeof(buf)) return -22; if (copy_from_user(&buf, ubuf, cnt)) return -14; buf[cnt] = 0; ret = strict_strtoul(buf, 10, &val); if (ret < 0) return ret; *ptr = val * 1000; return cnt; } static atomic_t tracing_reader; static int tracing_open_pipe(struct inode *inode, struct file *filp) { struct trace_iterator *iter; if (tracing_disabled) return -19; if (atomic_add_return(1,(&tracing_reader)) != 1) { atomic_dec(&tracing_reader); return -16; } iter = kzalloc(sizeof(*iter), ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); if (!iter) return -12; mutex_lock(&trace_types_lock); iter->tr = &global_trace; iter->trace = current_trace; filp->private_data = iter; if (iter->trace->pipe_open) iter->trace->pipe_open(iter); mutex_unlock(&trace_types_lock); return 0; } static int tracing_release_pipe(struct inode *inode, struct file *file) { struct trace_iterator *iter = file->private_data; kfree(iter); atomic_dec(&tracing_reader); return 0; } static unsigned int tracing_poll_pipe(struct file *filp, poll_table *poll_table) { struct trace_iterator *iter = filp->private_data; if (trace_flags & TRACE_ITER_BLOCK) { return 1 | 64; } else { if (!trace_empty(iter)) return 1 | 64; poll_wait(filp, &trace_wait, poll_table); if (!trace_empty(iter)) return 1 | 64; return 0; } } static ssize_t tracing_read_pipe(struct file *filp, char *ubuf, size_t cnt, loff_t *ppos) { struct trace_iterator *iter = filp->private_data; ssize_t sret; sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (sret != -16) return sret; trace_seq_reset(&iter->seq); mutex_lock(&trace_types_lock); if (iter->trace->read) { sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); if (sret) goto out; } waitagain: sret = 0; while (trace_empty(iter)) { if ((filp->f_flags & 00004000)) { sret = -11; goto out; } # 2579 "kernel/trace/trace.c" do { (void) ((__typeof__(*(&(get_current())->state)))__xchg((unsigned long)((1)), (&(get_current())->state), sizeof(*(&(get_current())->state)))); } while (0); iter->tr->waiter = (get_current()); mutex_unlock(&trace_types_lock); schedule_timeout(250/10); mutex_lock(&trace_types_lock); iter->tr->waiter = ((void *)0); if (signal_pending((get_current()))) { sret = -4; goto out; } if (iter->trace != current_trace) goto out; # 2608 "kernel/trace/trace.c" if (!tracer_enabled && iter->pos) break; continue; } if (trace_empty(iter)) goto out; if (cnt >= (1UL << 12)) cnt = (1UL << 12) - 1; memset(&iter->seq, 0, sizeof(struct trace_iterator) - __builtin_offsetof(struct trace_iterator,seq)); iter->pos = -1; while (find_next_entry_inc(iter) != ((void *)0)) { enum print_line_t ret; int len = iter->seq.len; ret = print_trace_line(iter); if (ret == TRACE_TYPE_PARTIAL_LINE) { iter->seq.len = len; break; } trace_consume(iter); if (iter->seq.len >= cnt) break; } sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (iter->seq.readpos >= iter->seq.len) trace_seq_reset(&iter->seq); if (sret == -16) goto waitagain; out: mutex_unlock(&trace_types_lock); return sret; } static ssize_t tracing_entries_read(struct file *filp, char *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = sprintf(buf, "%lu\n", tr->entries); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t 
tracing_entries_write(struct file *filp, const char *ubuf, size_t cnt, loff_t *ppos) { unsigned long val; char buf[64]; int ret, cpu; struct trace_array *tr = filp->private_data; if (cnt >= sizeof(buf)) return -22; if (copy_from_user(&buf, ubuf, cnt)) return -14; buf[cnt] = 0; ret = strict_strtoul(buf, 10, &val); if (ret < 0) return ret; if (!val) return -22; mutex_lock(&trace_types_lock); if (tr->ctrl) { cnt = -16; printk("<6>" "ftrace: please disable tracing" " before modifying buffer size\n"); goto out; } for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) { if (global_trace.data[cpu]) atomic_inc(&global_trace.data[cpu]->disabled); if (max_tr.data[cpu]) atomic_inc(&max_tr.data[cpu]->disabled); } if (val != global_trace.entries) { ret = ring_buffer_resize(global_trace.buffer, val); if (ret < 0) { cnt = ret; goto out; } ret = ring_buffer_resize(max_tr.buffer, val); if (ret < 0) { int r; cnt = ret; r = ring_buffer_resize(global_trace.buffer, global_trace.entries); if (r < 0) { ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 2732); __builtin_expect(!!(__ret_warn_on), 0); }); tracing_disabled = 1; } goto out; } global_trace.entries = val; } filp->f_pos += cnt; if (tracing_disabled) cnt = -12; out: for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) { if (global_trace.data[cpu]) atomic_dec(&global_trace.data[cpu]->disabled); if (max_tr.data[cpu]) atomic_dec(&max_tr.data[cpu]->disabled); } max_tr.entries = global_trace.entries; mutex_unlock(&trace_types_lock); return cnt; } static int mark_printk(const char *fmt, ...) { int ret; va_list args; __builtin_va_start(args,fmt); ret = trace_vprintk(0, fmt, args); __builtin_va_end(args); return ret; } static ssize_t tracing_mark_write(struct file *filp, const char *ubuf, size_t cnt, loff_t *fpos) { char *buf; char *end; struct trace_array *tr = &global_trace; if (!tr->ctrl || tracing_disabled) return -22; if (cnt > 1024) cnt = 1024; buf = kmalloc(cnt + 1, ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); if (buf == ((void *)0)) return -12; if (copy_from_user(buf, ubuf, cnt)) { kfree(buf); return -14; } buf[cnt] = '\0'; end = strchr(buf, '\n'); if (end) *end = '\0'; cnt = mark_printk("%s\n", buf); kfree(buf); *fpos += cnt; return cnt; } static struct file_operations tracing_max_lat_fops = { .open = tracing_open_generic, .read = tracing_max_lat_read, .write = tracing_max_lat_write, }; static struct file_operations tracing_ctrl_fops = { .open = tracing_open_generic, .read = tracing_ctrl_read, .write = tracing_ctrl_write, }; static struct file_operations set_tracer_fops = { .open = tracing_open_generic, .read = tracing_set_trace_read, .write = tracing_set_trace_write, }; static struct file_operations tracing_pipe_fops = { .open = tracing_open_pipe, .poll = tracing_poll_pipe, .read = tracing_read_pipe, .release = tracing_release_pipe, }; static struct file_operations tracing_entries_fops = { .open = tracing_open_generic, .read = tracing_entries_read, .write = tracing_entries_write, }; static struct file_operations tracing_mark_fops = { .open = tracing_open_generic, .write = tracing_mark_write, }; # 2863 "kernel/trace/trace.c" static struct dentry *d_tracer; struct dentry *tracing_init_dentry(void) { static int once; if (d_tracer) return d_tracer; d_tracer = debugfs_create_dir("tracing", ((void *)0)); if (!d_tracer && !once) { once = 1; printk("<4>" "Could not create debugfs directory 'tracing'\n"); return ((void *)0); } return d_tracer; } static __attribute__ 
((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) int tracer_init_debugfs(void) { struct dentry *d_tracer; struct dentry *entry; d_tracer = tracing_init_dentry(); entry = debugfs_create_file("tracing_enabled", 0644, d_tracer, &global_trace, &tracing_ctrl_fops); if (!entry) printk("<4>" "Could not create debugfs 'tracing_enabled' entry\n"); entry = debugfs_create_file("iter_ctrl", 0644, d_tracer, ((void *)0), &tracing_iter_fops); if (!entry) printk("<4>" "Could not create debugfs 'iter_ctrl' entry\n"); entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, ((void *)0), &tracing_cpumask_fops); if (!entry) printk("<4>" "Could not create debugfs 'tracing_cpumask' entry\n"); entry = debugfs_create_file("latency_trace", 0444, d_tracer, &global_trace, &tracing_lt_fops); if (!entry) printk("<4>" "Could not create debugfs 'latency_trace' entry\n"); entry = debugfs_create_file("trace", 0444, d_tracer, &global_trace, &tracing_fops); if (!entry) printk("<4>" "Could not create debugfs 'trace' entry\n"); entry = debugfs_create_file("available_tracers", 0444, d_tracer, &global_trace, &show_traces_fops); if (!entry) printk("<4>" "Could not create debugfs 'available_tracers' entry\n"); entry = debugfs_create_file("current_tracer", 0444, d_tracer, &global_trace, &set_tracer_fops); if (!entry) printk("<4>" "Could not create debugfs 'current_tracer' entry\n"); entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer, &tracing_max_latency, &tracing_max_lat_fops); if (!entry) printk("<4>" "Could not create debugfs " "'tracing_max_latency' entry\n"); entry = debugfs_create_file("tracing_thresh", 0644, d_tracer, &tracing_thresh, &tracing_max_lat_fops); if (!entry) printk("<4>" "Could not create debugfs " "'tracing_thresh' entry\n"); entry = debugfs_create_file("README", 0644, d_tracer, ((void *)0), &tracing_readme_fops); if (!entry) printk("<4>" "Could not create debugfs 'README' entry\n"); entry = debugfs_create_file("trace_pipe", 0644, d_tracer, ((void *)0), &tracing_pipe_fops); if (!entry) printk("<4>" "Could not create debugfs " "'trace_pipe' entry\n"); entry = debugfs_create_file("trace_entries", 0644, d_tracer, &global_trace, &tracing_entries_fops); if (!entry) printk("<4>" "Could not create debugfs " "'trace_entries' entry\n"); entry = debugfs_create_file("trace_marker", 0220, d_tracer, ((void *)0), &tracing_mark_fops); if (!entry) printk("<4>" "Could not create debugfs " "'trace_marker' entry\n"); # 2976 "kernel/trace/trace.c" return 0; } int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { static spinlock_t trace_buf_lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, }; static char trace_buf[1024]; struct ring_buffer_event *event; struct trace_array *tr = &global_trace; struct trace_array_cpu *data; struct print_entry *entry; unsigned long flags, irq_flags; int cpu, len = 0, size, pc; if (!tr->ctrl || tracing_disabled) return 0; pc = (current_thread_info()->preempt_count); do { } while (0); cpu = 0; data = tr->data[cpu]; if (__builtin_expect(!!(((&data->disabled)->counter)), 0)) goto out; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _spin_lock_irqsave(&trace_buf_lock); } while (0); len = vsnprintf(trace_buf, 1024, fmt, args); len = ({ typeof(len) _min1 = (len); typeof(1024 -1) _min2 = (1024 -1); (void) (&_min1 == &_min2); _min1 < _min2 ? 
_min1 : _min2; }); trace_buf[len] = 0; size = sizeof(*entry) + len + 1; event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags); if (!event) goto out_unlock; entry = ring_buffer_event_data(event); tracing_generic_entry_update(&entry->ent, flags, pc); entry->ent.type = TRACE_PRINT; entry->ip = ip; memcpy(&entry->buf, trace_buf, len); entry->buf[len] = 0; ring_buffer_unlock_commit(tr->buffer, event, irq_flags); out_unlock: do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _spin_unlock_irqrestore(&trace_buf_lock, flags); } while (0); out: do { } while (0); return len; } ; int __ftrace_printk(unsigned long ip, const char *fmt, ...) { int ret; va_list ap; if (!(trace_flags & TRACE_ITER_PRINTK)) return 0; __builtin_va_start(ap,fmt); ret = trace_vprintk(ip, fmt, ap); __builtin_va_end(ap); return ret; } ; static int trace_panic_handler(struct notifier_block *this, unsigned long event, void *unused) { ftrace_dump(); return 0x0001; } static struct notifier_block trace_panic_notifier = { .notifier_call = trace_panic_handler, .next = ((void *)0), .priority = 150 }; static int trace_die_handler(struct notifier_block *self, unsigned long val, void *data) { switch (val) { case DIE_OOPS: ftrace_dump(); break; default: break; } return 0x0001; } static struct notifier_block trace_die_notifier = { .notifier_call = trace_die_handler, .priority = 200 }; # 3091 "kernel/trace/trace.c" static void trace_printk_seq(struct trace_seq *s) { if (s->len >= 1000) s->len = 1000; s->buffer[s->len] = 0; printk("<6>" "%s", s->buffer); trace_seq_reset(s); } void ftrace_dump(void) { static spinlock_t ftrace_dump_lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, }; static struct trace_iterator iter; static cpumask_t mask; static int dump_ran; unsigned long flags; int cnt = 0, cpu; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _spin_lock_irqsave(&ftrace_dump_lock); } while (0); if (dump_ran) goto out; dump_ran = 1; ftrace_kill(); for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)tracing_buffer_mask) { atomic_inc(&global_trace.data[cpu]->disabled); } printk("<6>" "Dumping ftrace buffer:\n"); iter.tr = &global_trace; iter.trace = current_trace; # 3143 "kernel/trace/trace.c" __cpus_clear(&(mask), 1); while (!trace_empty(&iter)) { if (!cnt) printk("<6>" "---------------------------------\n"); cnt++; memset(&iter.seq, 0, sizeof(struct trace_iterator) - __builtin_offsetof(struct trace_iterator,seq)); iter.iter_flags |= TRACE_FILE_LAT_FMT; iter.pos = -1; if (find_next_entry_inc(&iter) != ((void *)0)) { print_trace_line(&iter); trace_consume(&iter); } trace_printk_seq(&iter.seq); } if (!cnt) printk("<6>" " (ftrace buffer empty)\n"); else printk("<6>" "---------------------------------\n"); out: do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _spin_unlock_irqrestore(&ftrace_dump_lock, flags); } while (0); } __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) static int tracer_alloc_buffers(void) { struct trace_array_cpu *data; int i; tracing_buffer_mask = cpu_possible_map; global_trace.buffer = ring_buffer_alloc(trace_buf_size, (RB_FL_OVERWRITE)); if (!global_trace.buffer) { printk("<3>" "tracer: failed to allocate ring buffer!\n"); ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 3188); __builtin_expect(!!(__ret_warn_on), 
0); }); return 0; } global_trace.entries = ring_buffer_size(global_trace.buffer); max_tr.buffer = ring_buffer_alloc(trace_buf_size, (RB_FL_OVERWRITE)); if (!max_tr.buffer) { printk("<3>" "tracer: failed to allocate max ring buffer!\n"); ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 3198); __builtin_expect(!!(__ret_warn_on), 0); }); ring_buffer_free(global_trace.buffer); return 0; } max_tr.entries = ring_buffer_size(max_tr.buffer); ({ int __ret_warn_on = !!(max_tr.entries != global_trace.entries); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_on_slowpath("kernel/trace/trace.c", 3203); __builtin_expect(!!(__ret_warn_on), 0); }); for ((i) = 0; (i) < 1; (i)++, (void)tracing_buffer_mask) { data = global_trace.data[i] = &(*((void)(i), &per_cpu__global_trace_cpu)); max_tr.data[i] = &(*((void)(i), &per_cpu__max_data)); } trace_init_cmdlines(); register_tracer(&nop_trace); register_tracer(&boot_tracer); current_trace = &boot_tracer; current_trace->init(&global_trace); global_trace.ctrl = tracer_enabled; tracing_disabled = 0; atomic_notifier_chain_register(&panic_notifier_list, &trace_panic_notifier); register_die_notifier(&trace_die_notifier); return 0; } static initcall_t __initcall_tracer_alloc_buffersearly __attribute__((__used__)) __attribute__((__section__(".initcall" "early" ".init"))) = tracer_alloc_buffers; static initcall_t __initcall_tracer_init_debugfs5 __attribute__((__used__)) __attribute__((__section__(".initcall" "5" ".init"))) = tracer_init_debugfs;
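/*
 * Illustrative userspace sketch -- NOT part of trace.c or of this
 * translation unit; it would be built separately against libc.  It simply
 * exercises the debugfs control files created by tracer_init_debugfs()
 * above ("current_tracer", "tracing_enabled", "trace"), following the
 * steps documented in readme_msg.  It assumes debugfs is already mounted
 * at /debug and that the sched_switch tracer is available, exactly as the
 * mini-HOWTO text describes; nothing here is verified beyond opening the
 * files.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Write a short string to one of the tracing control files. */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[256];
	FILE *trace;

	/* Select the sched_switch tracer and start tracing. */
	if (write_str("/debug/tracing/current_tracer", "sched_switch\n"))
		return EXIT_FAILURE;
	if (write_str("/debug/tracing/tracing_enabled", "1\n"))
		return EXIT_FAILURE;

	/* Let the tracer collect some context-switch events. */
	sleep(1);

	/* Stop tracing before reading, as the HOWTO suggests. */
	write_str("/debug/tracing/tracing_enabled", "0\n");

	/* Dump the formatted output produced by the seq_file code above. */
	trace = fopen("/debug/tracing/trace", "r");
	if (!trace) {
		perror("/debug/tracing/trace");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), trace))
		fputs(line, stdout);
	fclose(trace);

	return EXIT_SUCCESS;
}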