--- /dev/null
+# 1 "fs/sysv/super.c"
+# 1 "/usr/local/src/blackfin/git/linux-kernel//"
+# 1 "<built-in>"
+# 1 "<command-line>"
+# 1 "./include/linux/autoconf.h" 1
+# 1 "<command-line>" 2
+# 1 "fs/sysv/super.c"
+# 23 "fs/sysv/super.c"
+# 1 "include/linux/module.h" 1
+# 9 "include/linux/module.h"
+# 1 "include/linux/list.h" 1
+
+
+
+# 1 "include/linux/stddef.h" 1
+
+
+
+# 1 "include/linux/compiler.h" 1
+# 40 "include/linux/compiler.h"
+# 1 "include/linux/compiler-gcc.h" 1
+# 86 "include/linux/compiler-gcc.h"
+# 1 "include/linux/compiler-gcc4.h" 1
+# 86 "include/linux/compiler-gcc.h" 2
+# 41 "include/linux/compiler.h" 2
+# 58 "include/linux/compiler.h"
+struct ftrace_branch_data {
+ const char *func;
+ const char *file;
+ unsigned line;
+ union {
+ struct {
+ unsigned long correct;
+ unsigned long incorrect;
+ };
+ struct {
+ unsigned long miss;
+ unsigned long hit;
+ };
+ unsigned long miss_hit[2];
+ };
+};
+# 5 "include/linux/stddef.h" 2
+# 15 "include/linux/stddef.h"
+enum {
+ false = 0,
+ true = 1
+};
+# 5 "include/linux/list.h" 2
+# 1 "include/linux/poison.h" 1
+# 6 "include/linux/list.h" 2
+# 1 "include/linux/prefetch.h" 1
+# 13 "include/linux/prefetch.h"
+# 1 "include/linux/types.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 1 "include/asm-generic/types.h" 1
+
+
+
+
+
+
+# 1 "include/asm-generic/int-ll64.h" 1
+# 11 "include/asm-generic/int-ll64.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitsperlong.h" 1
+# 1 "include/asm-generic/bitsperlong.h" 1
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitsperlong.h" 2
+# 12 "include/asm-generic/int-ll64.h" 2
+
+
+
+
+
+
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+
+__extension__ typedef __signed__ long long __s64;
+__extension__ typedef unsigned long long __u64;
+# 42 "include/asm-generic/int-ll64.h"
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+# 8 "include/asm-generic/types.h" 2
+
+
+
+typedef unsigned short umode_t;
+# 34 "include/asm-generic/types.h"
+typedef u32 dma_addr_t;
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 2
+# 5 "include/linux/types.h" 2
+# 14 "include/linux/types.h"
+# 1 "include/linux/posix_types.h" 1
+# 36 "include/linux/posix_types.h"
+typedef struct {
+ unsigned long fds_bits [(1024/(8 * sizeof(unsigned long)))];
+} __kernel_fd_set;
+
+
+typedef void (*__kernel_sighandler_t)(int);
+
+
+typedef int __kernel_key_t;
+typedef int __kernel_mqd_t;
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/posix_types.h" 1
+# 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/posix_types.h"
+typedef unsigned short __kernel_mode_t;
+
+
+typedef unsigned short __kernel_nlink_t;
+
+
+typedef unsigned int __kernel_ipc_pid_t;
+
+
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
+typedef int __kernel_ptrdiff_t;
+
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+
+
+typedef unsigned short __kernel_old_dev_t;
+
+
+# 1 "include/asm-generic/posix_types.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitsperlong.h" 1
+# 5 "include/asm-generic/posix_types.h" 2
+# 14 "include/asm-generic/posix_types.h"
+typedef unsigned long __kernel_ino_t;
+# 26 "include/asm-generic/posix_types.h"
+typedef int __kernel_pid_t;
+
+
+
+
+
+
+
+typedef unsigned int __kernel_uid_t;
+typedef unsigned int __kernel_gid_t;
+
+
+
+typedef long __kernel_suseconds_t;
+
+
+
+typedef int __kernel_daddr_t;
+
+
+
+typedef __kernel_uid_t __kernel_uid32_t;
+typedef __kernel_gid_t __kernel_gid32_t;
+# 79 "include/asm-generic/posix_types.h"
+typedef long __kernel_off_t;
+typedef long long __kernel_loff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
+typedef char * __kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+
+
+
+
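+/*
+ * Non-atomic fd_set helpers: fd maps to bit (fd % BITS_PER_LONG) of word
+ * (fd / BITS_PER_LONG) in fds_bits[]; __FD_ZERO special-cases the common
+ * constant sizes so the compiler can emit straight-line stores.
+ */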
+static inline __attribute__((always_inline)) void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+ unsigned long __tmp = __fd / (8 * sizeof(unsigned long));
+ unsigned long __rem = __fd % (8 * sizeof(unsigned long));
+ __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
+}
+
+
+static inline __attribute__((always_inline)) void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+ unsigned long __tmp = __fd / (8 * sizeof(unsigned long));
+ unsigned long __rem = __fd % (8 * sizeof(unsigned long));
+ __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
+}
+
+
+static inline __attribute__((always_inline)) int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
+{
+ unsigned long __tmp = __fd / (8 * sizeof(unsigned long));
+ unsigned long __rem = __fd % (8 * sizeof(unsigned long));
+ return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void __FD_ZERO(__kernel_fd_set *__p)
+{
+ unsigned long *__tmp = __p->fds_bits;
+ int __i;
+
+ if (__builtin_constant_p((1024/(8 * sizeof(unsigned long))))) {
+ switch ((1024/(8 * sizeof(unsigned long)))) {
+ case 16:
+ __tmp[ 0] = 0; __tmp[ 1] = 0;
+ __tmp[ 2] = 0; __tmp[ 3] = 0;
+ __tmp[ 4] = 0; __tmp[ 5] = 0;
+ __tmp[ 6] = 0; __tmp[ 7] = 0;
+ __tmp[ 8] = 0; __tmp[ 9] = 0;
+ __tmp[10] = 0; __tmp[11] = 0;
+ __tmp[12] = 0; __tmp[13] = 0;
+ __tmp[14] = 0; __tmp[15] = 0;
+ return;
+
+ case 8:
+ __tmp[ 0] = 0; __tmp[ 1] = 0;
+ __tmp[ 2] = 0; __tmp[ 3] = 0;
+ __tmp[ 4] = 0; __tmp[ 5] = 0;
+ __tmp[ 6] = 0; __tmp[ 7] = 0;
+ return;
+
+ case 4:
+ __tmp[ 0] = 0; __tmp[ 1] = 0;
+ __tmp[ 2] = 0; __tmp[ 3] = 0;
+ return;
+ }
+ }
+ __i = (1024/(8 * sizeof(unsigned long)));
+ while (__i) {
+ __i--;
+ *__tmp = 0;
+ __tmp++;
+ }
+}
+# 32 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/posix_types.h" 2
+# 48 "include/linux/posix_types.h" 2
+# 15 "include/linux/types.h" 2
+
+
+
+typedef __u32 __kernel_dev_t;
+
+typedef __kernel_fd_set fd_set;
+typedef __kernel_dev_t dev_t;
+typedef __kernel_ino_t ino_t;
+typedef __kernel_mode_t mode_t;
+typedef __kernel_nlink_t nlink_t;
+typedef __kernel_off_t off_t;
+typedef __kernel_pid_t pid_t;
+typedef __kernel_daddr_t daddr_t;
+typedef __kernel_key_t key_t;
+typedef __kernel_suseconds_t suseconds_t;
+typedef __kernel_timer_t timer_t;
+typedef __kernel_clockid_t clockid_t;
+typedef __kernel_mqd_t mqd_t;
+
+typedef _Bool bool;
+
+typedef __kernel_uid32_t uid_t;
+typedef __kernel_gid32_t gid_t;
+typedef __kernel_uid16_t uid16_t;
+typedef __kernel_gid16_t gid16_t;
+
+typedef unsigned long uintptr_t;
+
+
+
+typedef __kernel_old_uid_t old_uid_t;
+typedef __kernel_old_gid_t old_gid_t;
+
+
+
+typedef __kernel_loff_t loff_t;
+# 59 "include/linux/types.h"
+typedef __kernel_size_t size_t;
+
+
+
+
+typedef __kernel_ssize_t ssize_t;
+
+
+
+
+typedef __kernel_ptrdiff_t ptrdiff_t;
+
+
+
+
+typedef __kernel_time_t time_t;
+
+
+
+
+typedef __kernel_clock_t clock_t;
+
+
+
+
+typedef __kernel_caddr_t caddr_t;
+
+
+
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+
+
+typedef unsigned char unchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+
+
+
+
+typedef __u8 u_int8_t;
+typedef __s8 int8_t;
+typedef __u16 u_int16_t;
+typedef __s16 int16_t;
+typedef __u32 u_int32_t;
+typedef __s32 int32_t;
+
+
+
+typedef __u8 uint8_t;
+typedef __u16 uint16_t;
+typedef __u32 uint32_t;
+
+
+typedef __u64 uint64_t;
+typedef __u64 u_int64_t;
+typedef __s64 int64_t;
+# 135 "include/linux/types.h"
+typedef u64 sector_t;
+typedef u64 blkcnt_t;
+# 168 "include/linux/types.h"
+typedef __u16 __le16;
+typedef __u16 __be16;
+typedef __u32 __le32;
+typedef __u32 __be32;
+typedef __u64 __le64;
+typedef __u64 __be64;
+
+typedef __u16 __sum16;
+typedef __u32 __wsum;
+
+
+typedef unsigned gfp_t;
+typedef unsigned fmode_t;
+
+
+
+
+typedef u32 phys_addr_t;
+
+
+typedef phys_addr_t resource_size_t;
+
+typedef struct {
+ volatile int counter;
+} atomic_t;
+
+
+
+
+
+
+
+struct ustat {
+ __kernel_daddr_t f_tfree;
+ __kernel_ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+# 14 "include/linux/prefetch.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" 1
+# 16 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ptrace.h" 1
+# 30 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ptrace.h"
+struct pt_regs {
+ long orig_pc;
+ long ipend;
+ long seqstat;
+ long rete;
+ long retn;
+ long retx;
+ long pc;
+ long rets;
+ long reserved;
+ long astat;
+ long lb1;
+ long lb0;
+ long lt1;
+ long lt0;
+ long lc1;
+ long lc0;
+ long a1w;
+ long a1x;
+ long a0w;
+ long a0x;
+ long b3;
+ long b2;
+ long b1;
+ long b0;
+ long l3;
+ long l2;
+ long l1;
+ long l0;
+ long m3;
+ long m2;
+ long m1;
+ long m0;
+ long i3;
+ long i2;
+ long i1;
+ long i0;
+ long usp;
+ long fp;
+ long p5;
+ long p4;
+ long p3;
+ long p2;
+ long p1;
+ long p0;
+ long r7;
+ long r6;
+ long r5;
+ long r4;
+ long r3;
+ long r2;
+ long r1;
+ long r0;
+ long orig_r0;
+ long orig_p0;
+ long syscfg;
+};
+# 105 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ptrace.h"
+extern void show_regs(struct pt_regs *);
+# 17 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 1
+# 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/bf533.h" 1
+# 13 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/defBF532.h" 1
+# 13 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/defBF532.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/def_LPBlackfin.h" 1
+# 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/def_LPBlackfin.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/anomaly.h" 1
+# 13 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/def_LPBlackfin.h" 2
+# 14 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/defBF532.h" 2
+# 14 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/anomaly.h" 1
+# 15 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" 1
+# 14 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_LPBlackfin.h" 1
+# 15 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/cdefBF532.h" 2
+# 18 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/blackfin.h" 2
+# 18 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h" 2
+
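+/* Read and write the user stack pointer (USP) register directly via asm. */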
+static inline __attribute__((always_inline)) unsigned long rdusp(void)
+{
+ unsigned long usp;
+
+ __asm__ __volatile__("%0 = usp;\n\t":"=da"(usp));
+ return usp;
+}
+
+static inline __attribute__((always_inline)) void wrusp(unsigned long usp)
+{
+ __asm__ __volatile__("usp = %0;\n\t"::"da"(usp));
+}
+
+static inline __attribute__((always_inline)) unsigned long __get_SP(void)
+{
+ unsigned long sp;
+
+ __asm__ __volatile__("%0 = sp;\n\t" : "=da"(sp));
+ return sp;
+}
+# 53 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h"
+struct thread_struct {
+ unsigned long ksp;
+ unsigned long usp;
+ unsigned short seqstat;
+ unsigned long esp0;
+ unsigned long pc;
+ void * debuggerinfo;
+};
+
+
+
+
+
+
+extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+ unsigned long new_sp);
+
+
+struct task_struct;
+
+
+static inline __attribute__((always_inline)) void release_thread(struct task_struct *dead_task)
+{
+}
+
+
+
+extern int kernel_thread(int (*fn) (void *), void *arg, unsigned long flags);
+
+
+
+
+static inline __attribute__((always_inline)) void exit_thread(void)
+{
+}
+
+
+
+
+
+
+unsigned long get_wchan(struct task_struct *p);
+# 109 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h"
+static inline __attribute__((always_inline)) uint32_t __attribute__((pure)) bfin_revid(void)
+{
+
+ uint32_t revid = (({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }) & 0xF0000000) >> 28;
+# 124 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h"
+ return revid;
+}
+
+static inline __attribute__((always_inline)) uint16_t __attribute__((pure)) bfin_cpuid(void)
+{
+ return (({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }) & 0x0FFFF000) >> 12;
+}
+
+static inline __attribute__((always_inline)) uint32_t __attribute__((pure)) bfin_dspid(void)
+{
+ return ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFE05000) ); __v; });
+}
+
+
+
+static inline __attribute__((always_inline)) uint32_t __attribute__((pure)) bfin_compiled_revid(void)
+{
+# 150 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h"
+ return 4;
+# 160 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/processor.h"
+}
+# 15 "include/linux/prefetch.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cache.h" 1
+# 9 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cache.h"
+# 1 "include/linux/linkage.h" 1
+
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/linkage.h" 1
+# 6 "include/linux/linkage.h" 2
+# 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cache.h" 2
+# 16 "include/linux/prefetch.h" 2
+# 53 "include/linux/prefetch.h"
+static inline __attribute__((always_inline)) void prefetch_range(void *addr, size_t len)
+{
+
+
+
+
+
+
+
+}
+# 7 "include/linux/list.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 1
+# 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h"
+# 1 "include/linux/irqflags.h" 1
+# 14 "include/linux/irqflags.h"
+# 1 "include/linux/typecheck.h" 1
+# 15 "include/linux/irqflags.h" 2
+# 57 "include/linux/irqflags.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irqflags.h" 1
+# 16 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irqflags.h"
+extern unsigned long bfin_irq_flags;
+
+
+static inline __attribute__((always_inline)) void bfin_sti(unsigned long flags)
+{
+ asm volatile("sti %0;" : : "d" (flags));
+}
+
+static inline __attribute__((always_inline)) unsigned long bfin_cli(void)
+{
+ unsigned long flags;
+ asm volatile("cli %0;" : "=d" (flags));
+ return flags;
+}
+# 176 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irqflags.h"
+static inline __attribute__((always_inline)) void raw_local_irq_disable(void)
+{
+ bfin_cli();
+}
+static inline __attribute__((always_inline)) void raw_local_irq_enable(void)
+{
+ bfin_sti(bfin_irq_flags);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) unsigned long __raw_local_irq_save(void)
+{
+ unsigned long flags = bfin_cli();
+
+
+
+ return flags;
+}
+# 207 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irqflags.h"
+static inline __attribute__((always_inline)) void raw_local_irq_restore(unsigned long flags)
+{
+ if (!(((flags) & ~0x3f) == 0))
+ raw_local_irq_enable();
+}
+# 58 "include/linux/irqflags.h" 2
+# 13 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pda.h" 1
+# 15 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pda.h"
+struct blackfin_pda {
+ struct blackfin_pda *next;
+
+ unsigned long syscfg;
+
+
+
+
+ unsigned long *ipdt;
+ unsigned long *ipdt_swapcount;
+ unsigned long *dpdt;
+ unsigned long *dpdt_swapcount;
+
+
+
+
+
+
+
+ unsigned long ex_iptr;
+ unsigned long ex_optr;
+ unsigned long ex_buf[4];
+ unsigned long ex_imask;
+ unsigned long ex_ipend;
+ unsigned long *ex_stack;
+
+
+ unsigned long last_cplb_fault_retx;
+
+ unsigned long dcplb_fault_addr;
+ unsigned long icplb_fault_addr;
+ unsigned long retx;
+ unsigned long seqstat;
+ unsigned int __nmi_count;
+
+ unsigned long dcplb_doublefault_addr;
+ unsigned long icplb_doublefault_addr;
+ unsigned long retx_doublefault;
+ unsigned long seqstat_doublefault;
+
+};
+
+extern struct blackfin_pda cpu_pda[];
+# 16 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h" 1
+# 18 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/irq.h" 1
+# 19 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h" 2
+# 36 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h"
+# 1 "include/asm-generic/irq.h" 1
+# 13 "include/asm-generic/irq.h"
+static inline __attribute__((always_inline)) int irq_canonicalize(int irq)
+{
+ return irq;
+}
+# 37 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq.h" 2
+# 17 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
+# 107 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h"
+struct __xchg_dummy {
+ unsigned long a[100];
+};
+
+
+
+
+static inline __attribute__((always_inline)) unsigned long __xchg(unsigned long x, volatile void *ptr,
+ int size)
+{
+ unsigned long tmp = 0;
+ unsigned long flags;
+
+ do { (flags) = __raw_local_irq_save(); } while (0);
+
+ switch (size) {
+ case 1:
+ __asm__ __volatile__
+ ("%0 = b%2 (z);\n\t"
+ "b%2 = %1;\n\t"
+ : "=&d" (tmp) : "d" (x), "m" (*((volatile struct __xchg_dummy *)(ptr))) : "memory");
+ break;
+ case 2:
+ __asm__ __volatile__
+ ("%0 = w%2 (z);\n\t"
+ "w%2 = %1;\n\t"
+ : "=&d" (tmp) : "d" (x), "m" (*((volatile struct __xchg_dummy *)(ptr))) : "memory");
+ break;
+ case 4:
+ __asm__ __volatile__
+ ("%0 = %2;\n\t"
+ "%2 = %1;\n\t"
+ : "=&d" (tmp) : "d" (x), "m" (*((volatile struct __xchg_dummy *)(ptr))) : "memory");
+ break;
+ }
+ raw_local_irq_restore(flags);
+ return tmp;
+}
+
+# 1 "include/asm-generic/cmpxchg-local.h" 1
+
+
+
+
+
+extern unsigned long wrong_size_cmpxchg(volatile void *ptr);
+
+
+
+
+
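+/*
+ * Generic cmpxchg fallback for architectures without a native instruction:
+ * compare-and-store under local IRQ disable, so it is only safe against
+ * other contexts on the same CPU.
+ */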
+static inline __attribute__((always_inline)) unsigned long __cmpxchg_local_generic(volatile void *ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ unsigned long flags, prev;
+
+
+
+
+ if (size == 8 && sizeof(unsigned long) != 8)
+ wrong_size_cmpxchg(ptr);
+
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0);
+ switch (size) {
+ case 1: prev = *(u8 *)ptr;
+ if (prev == old)
+ *(u8 *)ptr = (u8)new;
+ break;
+ case 2: prev = *(u16 *)ptr;
+ if (prev == old)
+ *(u16 *)ptr = (u16)new;
+ break;
+ case 4: prev = *(u32 *)ptr;
+ if (prev == old)
+ *(u32 *)ptr = (u32)new;
+ break;
+ case 8: prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = (u64)new;
+ break;
+ default:
+ wrong_size_cmpxchg(ptr);
+ }
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
+ return prev;
+}
+
+
+
+
+static inline __attribute__((always_inline)) u64 __cmpxchg64_local_generic(volatile void *ptr,
+ u64 old, u64 new)
+{
+ u64 prev;
+ unsigned long flags;
+
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0);
+ prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = new;
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
+ return prev;
+}
+# 147 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
+# 157 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h"
+# 1 "include/asm-generic/cmpxchg.h" 1
+# 158 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
+# 171 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/l1layout.h" 1
+# 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/l1layout.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" 1
+# 17 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h"
+static inline __attribute__((always_inline)) void SSYNC(void)
+{
+ int _tmp;
+ if ((0x0004 < 6))
+ __asm__ __volatile__(
+ "cli %0;"
+ "nop;"
+ "nop;"
+ "ssync;"
+ "sti %0;"
+ : "=d" (_tmp)
+ );
+ else if ((0x0004 < 5))
+ __asm__ __volatile__(
+ "nop;"
+ "nop;"
+ "nop;"
+ "ssync;"
+ );
+ else
+ __asm__ __volatile__("ssync;");
+}
+
+
+static inline __attribute__((always_inline)) void CSYNC(void)
+{
+ int _tmp;
+ if ((0x0004 < 6))
+ __asm__ __volatile__(
+ "cli %0;"
+ "nop;"
+ "nop;"
+ "csync;"
+ "sti %0;"
+ : "=d" (_tmp)
+ );
+ else if ((0x0004 < 5))
+ __asm__ __volatile__(
+ "nop;"
+ "nop;"
+ "nop;"
+ "csync;"
+ );
+ else
+ __asm__ __volatile__("csync;");
+}
+# 92 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mem_map.h" 1
+# 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mem_map.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/mach-bf533/include/mach/mem_map.h" 1
+# 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mem_map.h" 2
+# 53 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mem_map.h"
+static inline __attribute__((always_inline)) unsigned long get_l1_scratch_start_cpu(int cpu)
+{
+ return 0xFFB00000;
+}
+static inline __attribute__((always_inline)) unsigned long get_l1_code_start_cpu(int cpu)
+{
+ return 0xFFA00000;
+}
+static inline __attribute__((always_inline)) unsigned long get_l1_data_a_start_cpu(int cpu)
+{
+ return 0xFF800000;
+}
+static inline __attribute__((always_inline)) unsigned long get_l1_data_b_start_cpu(int cpu)
+{
+ return 0xFF900000;
+}
+static inline __attribute__((always_inline)) unsigned long get_l1_scratch_start(void)
+{
+ return get_l1_scratch_start_cpu(0);
+}
+static inline __attribute__((always_inline)) unsigned long get_l1_code_start(void)
+{
+ return get_l1_code_start_cpu(0);
+}
+static inline __attribute__((always_inline)) unsigned long get_l1_data_a_start(void)
+{
+ return get_l1_data_a_start_cpu(0);
+}
+static inline __attribute__((always_inline)) unsigned long get_l1_data_b_start(void)
+{
+ return get_l1_data_b_start_cpu(0);
+}
+# 93 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" 2
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h" 1
+# 33 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bfin-global.h"
+extern void bfin_setup_caches(unsigned int cpu);
+extern void bfin_setup_cpudata(unsigned int cpu);
+
+extern unsigned long get_cclk(void);
+extern unsigned long get_sclk(void);
+extern unsigned long sclk_to_usecs(unsigned long sclk);
+extern unsigned long usecs_to_sclk(unsigned long usecs);
+
+struct pt_regs;
+extern void dump_bfin_process(struct pt_regs *regs);
+extern void dump_bfin_mem(struct pt_regs *regs);
+extern void dump_bfin_trace_buffer(void);
+
+
+extern int init_arch_irq(void);
+extern void init_exception_vectors(void);
+extern void program_IAR(void);
+
+extern void lower_to_irq14(void);
+extern void bfin_return_from_exception(void);
+extern void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
+extern int bfin_internal_set_wake(unsigned int irq, unsigned int state);
+
+extern void *l1_data_A_sram_alloc(size_t);
+extern void *l1_data_B_sram_alloc(size_t);
+extern void *l1_inst_sram_alloc(size_t);
+extern void *l1_data_sram_alloc(size_t);
+extern void *l1_data_sram_zalloc(size_t);
+extern void *l2_sram_alloc(size_t);
+extern void *l2_sram_zalloc(size_t);
+extern int l1_data_A_sram_free(const void*);
+extern int l1_data_B_sram_free(const void*);
+extern int l1_inst_sram_free(const void*);
+extern int l1_data_sram_free(const void*);
+extern int l2_sram_free(const void *);
+extern int sram_free(const void*);
+
+
+
+
+
+
+extern void *sram_alloc_with_lsl(size_t, unsigned long);
+extern int sram_free_with_lsl(const void*);
+
+extern void *isram_memcpy(void *dest, const void *src, size_t n);
+
+extern const char bfin_board_name[];
+
+extern unsigned long bfin_sic_iwr[];
+extern unsigned vr_wakeup;
+extern u16 _bfin_swrst;
+# 95 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_misc.h" 1
+# 37 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_misc.h"
+static __inline__ __attribute__((always_inline)) void bfin_write_PLL_CTL(unsigned int val)
+{
+ unsigned long flags = 0;
+# 49 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_misc.h"
+ unsigned long iwr;
+
+
+ if (val == ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00000) ); __v; }))
+ return;
+
+ do { (flags) = __raw_local_irq_save(); } while (0);
+# 70 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_misc.h"
+ iwr = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00124) ); __v; });
+
+
+ __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00000), "d" ((uint16_t)(val)) : "memory" );
+ SSYNC();
+ asm("IDLE;");
+# 86 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_misc.h"
+ __asm__ __volatile__( "nop;" "[%0] = %1;" : : "a" (0xFFC00124), "d" (iwr) : "memory" );
+
+ raw_local_irq_restore(flags);
+}
+
+
+static __inline__ __attribute__((always_inline)) void bfin_write_VR_CTL(unsigned int val)
+{
+ unsigned long flags = 0;
+# 104 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_misc.h"
+ unsigned long iwr;
+
+
+ if (val == ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00008) ); __v; }))
+ return;
+
+ do { (flags) = __raw_local_irq_save(); } while (0);
+# 125 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_misc.h"
+ iwr = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00124) ); __v; });
+ __asm__ __volatile__( "nop;" "[%0] = %1;" : : "a" (0xFFC00124), "d" ((1 << (0))) : "memory" );
+
+
+ __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00008), "d" ((uint16_t)(val)) : "memory" );
+ SSYNC();
+ asm("IDLE;");
+# 142 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_misc.h"
+ __asm__ __volatile__( "nop;" "[%0] = %1;" : : "a" (0xFFC00124), "d" (iwr) : "memory" );
+
+ raw_local_irq_restore(flags);
+}
+# 160 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_misc.h"
+static inline __attribute__((always_inline)) void bfin_write_FIO_FLAG_D(unsigned short val) { unsigned long flags; do { (flags) = __raw_local_irq_save(); } while (0); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00700), "d" ((uint16_t)(val)) : "memory" ); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); raw_local_irq_restore(flags); }
+static inline __attribute__((always_inline)) void bfin_write_FIO_FLAG_C(unsigned short val) { unsigned long flags; do { (flags) = __raw_local_irq_save(); } while (0); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00704), "d" ((uint16_t)(val)) : "memory" ); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); raw_local_irq_restore(flags); }
+static inline __attribute__((always_inline)) void bfin_write_FIO_FLAG_S(unsigned short val) { unsigned long flags; do { (flags) = __raw_local_irq_save(); } while (0); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC00708), "d" ((uint16_t)(val)) : "memory" ); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); raw_local_irq_restore(flags); }
+static inline __attribute__((always_inline)) void bfin_write_FIO_FLAG_T(unsigned short val) { unsigned long flags; do { (flags) = __raw_local_irq_save(); } while (0); __asm__ __volatile__( "nop;" "w[%0] = %1;" : : "a" (0xFFC0070C), "d" ((uint16_t)(val)) : "memory" ); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); raw_local_irq_restore(flags); }
+# 176 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cdef_misc.h"
+static inline __attribute__((always_inline)) u16 bfin_read_FIO_FLAG_D(void) { unsigned long flags; u16 ret; do { (flags) = __raw_local_irq_save(); } while (0); ret = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00700) ); __v; }); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); raw_local_irq_restore(flags); return ret; }
+static inline __attribute__((always_inline)) u16 bfin_read_FIO_FLAG_C(void) { unsigned long flags; u16 ret; do { (flags) = __raw_local_irq_save(); } while (0); ret = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00704) ); __v; }); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); raw_local_irq_restore(flags); return ret; }
+static inline __attribute__((always_inline)) u16 bfin_read_FIO_FLAG_S(void) { unsigned long flags; u16 ret; do { (flags) = __raw_local_irq_save(); } while (0); ret = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC00708) ); __v; }); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); raw_local_irq_restore(flags); return ret; }
+static inline __attribute__((always_inline)) u16 bfin_read_FIO_FLAG_T(void) { unsigned long flags; u16 ret; do { (flags) = __raw_local_irq_save(); } while (0); ret = ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = w[%1] (z);" : "=d" (__v) : "a" (0xFFC0070C) ); __v; }); ({ uint32_t __v; __asm__ __volatile__( "nop;" "%0 = [%1];" : "=d" (__v) : "a" (0xFFC00014) ); __v; }); raw_local_irq_restore(flags); return ret; }
+# 96 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/blackfin.h" 2
+# 13 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/l1layout.h" 2
+
+
+
+
+
+
+
+struct l1_scratch_task_info
+{
+
+ void *stack_start;
+
+
+
+ void *lowest_sp;
+};
+# 172 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/system.h" 2
+
+
+ struct task_struct *resume(struct task_struct *prev, struct task_struct *next);
+# 8 "include/linux/list.h" 2
+# 19 "include/linux/list.h"
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void INIT_LIST_HEAD(struct list_head *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+# 41 "include/linux/list.h"
+static inline __attribute__((always_inline)) void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+# 64 "include/linux/list.h"
+static inline __attribute__((always_inline)) void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+# 78 "include/linux/list.h"
+static inline __attribute__((always_inline)) void list_add_tail(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head->prev, head);
+}
+# 90 "include/linux/list.h"
+static inline __attribute__((always_inline)) void __list_del(struct list_head * prev, struct list_head * next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+# 103 "include/linux/list.h"
+static inline __attribute__((always_inline)) void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->next = ((void *) 0x00100100);
+ entry->prev = ((void *) 0x00200200);
+}
+# 120 "include/linux/list.h"
+static inline __attribute__((always_inline)) void list_replace(struct list_head *old,
+ struct list_head *new)
+{
+ new->next = old->next;
+ new->next->prev = new;
+ new->prev = old->prev;
+ new->prev->next = new;
+}
+
+static inline __attribute__((always_inline)) void list_replace_init(struct list_head *old,
+ struct list_head *new)
+{
+ list_replace(old, new);
+ INIT_LIST_HEAD(old);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) void list_del_init(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ INIT_LIST_HEAD(entry);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void list_move(struct list_head *list, struct list_head *head)
+{
+ __list_del(list->prev, list->next);
+ list_add(list, head);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ __list_del(list->prev, list->next);
+ list_add_tail(list, head);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int list_is_last(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list->next == head;
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) int list_empty(const struct list_head *head)
+{
+ return head->next == head;
+}
+# 202 "include/linux/list.h"
+static inline __attribute__((always_inline)) int list_empty_careful(const struct list_head *head)
+{
+ struct list_head *next = head->next;
+ return (next == head) && (next == head->prev);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) int list_is_singular(const struct list_head *head)
+{
+ return !list_empty(head) && (head->next == head->prev);
+}
+
+static inline __attribute__((always_inline)) void __list_cut_position(struct list_head *list,
+ struct list_head *head, struct list_head *entry)
+{
+ struct list_head *new_first = entry->next;
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry;
+ entry->next = list;
+ head->next = new_first;
+ new_first->prev = head;
+}
+# 243 "include/linux/list.h"
+static inline __attribute__((always_inline)) void list_cut_position(struct list_head *list,
+ struct list_head *head, struct list_head *entry)
+{
+ if (list_empty(head))
+ return;
+ if (list_is_singular(head) &&
+ (head->next != entry && head != entry))
+ return;
+ if (entry == head)
+ INIT_LIST_HEAD(list);
+ else
+ __list_cut_position(list, head, entry);
+}
+
+static inline __attribute__((always_inline)) void __list_splice(const struct list_head *list,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+
+ first->prev = prev;
+ prev->next = first;
+
+ last->next = next;
+ next->prev = last;
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void list_splice(const struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice(list, head, head->next);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void list_splice_tail(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice(list, head->prev, head);
+}
+# 302 "include/linux/list.h"
+static inline __attribute__((always_inline)) void list_splice_init(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list)) {
+ __list_splice(list, head, head->next);
+ INIT_LIST_HEAD(list);
+ }
+}
+# 319 "include/linux/list.h"
+static inline __attribute__((always_inline)) void list_splice_tail_init(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list)) {
+ __list_splice(list, head->prev, head);
+ INIT_LIST_HEAD(list);
+ }
+}
+# 540 "include/linux/list.h"
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+
+
+
+static inline __attribute__((always_inline)) void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = ((void *)0);
+ h->pprev = ((void *)0);
+}
+
+static inline __attribute__((always_inline)) int hlist_unhashed(const struct hlist_node *h)
+{
+ return !h->pprev;
+}
+
+static inline __attribute__((always_inline)) int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+
+static inline __attribute__((always_inline)) void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline __attribute__((always_inline)) void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = ((void *) 0x00100100);
+ n->pprev = ((void *) 0x00200200);
+}
+
+static inline __attribute__((always_inline)) void hlist_del_init(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ __hlist_del(n);
+ INIT_HLIST_NODE(n);
+ }
+}
+
+static inline __attribute__((always_inline)) void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+
+static inline __attribute__((always_inline)) void hlist_add_before(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ n->pprev = next->pprev;
+ n->next = next;
+ next->pprev = &n->next;
+ *(n->pprev) = n;
+}
+
+static inline __attribute__((always_inline)) void hlist_add_after(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ next->next = n->next;
+ n->next = next;
+ next->pprev = &n->next;
+
+ if(next->next)
+ next->next->pprev = &next->next;
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) void hlist_move_list(struct hlist_head *old,
+ struct hlist_head *new)
+{
+ new->first = old->first;
+ if (new->first)
+ new->first->pprev = &new->first;
+ old->first = ((void *)0);
+}
+# 10 "include/linux/module.h" 2
+# 1 "include/linux/stat.h" 1
+
+
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/stat.h" 1
+# 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/stat.h"
+struct stat {
+ unsigned short st_dev;
+ unsigned short __pad1;
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned short __pad2;
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long __unused1;
+ unsigned long st_mtime;
+ unsigned long __unused2;
+ unsigned long st_ctime;
+ unsigned long __unused3;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+
+
+
+struct stat64 {
+ unsigned long long st_dev;
+ unsigned char __pad1[4];
+
+
+ unsigned long __st_ino;
+
+ unsigned int st_mode;
+ unsigned int st_nlink;
+
+ unsigned long st_uid;
+ unsigned long st_gid;
+
+ unsigned long long st_rdev;
+ unsigned char __pad2[4];
+
+ long long st_size;
+ unsigned long st_blksize;
+
+ long long st_blocks;
+
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+
+ unsigned long long st_ino;
+};
+# 7 "include/linux/stat.h" 2
+# 60 "include/linux/stat.h"
+# 1 "include/linux/time.h" 1
+
+
+
+
+
+
+# 1 "include/linux/cache.h" 1
+
+
+
+# 1 "include/linux/kernel.h" 1
+# 10 "include/linux/kernel.h"
+# 1 "/usr/local/src/blackfin/toolchains/20091208/bfin-uclinux/lib/gcc/bfin-uclinux/4.3.4/include/stdarg.h" 1 3 4
+# 43 "/usr/local/src/blackfin/toolchains/20091208/bfin-uclinux/lib/gcc/bfin-uclinux/4.3.4/include/stdarg.h" 3 4
+typedef __builtin_va_list __gnuc_va_list;
+# 105 "/usr/local/src/blackfin/toolchains/20091208/bfin-uclinux/lib/gcc/bfin-uclinux/4.3.4/include/stdarg.h" 3 4
+typedef __gnuc_va_list va_list;
+# 11 "include/linux/kernel.h" 2
+
+
+
+
+# 1 "include/linux/bitops.h" 1
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 4 "include/linux/bitops.h" 2
+# 17 "include/linux/bitops.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 1
+# 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h"
+# 1 "include/asm-generic/bitops.h" 1
+# 23 "include/asm-generic/bitops.h"
+# 1 "include/asm-generic/bitops/__ffs.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 5 "include/asm-generic/bitops/__ffs.h" 2
+
+
+
+
+
+
+
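+/*
+ * Generic bit-search helpers implemented by successive halving:
+ * __ffs()/__fls() return the 0-based index of the lowest/highest set bit,
+ * while ffs()/fls() return 1-based positions with 0 meaning "no bit set".
+ */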
+static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long __ffs(unsigned long word)
+{
+ int num = 0;
+
+
+
+
+
+
+
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
+# 24 "include/asm-generic/bitops.h" 2
+# 1 "include/asm-generic/bitops/ffz.h" 1
+# 25 "include/asm-generic/bitops.h" 2
+# 1 "include/asm-generic/bitops/fls.h" 1
+# 12 "include/asm-generic/bitops/fls.h"
+static inline __attribute__((always_inline)) __attribute__((always_inline)) int fls(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+# 26 "include/asm-generic/bitops.h" 2
+# 1 "include/asm-generic/bitops/__fls.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 5 "include/asm-generic/bitops/__fls.h" 2
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned long __fls(unsigned long word)
+{
+ int num = 32 - 1;
+
+
+
+
+
+
+
+ if (!(word & (~0ul << (32 -16)))) {
+ num -= 16;
+ word <<= 16;
+ }
+ if (!(word & (~0ul << (32 -8)))) {
+ num -= 8;
+ word <<= 8;
+ }
+ if (!(word & (~0ul << (32 -4)))) {
+ num -= 4;
+ word <<= 4;
+ }
+ if (!(word & (~0ul << (32 -2)))) {
+ num -= 2;
+ word <<= 2;
+ }
+ if (!(word & (~0ul << (32 -1))))
+ num -= 1;
+ return num;
+}
+# 27 "include/asm-generic/bitops.h" 2
+# 1 "include/asm-generic/bitops/fls64.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 5 "include/asm-generic/bitops/fls64.h" 2
+# 18 "include/asm-generic/bitops/fls64.h"
+static inline __attribute__((always_inline)) __attribute__((always_inline)) int fls64(__u64 x)
+{
+ __u32 h = x >> 32;
+ if (h)
+ return fls(h) + 32;
+ return fls(x);
+}
+# 28 "include/asm-generic/bitops.h" 2
+# 1 "include/asm-generic/bitops/find.h" 1
+# 29 "include/asm-generic/bitops.h" 2
+
+
+
+
+
+# 1 "include/asm-generic/bitops/sched.h" 1
+
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 6 "include/asm-generic/bitops/sched.h" 2
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int sched_find_first_bit(const unsigned long *b)
+{
+
+
+
+
+
+ if (b[0])
+ return __ffs(b[0]);
+ if (b[1])
+ return __ffs(b[1]) + 32;
+ if (b[2])
+ return __ffs(b[2]) + 64;
+ return __ffs(b[3]) + 96;
+
+
+
+}
+# 35 "include/asm-generic/bitops.h" 2
+# 1 "include/asm-generic/bitops/ffs.h" 1
+# 12 "include/asm-generic/bitops/ffs.h"
+static inline __attribute__((always_inline)) int ffs(int x)
+{
+ int r = 1;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff)) {
+ x >>= 16;
+ r += 16;
+ }
+ if (!(x & 0xff)) {
+ x >>= 8;
+ r += 8;
+ }
+ if (!(x & 0xf)) {
+ x >>= 4;
+ r += 4;
+ }
+ if (!(x & 3)) {
+ x >>= 2;
+ r += 2;
+ }
+ if (!(x & 1)) {
+ x >>= 1;
+ r += 1;
+ }
+ return r;
+}
+# 36 "include/asm-generic/bitops.h" 2
+# 1 "include/asm-generic/bitops/hweight.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 5 "include/asm-generic/bitops/hweight.h" 2
+
+extern unsigned int hweight32(unsigned int w);
+extern unsigned int hweight16(unsigned int w);
+extern unsigned int hweight8(unsigned int w);
+extern unsigned long hweight64(__u64 w);
+# 37 "include/asm-generic/bitops.h" 2
+# 1 "include/asm-generic/bitops/lock.h" 1
+# 38 "include/asm-generic/bitops.h" 2
+
+# 1 "include/asm-generic/bitops/atomic.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 5 "include/asm-generic/bitops/atomic.h" 2
+# 65 "include/asm-generic/bitops/atomic.h"
+static inline __attribute__((always_inline)) void set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << ((nr) % 32));
+ unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
+ unsigned long flags;
+
+ do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0); } while (0);
+ *p |= mask;
+ do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } while (0);
+}
+# 86 "include/asm-generic/bitops/atomic.h"
+static inline __attribute__((always_inline)) void clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << ((nr) % 32));
+ unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
+ unsigned long flags;
+
+ do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0); } while (0);
+ *p &= ~mask;
+ do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } while (0);
+}
+# 107 "include/asm-generic/bitops/atomic.h"
+static inline __attribute__((always_inline)) void change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << ((nr) % 32));
+ unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
+ unsigned long flags;
+
+ do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0); } while (0);
+ *p ^= mask;
+ do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } while (0);
+}
+# 127 "include/asm-generic/bitops/atomic.h"
+static inline __attribute__((always_inline)) int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << ((nr) % 32));
+ unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
+ unsigned long old;
+ unsigned long flags;
+
+ do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0); } while (0);
+ old = *p;
+ *p = old | mask;
+ do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } while (0);
+
+ return (old & mask) != 0;
+}
+# 151 "include/asm-generic/bitops/atomic.h"
+static inline __attribute__((always_inline)) int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << ((nr) % 32));
+ unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
+ unsigned long old;
+ unsigned long flags;
+
+ do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0); } while (0);
+ old = *p;
+ *p = old & ~mask;
+ do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } while (0);
+
+ return (old & mask) != 0;
+}
+# 174 "include/asm-generic/bitops/atomic.h"
+static inline __attribute__((always_inline)) int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << ((nr) % 32));
+ unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
+ unsigned long old;
+ unsigned long flags;
+
+ do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0); } while (0);
+ old = *p;
+ *p = old ^ mask;
+ do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } while (0);
+
+ return (old & mask) != 0;
+}
+# 40 "include/asm-generic/bitops.h" 2
+# 1 "include/asm-generic/bitops/non-atomic.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 5 "include/asm-generic/bitops/non-atomic.h" 2
+# 15 "include/asm-generic/bitops/non-atomic.h"
+static inline __attribute__((always_inline)) void __set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << ((nr) % 32));
+ unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
+
+ *p |= mask;
+}
+
+static inline __attribute__((always_inline)) void __clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << ((nr) % 32));
+ unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
+
+ *p &= ~mask;
+}
+# 40 "include/asm-generic/bitops/non-atomic.h"
+static inline __attribute__((always_inline)) void __change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << ((nr) % 32));
+ unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
+
+ *p ^= mask;
+}
+# 57 "include/asm-generic/bitops/non-atomic.h"
+static inline __attribute__((always_inline)) int __test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << ((nr) % 32));
+ unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+# 76 "include/asm-generic/bitops/non-atomic.h"
+static inline __attribute__((always_inline)) int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << ((nr) % 32));
+ unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+
+static inline __attribute__((always_inline)) int __test_and_change_bit(int nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << ((nr) % 32));
+ unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int test_bit(int nr, const volatile unsigned long *addr)
+{
+ return 1UL & (addr[((nr) / 32)] >> (nr & (32 -1)));
+}
+# 41 "include/asm-generic/bitops.h" 2
+# 1 "include/asm-generic/bitops/ext2-non-atomic.h" 1
+
+
+
+# 1 "include/asm-generic/bitops/le.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 5 "include/asm-generic/bitops/le.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h" 1
+# 1 "include/linux/byteorder/little_endian.h" 1
+# 12 "include/linux/byteorder/little_endian.h"
+# 1 "include/linux/swab.h" 1
+
+
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/swab.h" 1
+# 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/swab.h"
+# 1 "include/asm-generic/swab.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitsperlong.h" 1
+# 5 "include/asm-generic/swab.h" 2
+# 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/swab.h" 2
+
+
+
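+/*
+ * Blackfin byte-swap primitives built from vector shifts and PACK:
+ * __arch_swab32() is a halfword swap followed by a byte swap within each
+ * halfword.
+ */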
+static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u32 __arch_swahb32(__u32 xx)
+{
+ __u32 tmp;
+ __asm__("%1 = %0 >> 8 (V);\n\t"
+ "%0 = %0 << 8 (V);\n\t"
+ "%0 = %0 | %1;\n\t"
+ : "+d"(xx), "=&d"(tmp));
+ return xx;
+}
+
+
+static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u32 __arch_swahw32(__u32 xx)
+{
+ __u32 rv;
+ __asm__("%0 = PACK(%1.L, %1.H);\n\t": "=d"(rv): "d"(xx));
+ return rv;
+}
+
+
+static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u32 __arch_swab32(__u32 xx)
+{
+ return __arch_swahb32(__arch_swahw32(xx));
+}
+
+
+static __inline__ __attribute__((always_inline)) __attribute__((__const__)) __u16 __arch_swab16(__u16 xx)
+{
+ __u32 xw = xx;
+ __asm__("%0 <<= 8;\n %0.L = %0.L + %0.H (NS);\n": "+d"(xw));
+ return (__u16)xw;
+}
+# 7 "include/linux/swab.h" 2
+# 46 "include/linux/swab.h"
+static inline __attribute__((always_inline)) __attribute__((__const__)) __u16 __fswab16(__u16 val)
+{
+
+ return __arch_swab16(val);
+
+
+
+}
+
+static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswab32(__u32 val)
+{
+
+ return __arch_swab32(val);
+
+
+
+}
+
+static inline __attribute__((always_inline)) __attribute__((__const__)) __u64 __fswab64(__u64 val)
+{
+
+
+
+ __u32 h = val >> 32;
+ __u32 l = val & ((1ULL << 32) - 1);
+ return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h)));
+
+
+
+}
+
+static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswahw32(__u32 val)
+{
+
+ return __arch_swahw32(val);
+
+
+
+}
+
+static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswahb32(__u32 val)
+{
+
+ return __arch_swahb32(val);
+
+
+
+}
+# 148 "include/linux/swab.h"
+static inline __attribute__((always_inline)) __u16 __swab16p(const __u16 *p)
+{
+
+
+
+ return (__builtin_constant_p((__u16)(*p)) ? ((__u16)( (((__u16)(*p) & (__u16)0x00ffU) << 8) | (((__u16)(*p) & (__u16)0xff00U) >> 8))) : __fswab16(*p));
+
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) __u32 __swab32p(const __u32 *p)
+{
+
+
+
+ return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x000000ffUL) << 24) | (((__u32)(*p) & (__u32)0x0000ff00UL) << 8) | (((__u32)(*p) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(*p) & (__u32)0xff000000UL) >> 24))) : __fswab32(*p));
+
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) __u64 __swab64p(const __u64 *p)
+{
+
+
+
+ return (__builtin_constant_p((__u64)(*p)) ? ((__u64)( (((__u64)(*p) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(*p) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(*p) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(*p) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(*p) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(*p) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(*p) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(*p) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(*p));
+
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) __u32 __swahw32p(const __u32 *p)
+{
+
+
+
+ return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x0000ffffUL) << 16) | (((__u32)(*p) & (__u32)0xffff0000UL) >> 16))) : __fswahw32(*p));
+
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) __u32 __swahb32p(const __u32 *p)
+{
+
+
+
+ return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(*p) & (__u32)0xff00ff00UL) >> 8))) : __fswahb32(*p));
+
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) void __swab16s(__u16 *p)
+{
+
+
+
+ *p = __swab16p(p);
+
+}
+
+
+
+
+static inline __attribute__((always_inline)) void __swab32s(__u32 *p)
+{
+
+
+
+ *p = __swab32p(p);
+
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) void __swab64s(__u64 *p)
+{
+
+
+
+ *p = __swab64p(p);
+
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void __swahw32s(__u32 *p)
+{
+
+
+
+ *p = __swahw32p(p);
+
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void __swahb32s(__u32 *p)
+{
+
+
+
+ *p = __swahb32p(p);
+
+}
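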
+# 13 "include/linux/byteorder/little_endian.h" 2
+# 43 "include/linux/byteorder/little_endian.h"
+static inline __attribute__((always_inline)) __le64 __cpu_to_le64p(const __u64 *p)
+{
+ return ( __le64)*p;
+}
+static inline __attribute__((always_inline)) __u64 __le64_to_cpup(const __le64 *p)
+{
+ return ( __u64)*p;
+}
+static inline __attribute__((always_inline)) __le32 __cpu_to_le32p(const __u32 *p)
+{
+ return ( __le32)*p;
+}
+static inline __attribute__((always_inline)) __u32 __le32_to_cpup(const __le32 *p)
+{
+ return ( __u32)*p;
+}
+static inline __attribute__((always_inline)) __le16 __cpu_to_le16p(const __u16 *p)
+{
+ return ( __le16)*p;
+}
+static inline __attribute__((always_inline)) __u16 __le16_to_cpup(const __le16 *p)
+{
+ return ( __u16)*p;
+}
+static inline __attribute__((always_inline)) __be64 __cpu_to_be64p(const __u64 *p)
+{
+ return ( __be64)__swab64p(p);
+}
+static inline __attribute__((always_inline)) __u64 __be64_to_cpup(const __be64 *p)
+{
+ return __swab64p((__u64 *)p);
+}
+static inline __attribute__((always_inline)) __be32 __cpu_to_be32p(const __u32 *p)
+{
+ return ( __be32)__swab32p(p);
+}
+static inline __attribute__((always_inline)) __u32 __be32_to_cpup(const __be32 *p)
+{
+ return __swab32p((__u32 *)p);
+}
+static inline __attribute__((always_inline)) __be16 __cpu_to_be16p(const __u16 *p)
+{
+ return ( __be16)__swab16p(p);
+}
+static inline __attribute__((always_inline)) __u16 __be16_to_cpup(const __be16 *p)
+{
+ return __swab16p((__u16 *)p);
+}
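+/*
+ * Little-endian build: the __cpu_to_leNN / __leNN_to_cpu helpers above are
+ * plain casts, while the __cpu_to_beNN / __beNN_to_cpu helpers byte-swap.
+ * Illustration: for a __u32 x == 0x11223344, __cpu_to_be32p(&x) yields
+ * 0x44332211, whereas __cpu_to_le32p(&x) returns the value unchanged.
+ */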
+# 105 "include/linux/byteorder/little_endian.h"
+# 1 "include/linux/byteorder/generic.h" 1
+# 143 "include/linux/byteorder/generic.h"
+static inline __attribute__((always_inline)) void le16_add_cpu(__le16 *var, u16 val)
+{
+ *var = (( __le16)(__u16)((( __u16)(__le16)(*var)) + val));
+}
+
+static inline __attribute__((always_inline)) void le32_add_cpu(__le32 *var, u32 val)
+{
+ *var = (( __le32)(__u32)((( __u32)(__le32)(*var)) + val));
+}
+
+static inline __attribute__((always_inline)) void le64_add_cpu(__le64 *var, u64 val)
+{
+ *var = (( __le64)(__u64)((( __u64)(__le64)(*var)) + val));
+}
+
+static inline __attribute__((always_inline)) void be16_add_cpu(__be16 *var, u16 val)
+{
+ *var = (( __be16)(__builtin_constant_p((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val))) ? ((__u16)( (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0x00ffU) << 8) | (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0xff00U) >> 8))) : __fswab16(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val))));
+}
+
+static inline __attribute__((always_inline)) void be32_add_cpu(__be32 *var, u32 val)
+{
+ *var = (( __be32)(__builtin_constant_p((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val))) ? ((__u32)( (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val))));
+}
+
+static inline __attribute__((always_inline)) void be64_add_cpu(__be64 *var, u64 val)
+{
+ *var = (( __be64)(__builtin_constant_p((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val))) ? ((__u64)( (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
+((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
+((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val))));
+}
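+/*
+ * The beNN_add_cpu helpers above are large only because they expand
+ * cpu_to_beNN(beNN_to_cpu(*var) + val) fully inline, including the
+ * constant-folding byte-swap ternary twice; the leNN_add_cpu variants need
+ * no swap on this little-endian build and simply add in place.
+ */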
+# 106 "include/linux/byteorder/little_endian.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h" 2
+# 6 "include/asm-generic/bitops/le.h" 2
+# 5 "include/asm-generic/bitops/ext2-non-atomic.h" 2
+# 42 "include/asm-generic/bitops.h" 2
+# 1 "include/asm-generic/bitops/ext2-atomic.h" 1
+# 43 "include/asm-generic/bitops.h" 2
+# 1 "include/asm-generic/bitops/minix.h" 1
+# 44 "include/asm-generic/bitops.h" 2
+# 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitops.h" 2
+# 18 "include/linux/bitops.h" 2
+
+
+
+
+
+
+
+static __inline__ __attribute__((always_inline)) int get_bitmask_order(unsigned int count)
+{
+ int order;
+
+ order = fls(count);
+ return order;
+}
+
+static __inline__ __attribute__((always_inline)) int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
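+/*
+ * fls() is 1-based (fls(8) == 4), so get_bitmask_order(n) returns one more
+ * than the index of the highest set bit, while get_count_order(n) rounds up
+ * to ceil(log2(n)): get_count_order(16) == 4 and get_count_order(17) == 5.
+ */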
+
+static inline __attribute__((always_inline)) unsigned long hweight_long(unsigned long w)
+{
+ return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) __u32 rol32(__u32 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (32 - shift));
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) __u32 ror32(__u32 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (32 - shift));
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) __u16 rol16(__u16 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (16 - shift));
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) __u16 ror16(__u16 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (16 - shift));
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) __u8 rol8(__u8 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (8 - shift));
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) __u8 ror8(__u8 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (8 - shift));
+}
+
+static inline __attribute__((always_inline)) unsigned fls_long(unsigned long l)
+{
+ if (sizeof(l) == 4)
+ return fls(l);
+ return fls64(l);
+}
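+/*
+ * Rotation helpers, e.g. rol32(0x00000001, 4) == 0x00000010 and
+ * ror8(0x01, 1) == 0x80.  They assume 0 < shift < width; a shift of zero
+ * would make the complementary shift equal the type width, which is
+ * undefined in C.  fls_long() picks fls() or fls64() by word size.
+ */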
+# 123 "include/linux/bitops.h"
+static inline __attribute__((always_inline)) unsigned long __ffs64(u64 word)
+{
+
+ if (((u32)word) == 0UL)
+ return __ffs((u32)(word >> 32)) + 32;
+
+
+
+ return __ffs((unsigned long)word);
+}
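+/*
+ * __ffs64() returns the 0-based index of the least significant set bit; on
+ * this 32-bit build it falls back to the high word when the low word is
+ * zero, e.g. __ffs64(0x100000000ULL) == 32.
+ */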
+# 166 "include/linux/bitops.h"
+extern unsigned long find_last_bit(const unsigned long *addr,
+ unsigned long size);
+# 178 "include/linux/bitops.h"
+extern unsigned long find_next_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+# 188 "include/linux/bitops.h"
+extern unsigned long find_next_zero_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset);
+# 16 "include/linux/kernel.h" 2
+# 1 "include/linux/log2.h" 1
+# 21 "include/linux/log2.h"
+extern __attribute__((const, noreturn))
+int ____ilog2_NaN(void);
+# 31 "include/linux/log2.h"
+static inline __attribute__((always_inline)) __attribute__((const))
+int __ilog2_u32(u32 n)
+{
+ return fls(n) - 1;
+}
+
+
+
+static inline __attribute__((always_inline)) __attribute__((const))
+int __ilog2_u64(u64 n)
+{
+ return fls64(n) - 1;
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) __attribute__((const))
+bool is_power_of_2(unsigned long n)
+{
+ return (n != 0 && ((n & (n - 1)) == 0));
+}
+
+
+
+
+static inline __attribute__((always_inline)) __attribute__((const))
+unsigned long __roundup_pow_of_two(unsigned long n)
+{
+ return 1UL << fls_long(n - 1);
+}
+
+
+
+
+static inline __attribute__((always_inline)) __attribute__((const))
+unsigned long __rounddown_pow_of_two(unsigned long n)
+{
+ return 1UL << (fls_long(n) - 1);
+}
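+/*
+ * Examples: is_power_of_2(64) is true, is_power_of_2(0) is false;
+ * __roundup_pow_of_two(33) == 64 and __rounddown_pow_of_two(33) == 32.
+ * The two rounding helpers assume n >= 1.
+ */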
+# 17 "include/linux/kernel.h" 2
+
+# 1 "include/linux/ratelimit.h" 1
+
+
+# 1 "include/linux/param.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/param.h" 1
+# 1 "include/asm-generic/param.h" 1
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/param.h" 2
+# 5 "include/linux/param.h" 2
+# 4 "include/linux/ratelimit.h" 2
+
+
+
+
+struct ratelimit_state {
+ int interval;
+ int burst;
+ int printed;
+ int missed;
+ unsigned long begin;
+};
+
+
+
+
+extern int __ratelimit(struct ratelimit_state *rs);
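+/*
+ * Broadly, __ratelimit() allows at most 'burst' events per 'interval'
+ * jiffies, counting suppressed events in 'missed'; printk_ratelimit()
+ * (declared further below) is the usual wrapper around a shared
+ * ratelimit_state.
+ */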
+# 19 "include/linux/kernel.h" 2
+# 1 "include/linux/dynamic_debug.h" 1
+
+
+
+
+
+
+
+extern long long dynamic_debug_enabled;
+extern long long dynamic_debug_enabled2;
+
+
+
+
+
+
+struct _ddebug {
+
+
+
+
+ const char *modname;
+ const char *function;
+ const char *filename;
+ const char *format;
+ char primary_hash;
+ char secondary_hash;
+ unsigned int lineno:24;
+
+
+
+
+
+
+
+ unsigned int flags:8;
+} __attribute__((aligned(8)));
+
+
+int ddebug_add_module(struct _ddebug *tab, unsigned int n,
+ const char *modname);
+# 79 "include/linux/dynamic_debug.h"
+static inline __attribute__((always_inline)) int ddebug_remove_module(char *mod)
+{
+ return 0;
+}
+# 20 "include/linux/kernel.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h" 1
+# 21 "include/linux/kernel.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bug.h" 1
+# 66 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bug.h"
+# 1 "include/asm-generic/bug.h" 1
+# 10 "include/asm-generic/bug.h"
+struct bug_entry {
+
+ unsigned long bug_addr;
+
+
+
+
+
+ const char *file;
+
+
+
+ unsigned short line;
+
+ unsigned short flags;
+};
+# 61 "include/asm-generic/bug.h"
+extern void warn_slowpath_fmt(const char *file, const int line,
+ const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+extern void warn_slowpath_null(const char *file, const int line);
+# 67 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bug.h" 2
+# 22 "include/linux/kernel.h" 2
+
+extern const char linux_banner[];
+extern const char linux_proc_banner[];
+# 62 "include/linux/kernel.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/div64.h" 1
+# 1 "include/asm-generic/div64.h" 1
+# 35 "include/asm-generic/div64.h"
+extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/div64.h" 2
+# 63 "include/linux/kernel.h" 2
+# 109 "include/linux/kernel.h"
+extern int console_printk[];
+
+
+
+
+
+
+struct completion;
+struct pt_regs;
+struct user;
+
+
+extern int _cond_resched(void);
+
+
+
+
+
+
+ void __might_sleep(char *file, int line, int preempt_offset);
+# 156 "include/linux/kernel.h"
+static inline __attribute__((always_inline)) void might_fault(void)
+{
+ do { __might_sleep("include/linux/kernel.h", 158, 0); _cond_resched(); } while (0);
+}
+
+
+extern struct atomic_notifier_head panic_notifier_list;
+extern long (*panic_blink)(long time);
+ void panic(const char * fmt, ...)
+ __attribute__ ((noreturn, format (printf, 1, 2))) __attribute__((__cold__));
+extern void oops_enter(void);
+extern void oops_exit(void);
+extern int oops_may_print(void);
+ void do_exit(long error_code)
+ __attribute__((noreturn));
+ void complete_and_exit(struct completion *, long)
+ __attribute__((noreturn));
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern long simple_strtol(const char *,char **,unsigned int);
+extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
+extern long long simple_strtoll(const char *,char **,unsigned int);
+extern int strict_strtoul(const char *, unsigned int, unsigned long *);
+extern int strict_strtol(const char *, unsigned int, long *);
+extern int strict_strtoull(const char *, unsigned int, unsigned long long *);
+extern int strict_strtoll(const char *, unsigned int, long long *);
+extern int sprintf(char * buf, const char * fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern int vsprintf(char *buf, const char *, va_list)
+ __attribute__ ((format (printf, 2, 0)));
+extern int snprintf(char * buf, size_t size, const char * fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+ __attribute__ ((format (printf, 3, 0)));
+extern int scnprintf(char * buf, size_t size, const char * fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+extern int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+ __attribute__ ((format (printf, 3, 0)));
+extern char *kasprintf(gfp_t gfp, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+
+extern int sscanf(const char *, const char *, ...)
+ __attribute__ ((format (scanf, 2, 3)));
+extern int vsscanf(const char *, const char *, va_list)
+ __attribute__ ((format (scanf, 2, 0)));
+
+extern int get_option(char **str, int *pint);
+extern char *get_options(const char *str, int nints, int *ints);
+extern unsigned long long memparse(const char *ptr, char **retptr);
+
+extern int core_kernel_text(unsigned long addr);
+extern int __kernel_text_address(unsigned long addr);
+extern int kernel_text_address(unsigned long addr);
+extern int func_ptr_is_kernel_text(void *ptr);
+
+struct pid;
+extern struct pid *session_of_pgrp(struct pid *pgrp);
+# 239 "include/linux/kernel.h"
+ int vprintk(const char *fmt, va_list args)
+ __attribute__ ((format (printf, 1, 0)));
+ int printk(const char * fmt, ...)
+ __attribute__ ((format (printf, 1, 2))) __attribute__((__cold__));
+
+extern struct ratelimit_state printk_ratelimit_state;
+extern int printk_ratelimit(void);
+extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+ unsigned int interval_msec);
+
+extern int printk_delay_msec;
+# 263 "include/linux/kernel.h"
+void log_buf_kexec_setup(void);
+# 284 "include/linux/kernel.h"
+extern int printk_needs_cpu(int cpu);
+extern void printk_tick(void);
+
+extern void __attribute__((format(printf, 1, 2)))
+ early_printk(const char *fmt, ...);
+
+unsigned long int_sqrt(unsigned long);
+
+static inline __attribute__((always_inline)) void console_silent(void)
+{
+ (console_printk[0]) = 0;
+}
+
+static inline __attribute__((always_inline)) void console_verbose(void)
+{
+ if ((console_printk[0]))
+ (console_printk[0]) = 15;
+}
+
+extern void bust_spinlocks(int yes);
+extern void wake_up_klogd(void);
+extern int oops_in_progress;
+extern int panic_timeout;
+extern int panic_on_oops;
+extern int panic_on_unrecovered_nmi;
+extern int panic_on_io_nmi;
+extern const char *print_tainted(void);
+extern void add_taint(unsigned flag);
+extern int test_taint(unsigned flag);
+extern unsigned long get_taint(void);
+extern int root_mountflags;
+
+
+extern enum system_states {
+ SYSTEM_BOOTING,
+ SYSTEM_RUNNING,
+ SYSTEM_HALT,
+ SYSTEM_POWER_OFF,
+ SYSTEM_RESTART,
+ SYSTEM_SUSPEND_DISK,
+} system_state;
+# 338 "include/linux/kernel.h"
+extern void dump_stack(void) __attribute__((__cold__));
+
+enum {
+ DUMP_PREFIX_NONE,
+ DUMP_PREFIX_ADDRESS,
+ DUMP_PREFIX_OFFSET
+};
+extern void hex_dump_to_buffer(const void *buf, size_t len,
+ int rowsize, int groupsize,
+ char *linebuf, size_t linebuflen, bool ascii);
+extern void print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
+ const void *buf, size_t len);
+
+extern const char hex_asc[];
+
+
+
+static inline __attribute__((always_inline)) char *pack_hex_byte(char *buf, u8 byte)
+{
+ *buf++ = hex_asc[((byte) & 0xf0) >> 4];
+ *buf++ = hex_asc[((byte) & 0x0f)];
+ return buf;
+}
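+/*
+ * pack_hex_byte() emits two lower-case hex digits from hex_asc[] and
+ * returns the advanced buffer pointer, e.g. pack_hex_byte(buf, 0x2a)
+ * stores '2' then 'a' and returns buf + 2.
+ */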
+# 429 "include/linux/kernel.h"
+void tracing_on(void);
+void tracing_off(void);
+
+void tracing_off_permanent(void);
+int tracing_is_on(void);
+
+
+
+
+
+
+
+extern void tracing_start(void);
+extern void tracing_stop(void);
+extern void ftrace_off_permanent(void);
+
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
+
+static inline __attribute__((always_inline)) void __attribute__ ((format (printf, 1, 2)))
+____trace_printk_check_format(const char *fmt, ...)
+{
+}
+# 488 "include/linux/kernel.h"
+extern int
+__trace_bprintk(unsigned long ip, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+
+extern int
+__trace_printk(unsigned long ip, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+# 513 "include/linux/kernel.h"
+extern int
+__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern int
+__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern void ftrace_dump(void);
+# 657 "include/linux/kernel.h"
+struct sysinfo;
+extern int do_sysinfo(struct sysinfo *info);
+# 669 "include/linux/kernel.h"
+struct sysinfo {
+ long uptime;
+ unsigned long loads[3];
+ unsigned long totalram;
+ unsigned long freeram;
+ unsigned long sharedram;
+ unsigned long bufferram;
+ unsigned long totalswap;
+ unsigned long freeswap;
+ unsigned short procs;
+ unsigned short pad;
+ unsigned long totalhigh;
+ unsigned long freehigh;
+ unsigned int mem_unit;
+ char _f[20-2*sizeof(long)-sizeof(int)];
+};
+# 5 "include/linux/cache.h" 2
+# 8 "include/linux/time.h" 2
+# 1 "include/linux/seqlock.h" 1
+# 29 "include/linux/seqlock.h"
+# 1 "include/linux/spinlock.h" 1
+# 50 "include/linux/spinlock.h"
+# 1 "include/linux/preempt.h" 1
+# 9 "include/linux/preempt.h"
+# 1 "include/linux/thread_info.h" 1
+# 12 "include/linux/thread_info.h"
+struct timespec;
+struct compat_timespec;
+
+
+
+
+struct restart_block {
+ long (*fn)(struct restart_block *);
+ union {
+ struct {
+ unsigned long arg0, arg1, arg2, arg3;
+ };
+
+ struct {
+ u32 *uaddr;
+ u32 val;
+ u32 flags;
+ u32 bitset;
+ u64 time;
+ u32 *uaddr2;
+ } futex;
+
+ struct {
+ clockid_t index;
+ struct timespec *rmtp;
+
+
+
+ u64 expires;
+ } nanosleep;
+
+ struct {
+ struct pollfd *ufds;
+ int nfds;
+ int has_timeout;
+ unsigned long tv_sec;
+ unsigned long tv_nsec;
+ } poll;
+ };
+};
+
+extern long do_no_restart_syscall(struct restart_block *parm);
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h" 1
+# 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" 1
+# 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h"
+# 1 "include/asm-generic/page.h" 1
+# 23 "include/asm-generic/page.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/setup.h" 1
+# 1 "include/asm-generic/setup.h" 1
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/setup.h" 2
+# 24 "include/asm-generic/page.h" 2
+# 39 "include/asm-generic/page.h"
+typedef struct {
+ unsigned long pte;
+} pte_t;
+typedef struct {
+ unsigned long pmd[16];
+} pmd_t;
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+typedef struct page *pgtable_t;
+# 63 "include/asm-generic/page.h"
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+# 96 "include/asm-generic/page.h"
+# 1 "include/asm-generic/memory_model.h" 1
+# 97 "include/asm-generic/page.h" 2
+# 1 "include/asm-generic/getorder.h" 1
+# 9 "include/asm-generic/getorder.h"
+static inline __attribute__((always_inline)) __attribute__((__const__)) int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size - 1) >> (12 - 1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
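+/*
+ * The literal 12 above is the page shift for the 4 KiB pages used here, so
+ * get_order() returns the smallest n such that 2^n pages cover 'size':
+ * get_order(4096) == 0, get_order(4097) == 1, get_order(16384) == 2.
+ */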
+# 98 "include/asm-generic/page.h" 2
+# 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/page.h" 2
+# 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/entry.h" 1
+# 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/entry.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/setup.h" 1
+# 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/entry.h" 2
+# 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h" 2
+# 31 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h"
+typedef unsigned long mm_segment_t;
+
+
+
+
+
+
+struct thread_info {
+ struct task_struct *task;
+ struct exec_domain *exec_domain;
+ unsigned long flags;
+ int cpu;
+ int preempt_count;
+ mm_segment_t addr_limit;
+ struct restart_block restart_block;
+
+ struct l1_scratch_task_info l1_task_info;
+
+};
+# 72 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/thread_info.h"
+__attribute__((__const__))
+static inline __attribute__((always_inline)) struct thread_info *current_thread_info(void)
+{
+ struct thread_info *ti;
+ __asm__("%0 = sp;" : "=da"(ti) :
+ );
+ return (struct thread_info *)((long)ti & ~((long)8192 -1));
+}
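+/*
+ * Blackfin keeps struct thread_info at the base of the 8 KiB kernel stack,
+ * so current_thread_info() recovers it by masking the stack pointer with
+ * ~(8192 - 1).
+ */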
+# 57 "include/linux/thread_info.h" 2
+# 65 "include/linux/thread_info.h"
+static inline __attribute__((always_inline)) void set_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ set_bit(flag, (unsigned long *)&ti->flags);
+}
+
+static inline __attribute__((always_inline)) void clear_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ clear_bit(flag, (unsigned long *)&ti->flags);
+}
+
+static inline __attribute__((always_inline)) int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ return test_and_set_bit(flag, (unsigned long *)&ti->flags);
+}
+
+static inline __attribute__((always_inline)) int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
+}
+
+static inline __attribute__((always_inline)) int test_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ return test_bit(flag, (unsigned long *)&ti->flags);
+}
+# 122 "include/linux/thread_info.h"
+static inline __attribute__((always_inline)) void set_restore_sigmask(void)
+{
+ set_ti_thread_flag(current_thread_info(), 5);
+ set_ti_thread_flag(current_thread_info(), 1);
+}
+# 10 "include/linux/preempt.h" 2
+# 51 "include/linux/spinlock.h" 2
+
+
+
+
+# 1 "include/linux/stringify.h" 1
+# 56 "include/linux/spinlock.h" 2
+# 1 "include/linux/bottom_half.h" 1
+
+
+
+extern void local_bh_disable(void);
+extern void _local_bh_enable(void);
+extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
+# 57 "include/linux/spinlock.h" 2
+# 80 "include/linux/spinlock.h"
+# 1 "include/linux/spinlock_types.h" 1
+# 15 "include/linux/spinlock_types.h"
+# 1 "include/linux/spinlock_types_up.h" 1
+# 17 "include/linux/spinlock_types_up.h"
+typedef struct {
+ volatile unsigned int slock;
+} raw_spinlock_t;
+# 31 "include/linux/spinlock_types_up.h"
+typedef struct {
+
+} raw_rwlock_t;
+# 16 "include/linux/spinlock_types.h" 2
+
+
+# 1 "include/linux/lockdep.h" 1
+# 12 "include/linux/lockdep.h"
+struct task_struct;
+struct lockdep_map;
+
+
+
+
+
+# 1 "include/linux/debug_locks.h" 1
+
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h" 1
+# 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h"
+# 1 "include/asm-generic/atomic.h" 1
+# 58 "include/asm-generic/atomic.h"
+static inline __attribute__((always_inline)) int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long flags;
+ int temp;
+
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0);
+ temp = v->counter;
+ temp += i;
+ v->counter = temp;
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
+
+ return temp;
+}
+# 80 "include/asm-generic/atomic.h"
+static inline __attribute__((always_inline)) int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long flags;
+ int temp;
+
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0);
+ temp = v->counter;
+ temp -= i;
+ v->counter = temp;
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
+
+ return temp;
+}
+
+static inline __attribute__((always_inline)) int atomic_add_negative(int i, atomic_t *v)
+{
+ return atomic_add_return(i, v) < 0;
+}
+
+static inline __attribute__((always_inline)) void atomic_add(int i, atomic_t *v)
+{
+ atomic_add_return(i, v);
+}
+
+static inline __attribute__((always_inline)) void atomic_sub(int i, atomic_t *v)
+{
+ atomic_sub_return(i, v);
+}
+
+static inline __attribute__((always_inline)) void atomic_inc(atomic_t *v)
+{
+ atomic_add_return(1, v);
+}
+
+static inline __attribute__((always_inline)) void atomic_dec(atomic_t *v)
+{
+ atomic_sub_return(1, v);
+}
+# 137 "include/asm-generic/atomic.h"
+static inline __attribute__((always_inline)) void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned long flags;
+
+ mask = ~mask;
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0);
+ *addr &= mask;
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
+}
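+/*
+ * These generic atomics (used on this uniprocessor Blackfin build) get
+ * their atomicity from the irq save/restore pairs wrapped around plain
+ * read-modify-write sequences rather than from hardware atomic
+ * instructions.
+ */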
+# 162 "include/asm-generic/atomic.h"
+# 1 "include/asm-generic/atomic-long.h" 1
+# 11 "include/asm-generic/atomic-long.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 12 "include/asm-generic/atomic-long.h" 2
+# 141 "include/asm-generic/atomic-long.h"
+typedef atomic_t atomic_long_t;
+
+
+static inline __attribute__((always_inline)) long atomic_long_read(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)((v)->counter);
+}
+
+static inline __attribute__((always_inline)) void atomic_long_set(atomic_long_t *l, long i)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ (((v)->counter) = (i));
+}
+
+static inline __attribute__((always_inline)) void atomic_long_inc(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_inc(v);
+}
+
+static inline __attribute__((always_inline)) void atomic_long_dec(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_dec(v);
+}
+
+static inline __attribute__((always_inline)) void atomic_long_add(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_add(i, v);
+}
+
+static inline __attribute__((always_inline)) void atomic_long_sub(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_sub(i, v);
+}
+
+static inline __attribute__((always_inline)) int atomic_long_sub_and_test(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (atomic_sub_return((i), (v)) == 0);
+}
+
+static inline __attribute__((always_inline)) int atomic_long_dec_and_test(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (atomic_sub_return(1, (v)) == 0);
+}
+
+static inline __attribute__((always_inline)) int atomic_long_inc_and_test(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (atomic_add_return(1, (v)) == 0);
+}
+
+static inline __attribute__((always_inline)) int atomic_long_add_negative(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_add_negative(i, v);
+}
+
+static inline __attribute__((always_inline)) long atomic_long_add_return(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_add_return(i, v);
+}
+
+static inline __attribute__((always_inline)) long atomic_long_sub_return(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_sub_return(i, v);
+}
+
+static inline __attribute__((always_inline)) long atomic_long_inc_return(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_add_return(1, (v));
+}
+
+static inline __attribute__((always_inline)) long atomic_long_dec_return(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_sub_return(1, (v));
+}
+
+static inline __attribute__((always_inline)) long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)({ int c, old; c = ((v)->counter); while (c != (u) && (old = (((__typeof__(*((&(((v))->counter)))))__cmpxchg_local_generic(((&(((v))->counter))), (unsigned long)(((c))), (unsigned long)(((c + (a)))), sizeof(*((&(((v))->counter)))))))) != c) c = old; c != (u); });
+}
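+/*
+ * On this 32-bit configuration atomic_long_t is just atomic_t, so every
+ * helper above casts and forwards.  atomic_long_add_unless() is a cmpxchg
+ * loop: it adds 'a' only while the current value differs from 'u' and
+ * returns nonzero if the add was performed.
+ */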
+# 163 "include/asm-generic/atomic.h" 2
+# 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/atomic.h" 2
+# 6 "include/linux/debug_locks.h" 2
+
+
+struct task_struct;
+
+extern int debug_locks;
+extern int debug_locks_silent;
+
+
+static inline __attribute__((always_inline)) int __debug_locks_off(void)
+{
+ return ((__typeof__(*(&debug_locks)))__xchg((unsigned long)(0), (&debug_locks), sizeof(*(&debug_locks))));
+}
+
+
+
+
+extern int debug_locks_off(void);
+# 43 "include/linux/debug_locks.h"
+ extern void locking_selftest(void);
+
+
+
+
+struct task_struct;
+
+
+extern void debug_show_all_locks(void);
+extern void __debug_show_held_locks(struct task_struct *task);
+extern void debug_show_held_locks(struct task_struct *task);
+extern void debug_check_no_locks_freed(const void *from, unsigned long len);
+extern void debug_check_no_locks_held(struct task_struct *task);
+# 20 "include/linux/lockdep.h" 2
+# 1 "include/linux/stacktrace.h" 1
+
+
+
+struct task_struct;
+
+
+struct task_struct;
+
+struct stack_trace {
+ unsigned int nr_entries, max_entries;
+ unsigned long *entries;
+ int skip;
+};
+
+extern void save_stack_trace(struct stack_trace *trace);
+extern void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp);
+extern void save_stack_trace_tsk(struct task_struct *tsk,
+ struct stack_trace *trace);
+
+extern void print_stack_trace(struct stack_trace *trace, int spaces);
+# 21 "include/linux/lockdep.h" 2
+# 35 "include/linux/lockdep.h"
+struct lockdep_subclass_key {
+ char __one_byte;
+} __attribute__ ((__packed__));
+
+struct lock_class_key {
+ struct lockdep_subclass_key subkeys[8UL];
+};
+
+
+
+
+
+
+struct lock_class {
+
+
+
+ struct list_head hash_entry;
+
+
+
+
+ struct list_head lock_entry;
+
+ struct lockdep_subclass_key *key;
+ unsigned int subclass;
+ unsigned int dep_gen_id;
+
+
+
+
+ unsigned long usage_mask;
+ struct stack_trace usage_traces[(1+3*4)];
+
+
+
+
+
+
+ struct list_head locks_after, locks_before;
+
+
+
+
+
+ unsigned int version;
+
+
+
+
+ unsigned long ops;
+
+ const char *name;
+ int name_version;
+
+
+
+
+
+};
+# 133 "include/linux/lockdep.h"
+struct lockdep_map {
+ struct lock_class_key *key;
+ struct lock_class *class_cache;
+ const char *name;
+
+
+
+
+};
+
+
+
+
+
+struct lock_list {
+ struct list_head entry;
+ struct lock_class *class;
+ struct stack_trace trace;
+ int distance;
+
+
+
+
+
+ struct lock_list *parent;
+};
+
+
+
+
+struct lock_chain {
+ u8 irq_context;
+ u8 depth;
+ u16 base;
+ struct list_head entry;
+ u64 chain_key;
+};
+# 179 "include/linux/lockdep.h"
+struct held_lock {
+# 194 "include/linux/lockdep.h"
+ u64 prev_chain_key;
+ unsigned long acquire_ip;
+ struct lockdep_map *instance;
+ struct lockdep_map *nest_lock;
+
+
+
+
+ unsigned int class_idx:13;
+# 216 "include/linux/lockdep.h"
+ unsigned int irq_context:2;
+ unsigned int trylock:1;
+
+ unsigned int read:2;
+ unsigned int check:2;
+ unsigned int hardirqs_off:1;
+ unsigned int references:11;
+};
+
+
+
+
+extern void lockdep_init(void);
+extern void lockdep_info(void);
+extern void lockdep_reset(void);
+extern void lockdep_reset_lock(struct lockdep_map *lock);
+extern void lockdep_free_key_range(void *start, unsigned long size);
+extern void lockdep_sys_exit(void);
+
+extern void lockdep_off(void);
+extern void lockdep_on(void);
+
+
+
+
+
+
+
+extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass);
+# 274 "include/linux/lockdep.h"
+static inline __attribute__((always_inline)) int lockdep_match_key(struct lockdep_map *lock,
+ struct lock_class_key *key)
+{
+ return lock->key == key;
+}
+# 295 "include/linux/lockdep.h"
+extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ int trylock, int read, int check,
+ struct lockdep_map *nest_lock, unsigned long ip);
+
+extern void lock_release(struct lockdep_map *lock, int nested,
+ unsigned long ip);
+
+
+
+extern int lock_is_held(struct lockdep_map *lock);
+
+extern void lock_set_class(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, unsigned int subclass,
+ unsigned long ip);
+
+static inline __attribute__((always_inline)) void lock_set_subclass(struct lockdep_map *lock,
+ unsigned int subclass, unsigned long ip)
+{
+ lock_set_class(lock, lock->name, lock->key, subclass, ip);
+}
+
+extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_clear_current_reclaim_state(void);
+extern void lockdep_trace_alloc(gfp_t mask);
+# 416 "include/linux/lockdep.h"
+extern void early_init_irq_lock_class(void);
+# 428 "include/linux/lockdep.h"
+static inline __attribute__((always_inline)) void early_boot_irqs_off(void)
+{
+}
+static inline __attribute__((always_inline)) void early_boot_irqs_on(void)
+{
+}
+static inline __attribute__((always_inline)) void print_irqtrace_events(struct task_struct *curr)
+{
+}
+# 19 "include/linux/spinlock_types.h" 2
+
+typedef struct {
+ raw_spinlock_t raw_lock;
+
+
+
+
+ unsigned int magic, owner_cpu;
+ void *owner;
+
+
+ struct lockdep_map dep_map;
+
+} spinlock_t;
+
+
+
+typedef struct {
+ raw_rwlock_t raw_lock;
+
+
+
+
+ unsigned int magic, owner_cpu;
+ void *owner;
+
+
+ struct lockdep_map dep_map;
+
+} rwlock_t;
+# 81 "include/linux/spinlock.h" 2
+
+extern int __attribute__((section(".spinlock.text"))) generic__raw_read_trylock(raw_rwlock_t *lock);
+
+
+
+
+
+
+
+# 1 "include/linux/spinlock_up.h" 1
+# 23 "include/linux/spinlock_up.h"
+static inline __attribute__((always_inline)) void __raw_spin_lock(raw_spinlock_t *lock)
+{
+ lock->slock = 0;
+}
+
+static inline __attribute__((always_inline)) void
+__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0);
+ lock->slock = 0;
+}
+
+static inline __attribute__((always_inline)) int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+ char oldval = lock->slock;
+
+ lock->slock = 0;
+
+ return oldval > 0;
+}
+
+static inline __attribute__((always_inline)) void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+ lock->slock = 1;
+}
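+/*
+ * Uniprocessor raw spinlock ops: 'slock' only records lock state for
+ * debugging (1 = unlocked, 0 = held); actual mutual exclusion comes from
+ * the preemption/irq handling done by the spin_lock_* wrappers below.
+ */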
+# 91 "include/linux/spinlock.h" 2
+
+
+
+ extern void __spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# 109 "include/linux/spinlock.h"
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# 137 "include/linux/spinlock.h"
+static inline __attribute__((always_inline)) void smp_mb__after_lock(void) { __asm__ __volatile__("": : :"memory"); }
+# 147 "include/linux/spinlock.h"
+ extern void _raw_spin_lock(spinlock_t *lock);
+
+ extern int _raw_spin_trylock(spinlock_t *lock);
+ extern void _raw_spin_unlock(spinlock_t *lock);
+ extern void _raw_read_lock(rwlock_t *lock);
+
+ extern int _raw_read_trylock(rwlock_t *lock);
+ extern void _raw_read_unlock(rwlock_t *lock);
+ extern void _raw_write_lock(rwlock_t *lock);
+
+ extern int _raw_write_trylock(rwlock_t *lock);
+ extern void _raw_write_unlock(rwlock_t *lock);
+# 330 "include/linux/spinlock.h"
+extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+# 344 "include/linux/spinlock.h"
+# 1 "include/linux/spinlock_api_smp.h" 1
+# 18 "include/linux/spinlock_api_smp.h"
+int in_lock_functions(unsigned long addr);
+
+
+
+void __attribute__((section(".spinlock.text"))) _spin_lock(spinlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _spin_lock_nested(spinlock_t *lock, int subclass)
+ ;
+void __attribute__((section(".spinlock.text"))) _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
+ ;
+void __attribute__((section(".spinlock.text"))) _read_lock(rwlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _write_lock(rwlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _spin_lock_bh(spinlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _read_lock_bh(rwlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _write_lock_bh(rwlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _spin_lock_irq(spinlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _read_lock_irq(rwlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _write_lock_irq(rwlock_t *lock) ;
+unsigned long __attribute__((section(".spinlock.text"))) _spin_lock_irqsave(spinlock_t *lock)
+ ;
+unsigned long __attribute__((section(".spinlock.text"))) _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+ ;
+unsigned long __attribute__((section(".spinlock.text"))) _read_lock_irqsave(rwlock_t *lock)
+ ;
+unsigned long __attribute__((section(".spinlock.text"))) _write_lock_irqsave(rwlock_t *lock)
+ ;
+int __attribute__((section(".spinlock.text"))) _spin_trylock(spinlock_t *lock);
+int __attribute__((section(".spinlock.text"))) _read_trylock(rwlock_t *lock);
+int __attribute__((section(".spinlock.text"))) _write_trylock(rwlock_t *lock);
+int __attribute__((section(".spinlock.text"))) _spin_trylock_bh(spinlock_t *lock);
+void __attribute__((section(".spinlock.text"))) _spin_unlock(spinlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _read_unlock(rwlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _write_unlock(rwlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _spin_unlock_bh(spinlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _read_unlock_bh(rwlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _write_unlock_bh(rwlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _spin_unlock_irq(spinlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _read_unlock_irq(rwlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _write_unlock_irq(rwlock_t *lock) ;
+void __attribute__((section(".spinlock.text"))) _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+ ;
+void __attribute__((section(".spinlock.text"))) _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ ;
+void __attribute__((section(".spinlock.text"))) _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ ;
+# 194 "include/linux/spinlock_api_smp.h"
+static inline __attribute__((always_inline)) int __spin_trylock(spinlock_t *lock)
+{
+ do { } while (0);
+ if (_raw_spin_trylock(lock)) {
+ lock_acquire(&lock->dep_map, 0, 1, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ return 1;
+ }
+ do { } while (0);
+ return 0;
+}
+
+static inline __attribute__((always_inline)) int __read_trylock(rwlock_t *lock)
+{
+ do { } while (0);
+ if (_raw_read_trylock(lock)) {
+ lock_acquire(&lock->dep_map, 0, 1, 2, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ return 1;
+ }
+ do { } while (0);
+ return 0;
+}
+
+static inline __attribute__((always_inline)) int __write_trylock(rwlock_t *lock)
+{
+ do { } while (0);
+ if (_raw_write_trylock(lock)) {
+ lock_acquire(&lock->dep_map, 0, 1, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ return 1;
+ }
+ do { } while (0);
+ return 0;
+}
+# 234 "include/linux/spinlock_api_smp.h"
+static inline __attribute__((always_inline)) void __read_lock(rwlock_t *lock)
+{
+ do { } while (0);
+ lock_acquire(&lock->dep_map, 0, 0, 2, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ _raw_read_lock(lock);
+}
+
+static inline __attribute__((always_inline)) unsigned long __spin_lock_irqsave(spinlock_t *lock)
+{
+ unsigned long flags;
+
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0);
+ do { } while (0);
+ lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+
+
+
+
+
+
+ _raw_spin_lock(lock);
+
+
+
+ return flags;
+}
+
+static inline __attribute__((always_inline)) void __spin_lock_irq(spinlock_t *lock)
+{
+ do { raw_local_irq_disable(); do { } while (0); } while (0);
+ do { } while (0);
+ lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ _raw_spin_lock(lock);
+}
+
+static inline __attribute__((always_inline)) void __spin_lock_bh(spinlock_t *lock)
+{
+ local_bh_disable();
+ do { } while (0);
+ lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ _raw_spin_lock(lock);
+}
+
+static inline __attribute__((always_inline)) unsigned long __read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0);
+ do { } while (0);
+ lock_acquire(&lock->dep_map, 0, 0, 2, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ (_raw_read_lock)((lock));
+
+ return flags;
+}
+
+static inline __attribute__((always_inline)) void __read_lock_irq(rwlock_t *lock)
+{
+ do { raw_local_irq_disable(); do { } while (0); } while (0);
+ do { } while (0);
+ lock_acquire(&lock->dep_map, 0, 0, 2, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ _raw_read_lock(lock);
+}
+
+static inline __attribute__((always_inline)) void __read_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ do { } while (0);
+ lock_acquire(&lock->dep_map, 0, 0, 2, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ _raw_read_lock(lock);
+}
+
+static inline __attribute__((always_inline)) unsigned long __write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0);
+ do { } while (0);
+ lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ (_raw_write_lock)((lock));
+
+ return flags;
+}
+
+static inline __attribute__((always_inline)) void __write_lock_irq(rwlock_t *lock)
+{
+ do { raw_local_irq_disable(); do { } while (0); } while (0);
+ do { } while (0);
+ lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ _raw_write_lock(lock);
+}
+
+static inline __attribute__((always_inline)) void __write_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ do { } while (0);
+ lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ _raw_write_lock(lock);
+}
+
+static inline __attribute__((always_inline)) void __spin_lock(spinlock_t *lock)
+{
+ do { } while (0);
+ lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ _raw_spin_lock(lock);
+}
+
+static inline __attribute__((always_inline)) void __write_lock(rwlock_t *lock)
+{
+ do { } while (0);
+ lock_acquire(&lock->dep_map, 0, 0, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ _raw_write_lock(lock);
+}
+
+
+
+static inline __attribute__((always_inline)) void __spin_unlock(spinlock_t *lock)
+{
+ lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
+ _raw_spin_unlock(lock);
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) void __write_unlock(rwlock_t *lock)
+{
+ lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
+ _raw_write_unlock(lock);
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) void __read_unlock(rwlock_t *lock)
+{
+ lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
+ _raw_read_unlock(lock);
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) void __spin_unlock_irqrestore(spinlock_t *lock,
+ unsigned long flags)
+{
+ lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
+ _raw_spin_unlock(lock);
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) void __spin_unlock_irq(spinlock_t *lock)
+{
+ lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
+ _raw_spin_unlock(lock);
+ do { do { } while (0); raw_local_irq_enable(); } while (0);
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) void __spin_unlock_bh(spinlock_t *lock)
+{
+ lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
+ _raw_spin_unlock(lock);
+ do { } while (0);
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline __attribute__((always_inline)) void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
+ _raw_read_unlock(lock);
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) void __read_unlock_irq(rwlock_t *lock)
+{
+ lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
+ _raw_read_unlock(lock);
+ do { do { } while (0); raw_local_irq_enable(); } while (0);
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) void __read_unlock_bh(rwlock_t *lock)
+{
+ lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
+ _raw_read_unlock(lock);
+ do { } while (0);
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline __attribute__((always_inline)) void __write_unlock_irqrestore(rwlock_t *lock,
+ unsigned long flags)
+{
+ lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
+ _raw_write_unlock(lock);
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) void __write_unlock_irq(rwlock_t *lock)
+{
+ lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
+ _raw_write_unlock(lock);
+ do { do { } while (0); raw_local_irq_enable(); } while (0);
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) void __write_unlock_bh(rwlock_t *lock)
+{
+ lock_release(&lock->dep_map, 1, (unsigned long)__builtin_return_address(0));
+ _raw_write_unlock(lock);
+ do { } while (0);
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline __attribute__((always_inline)) int __spin_trylock_bh(spinlock_t *lock)
+{
+ local_bh_disable();
+ do { } while (0);
+ if (_raw_spin_trylock(lock)) {
+ lock_acquire(&lock->dep_map, 0, 1, 0, 1, ((void *)0), (unsigned long)__builtin_return_address(0));
+ return 1;
+ }
+ do { } while (0);
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ return 0;
+}
+# 345 "include/linux/spinlock.h" 2
+# 30 "include/linux/seqlock.h" 2
+
+
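+/*
+ * A seqlock pairs a sequence counter with a spinlock.  Writers serialize on
+ * the lock and bump the sequence before and after the update, so the count
+ * is odd while a write is in flight.  Readers never block: they sample the
+ * sequence, copy the data, and retry if the count was odd or has changed.
+ */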
+typedef struct {
+ unsigned sequence;
+ spinlock_t lock;
+} seqlock_t;
+# 60 "include/linux/seqlock.h"
+static inline __attribute__((always_inline)) void write_seqlock(seqlock_t *sl)
+{
+ _spin_lock(&sl->lock);
+ ++sl->sequence;
+ __asm__ __volatile__("": : :"memory");
+}
+
+static inline __attribute__((always_inline)) void write_sequnlock(seqlock_t *sl)
+{
+ __asm__ __volatile__("": : :"memory");
+ sl->sequence++;
+ _spin_unlock(&sl->lock);
+}
+
+static inline __attribute__((always_inline)) int write_tryseqlock(seqlock_t *sl)
+{
+ int ret = (_spin_trylock(&sl->lock));
+
+ if (ret) {
+ ++sl->sequence;
+ __asm__ __volatile__("": : :"memory");
+ }
+ return ret;
+}
+
+
+static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned read_seqbegin(const seqlock_t *sl)
+{
+ unsigned ret;
+
+repeat:
+ ret = sl->sequence;
+ __asm__ __volatile__("": : :"memory");
+ if (__builtin_expect(!!(ret & 1), 0)) {
+ __asm__ __volatile__("": : :"memory");
+ goto repeat;
+ }
+
+ return ret;
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) __attribute__((always_inline)) int read_seqretry(const seqlock_t *sl, unsigned start)
+{
+ __asm__ __volatile__("": : :"memory");
+
+ return (sl->sequence != start);
+}
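+/*
+ * A typical lockless reader, sketched here for illustration only
+ * (xtime and xtime_lock are declared further down in linux/time.h):
+ *
+ *    unsigned seq;
+ *    struct timespec ts;
+ *    do {
+ *        seq = read_seqbegin(&xtime_lock);
+ *        ts = xtime;                        copy the protected data
+ *    } while (read_seqretry(&xtime_lock, seq));
+ */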
+# 121 "include/linux/seqlock.h"
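+/*
+ * seqcount_t is the lock-free half of the above: just the sequence counter,
+ * with write-side serialization left entirely to the caller.
+ */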
+typedef struct seqcount {
+ unsigned sequence;
+} seqcount_t;
+
+
+
+
+
+static inline __attribute__((always_inline)) unsigned read_seqcount_begin(const seqcount_t *s)
+{
+ unsigned ret;
+
+repeat:
+ ret = s->sequence;
+ __asm__ __volatile__("": : :"memory");
+ if (__builtin_expect(!!(ret & 1), 0)) {
+ __asm__ __volatile__("": : :"memory");
+ goto repeat;
+ }
+ return ret;
+}
+
+
+
+
+static inline __attribute__((always_inline)) int read_seqcount_retry(const seqcount_t *s, unsigned start)
+{
+ __asm__ __volatile__("": : :"memory");
+
+ return s->sequence != start;
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void write_seqcount_begin(seqcount_t *s)
+{
+ s->sequence++;
+ __asm__ __volatile__("": : :"memory");
+}
+
+static inline __attribute__((always_inline)) void write_seqcount_end(seqcount_t *s)
+{
+ __asm__ __volatile__("": : :"memory");
+ s->sequence++;
+}
+# 9 "include/linux/time.h" 2
+# 1 "include/linux/math64.h" 1
+
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/div64.h" 1
+# 6 "include/linux/math64.h" 2
+# 41 "include/linux/math64.h"
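+/*
+ * div_u64_rem() divides a 64-bit dividend by a 32-bit divisor via the
+ * architecture do_div() helper: dividends that already fit in 32 bits take
+ * the fast native-division path, everything else goes through __div64_32().
+ */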
+static inline __attribute__((always_inline)) u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+{
+ *remainder = ({ uint32_t __base = (divisor); uint32_t __rem; (void)(((typeof((dividend)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((dividend) >> 32) == 0), 1)) { __rem = (uint32_t)(dividend) % __base; (dividend) = (uint32_t)(dividend) / __base; } else __rem = __div64_32(&(dividend), __base); __rem; });
+ return dividend;
+}
+
+
+
+extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
+
+
+
+extern u64 div64_u64(u64 dividend, u64 divisor);
+# 66 "include/linux/math64.h"
+static inline __attribute__((always_inline)) u64 div_u64(u64 dividend, u32 divisor)
+{
+ u32 remainder;
+ return div_u64_rem(dividend, divisor, &remainder);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) s64 div_s64(s64 dividend, s32 divisor)
+{
+ s32 remainder;
+ return div_s64_rem(dividend, divisor, &remainder);
+}
+
+
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
+
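+/*
+ * __iter_div_u64_rem() divides by repeated subtraction, so it is only
+ * appropriate when the quotient is known to be tiny (see timespec_add_ns()
+ * below).  The empty asm() keeps gcc from turning the loop back into a
+ * full 64-bit division.
+ */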
+static inline __attribute__((always_inline)) __attribute__((always_inline)) u32
+__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+ u32 ret = 0;
+
+ while (dividend >= divisor) {
+
+
+ asm("" : "+rm"(dividend));
+
+ dividend -= divisor;
+ ret++;
+ }
+
+ *remainder = dividend;
+
+ return ret;
+}
+# 10 "include/linux/time.h" 2
+
+
+
+
+struct timespec {
+ __kernel_time_t tv_sec;
+ long tv_nsec;
+};
+
+
+struct timeval {
+ __kernel_time_t tv_sec;
+ __kernel_suseconds_t tv_usec;
+};
+
+struct timezone {
+ int tz_minuteswest;
+ int tz_dsttime;
+};
+
+
+
+extern struct timezone sys_tz;
+# 45 "include/linux/time.h"
+static inline __attribute__((always_inline)) int timespec_equal(const struct timespec *a,
+ const struct timespec *b)
+{
+ return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
+{
+ if (lhs->tv_sec < rhs->tv_sec)
+ return -1;
+ if (lhs->tv_sec > rhs->tv_sec)
+ return 1;
+ return lhs->tv_nsec - rhs->tv_nsec;
+}
+
+static inline __attribute__((always_inline)) int timeval_compare(const struct timeval *lhs, const struct timeval *rhs)
+{
+ if (lhs->tv_sec < rhs->tv_sec)
+ return -1;
+ if (lhs->tv_sec > rhs->tv_sec)
+ return 1;
+ return lhs->tv_usec - rhs->tv_usec;
+}
+
+extern unsigned long mktime(const unsigned int year, const unsigned int mon,
+ const unsigned int day, const unsigned int hour,
+ const unsigned int min, const unsigned int sec);
+
+extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
+extern struct timespec timespec_add_safe(const struct timespec lhs,
+ const struct timespec rhs);
+
+
+
+
+static inline __attribute__((always_inline)) struct timespec timespec_sub(struct timespec lhs,
+ struct timespec rhs)
+{
+ struct timespec ts_delta;
+ set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
+ lhs.tv_nsec - rhs.tv_nsec);
+ return ts_delta;
+}
+
+
+
+
+
+
+
+extern struct timespec xtime;
+extern struct timespec wall_to_monotonic;
+extern seqlock_t xtime_lock;
+
+extern void read_persistent_clock(struct timespec *ts);
+extern void read_boot_clock(struct timespec *ts);
+extern int update_persistent_clock(struct timespec now);
+extern int no_sync_cmos_clock;
+void timekeeping_init(void);
+extern int timekeeping_suspended;
+
+unsigned long get_seconds(void);
+struct timespec current_kernel_time(void);
+struct timespec __current_kernel_time(void);
+struct timespec get_monotonic_coarse(void);
+# 131 "include/linux/time.h"
+static inline __attribute__((always_inline)) u32 arch_gettimeoffset(void) { return 0; }
+
+
+extern void do_gettimeofday(struct timeval *tv);
+extern int do_settimeofday(struct timespec *tv);
+extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
+
+extern long do_utimes(int dfd, char *filename, struct timespec *times, int flags);
+struct itimerval;
+extern int do_setitimer(int which, struct itimerval *value,
+ struct itimerval *ovalue);
+extern unsigned int alarm_setitimer(unsigned int seconds);
+extern int do_getitimer(int which, struct itimerval *value);
+extern void getnstimeofday(struct timespec *tv);
+extern void getrawmonotonic(struct timespec *ts);
+extern void getboottime(struct timespec *ts);
+extern void monotonic_to_bootbased(struct timespec *ts);
+
+extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
+extern int timekeeping_valid_for_hres(void);
+extern void update_wall_time(void);
+extern void update_xtime_cache(u64 nsec);
+extern void timekeeping_leap_insert(int leapsecond);
+
+struct tms;
+extern void do_sys_times(struct tms *);
+
+
+
+
+
+struct tm {
+
+
+
+
+ int tm_sec;
+
+ int tm_min;
+
+ int tm_hour;
+
+ int tm_mday;
+
+ int tm_mon;
+
+ long tm_year;
+
+ int tm_wday;
+
+ int tm_yday;
+};
+
+void time_to_tm(time_t totalsecs, int offset, struct tm *result);
+# 193 "include/linux/time.h"
+static inline __attribute__((always_inline)) s64 timespec_to_ns(const struct timespec *ts)
+{
+ return ((s64) ts->tv_sec * 1000000000L) + ts->tv_nsec;
+}
+# 205 "include/linux/time.h"
+static inline __attribute__((always_inline)) s64 timeval_to_ns(const struct timeval *tv)
+{
+ return ((s64) tv->tv_sec * 1000000000L) +
+ tv->tv_usec * 1000L;
+}
+
+
+
+
+
+
+
+extern struct timespec ns_to_timespec(const s64 nsec);
+
+
+
+
+
+
+
+extern struct timeval ns_to_timeval(const s64 nsec);
+# 235 "include/linux/time.h"
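+/*
+ * timespec_add_ns() normalizes with __iter_div_u64_rem(), so it assumes the
+ * added nanoseconds amount to no more than a handful of seconds.
+ */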
+static inline __attribute__((always_inline)) __attribute__((always_inline)) void timespec_add_ns(struct timespec *a, u64 ns)
+{
+ a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, 1000000000L, &ns);
+ a->tv_nsec = ns;
+}
+# 258 "include/linux/time.h"
+struct itimerspec {
+ struct timespec it_interval;
+ struct timespec it_value;
+};
+
+struct itimerval {
+ struct timeval it_interval;
+ struct timeval it_value;
+};
+# 61 "include/linux/stat.h" 2
+
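+/*
+ * kstat is the kernel-internal stat buffer: filled in by a filesystem's
+ * getattr path and later converted to whichever userspace stat layout the
+ * syscall needs.
+ */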
+struct kstat {
+ u64 ino;
+ dev_t dev;
+ umode_t mode;
+ unsigned int nlink;
+ uid_t uid;
+ gid_t gid;
+ dev_t rdev;
+ loff_t size;
+ struct timespec atime;
+ struct timespec mtime;
+ struct timespec ctime;
+ unsigned long blksize;
+ unsigned long long blocks;
+};
+# 11 "include/linux/module.h" 2
+
+
+# 1 "include/linux/kmod.h" 1
+# 22 "include/linux/kmod.h"
+# 1 "include/linux/gfp.h" 1
+
+
+
+# 1 "include/linux/mmzone.h" 1
+# 9 "include/linux/mmzone.h"
+# 1 "include/linux/wait.h" 1
+# 26 "include/linux/wait.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/current.h" 1
+# 1 "include/asm-generic/current.h" 1
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/current.h" 2
+# 27 "include/linux/wait.h" 2
+
+typedef struct __wait_queue wait_queue_t;
+typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
+int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
+
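+/*
+ * One entry on a wait queue: 'private' normally points at the sleeping task
+ * and 'func' (default_wake_function unless overridden) is invoked by
+ * __wake_up() to wake it.
+ */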
+struct __wait_queue {
+ unsigned int flags;
+
+ void *private;
+ wait_queue_func_t func;
+ struct list_head task_list;
+};
+
+struct wait_bit_key {
+ void *flags;
+ int bit_nr;
+};
+
+struct wait_bit_queue {
+ struct wait_bit_key key;
+ wait_queue_t wait;
+};
+
+struct __wait_queue_head {
+ spinlock_t lock;
+ struct list_head task_list;
+};
+typedef struct __wait_queue_head wait_queue_head_t;
+
+struct task_struct;
+# 80 "include/linux/wait.h"
+extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
+# 98 "include/linux/wait.h"
+static inline __attribute__((always_inline)) void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
+{
+ q->flags = 0;
+ q->private = p;
+ q->func = default_wake_function;
+}
+
+static inline __attribute__((always_inline)) void init_waitqueue_func_entry(wait_queue_t *q,
+ wait_queue_func_t func)
+{
+ q->flags = 0;
+ q->private = ((void *)0);
+ q->func = func;
+}
+
+static inline __attribute__((always_inline)) int waitqueue_active(wait_queue_head_t *q)
+{
+ return !list_empty(&q->task_list);
+}
+
+extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
+extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
+extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
+
+static inline __attribute__((always_inline)) void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
+{
+ list_add(&new->task_list, &head->task_list);
+}
+
+
+
+
+static inline __attribute__((always_inline)) void __add_wait_queue_tail(wait_queue_head_t *head,
+ wait_queue_t *new)
+{
+ list_add_tail(&new->task_list, &head->task_list);
+}
+
+static inline __attribute__((always_inline)) void __remove_wait_queue(wait_queue_head_t *head,
+ wait_queue_t *old)
+{
+ list_del(&old->task_list);
+}
+
+void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
+ void *key);
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
+void __wake_up_bit(wait_queue_head_t *, void *, int);
+int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
+int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
+void wake_up_bit(void *, int);
+int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
+int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
+wait_queue_head_t *bit_waitqueue(void *, int);
+# 409 "include/linux/wait.h"
+static inline __attribute__((always_inline)) void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
+ wait_queue_t * wait)
+{
+ wait->flags |= 0x01;
+ __add_wait_queue_tail(q, wait);
+}
+
+
+
+
+static inline __attribute__((always_inline)) void remove_wait_queue_locked(wait_queue_head_t *q,
+ wait_queue_t * wait)
+{
+ __remove_wait_queue(q, wait);
+}
+
+
+
+
+
+
+extern void sleep_on(wait_queue_head_t *q);
+extern long sleep_on_timeout(wait_queue_head_t *q,
+ signed long timeout);
+extern void interruptible_sleep_on(wait_queue_head_t *q);
+extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
+ signed long timeout);
+
+
+
+
+void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
+void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
+void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
+ unsigned int mode, void *key);
+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+# 489 "include/linux/wait.h"
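+/*
+ * wait_on_bit(): fast path first - if the bit is already clear there is
+ * nothing to wait for; otherwise fall back to the out-of-line waiter.
+ */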
+static inline __attribute__((always_inline)) int wait_on_bit(void *word, int bit,
+ int (*action)(void *), unsigned mode)
+{
+ if (!test_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit(word, bit, action, mode);
+}
+# 513 "include/linux/wait.h"
+static inline __attribute__((always_inline)) int wait_on_bit_lock(void *word, int bit,
+ int (*action)(void *), unsigned mode)
+{
+ if (!test_and_set_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit_lock(word, bit, action, mode);
+}
+# 10 "include/linux/mmzone.h" 2
+
+
+# 1 "include/linux/threads.h" 1
+# 13 "include/linux/mmzone.h" 2
+# 1 "include/linux/numa.h" 1
+# 14 "include/linux/mmzone.h" 2
+# 1 "include/linux/init.h" 1
+# 131 "include/linux/init.h"
+typedef int (*initcall_t)(void);
+typedef void (*exitcall_t)(void);
+
+extern initcall_t __con_initcall_start[], __con_initcall_end[];
+extern initcall_t __security_initcall_start[], __security_initcall_end[];
+
+
+typedef void (*ctor_fn_t)(void);
+
+
+extern int do_one_initcall(initcall_t fn);
+extern char __attribute__ ((__section__(".init.data"))) boot_command_line[];
+extern char *saved_command_line;
+extern unsigned int reset_devices;
+
+
+void setup_arch(char **);
+void prepare_namespace(void);
+
+extern void (*late_time_init)(void);
+# 15 "include/linux/mmzone.h" 2
+
+# 1 "include/linux/nodemask.h" 1
+# 95 "include/linux/nodemask.h"
+# 1 "include/linux/bitmap.h" 1
+
+
+
+
+
+
+
+# 1 "include/linux/string.h" 1
+# 15 "include/linux/string.h"
+extern char *strndup_user(const char *, long);
+extern void *memdup_user(const void *, size_t);
+
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/string.h" 1
+# 15 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/string.h"
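+/*
+ * Blackfin-optimized inline string helpers: each is a small byte-at-a-time
+ * asm loop driven by the CC condition flag.
+ */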
+extern inline __attribute__((always_inline)) char *strcpy(char *dest, const char *src)
+{
+ char *xdest = dest;
+ char temp = 0;
+
+ __asm__ __volatile__ (
+ "1:"
+ "%2 = B [%1++] (Z);"
+ "B [%0++] = %2;"
+ "CC = %2;"
+ "if cc jump 1b (bp);"
+ : "+&a" (dest), "+&a" (src), "=&d" (temp)
+ :
+ : "memory", "CC");
+
+ return xdest;
+}
+
+
+extern inline __attribute__((always_inline)) char *strncpy(char *dest, const char *src, size_t n)
+{
+ char *xdest = dest;
+ char temp = 0;
+
+ if (n == 0)
+ return xdest;
+
+ __asm__ __volatile__ (
+ "1:"
+ "%3 = B [%1++] (Z);"
+ "B [%0++] = %3;"
+ "CC = %3;"
+ "if ! cc jump 2f;"
+ "%2 += -1;"
+ "CC = %2 == 0;"
+ "if ! cc jump 1b (bp);"
+ "jump 4f;"
+ "2:"
+
+ "%3 = 0;"
+ "3:"
+ "%2 += -1;"
+ "CC = %2 == 0;"
+ "if cc jump 4f;"
+ "B [%0++] = %3;"
+ "jump 3b;"
+ "4:"
+ : "+&a" (dest), "+&a" (src), "+&da" (n), "=&d" (temp)
+ :
+ : "memory", "CC");
+
+ return xdest;
+}
+
+
+extern inline __attribute__((always_inline)) int strcmp(const char *cs, const char *ct)
+{
+
+
+
+ int __res1, __res2;
+
+ __asm__ __volatile__ (
+ "1:"
+ "%2 = B[%0++] (Z);"
+ "%3 = B[%1++] (Z);"
+ "CC = %2 == %3;"
+ "if ! cc jump 2f;"
+ "CC = %2;"
+ "if cc jump 1b (bp);"
+ "jump.s 3f;"
+ "2:"
+ "%2 = %2 - %3;"
+ "3:"
+ : "+&a" (cs), "+&a" (ct), "=&d" (__res1), "=&d" (__res2)
+ :
+ : "memory", "CC");
+
+ return __res1;
+}
+
+
+extern inline __attribute__((always_inline)) int strncmp(const char *cs, const char *ct, size_t count)
+{
+
+
+
+ int __res1, __res2;
+
+ if (!count)
+ return 0;
+
+ __asm__ __volatile__ (
+ "1:"
+ "%3 = B[%0++] (Z);"
+ "%4 = B[%1++] (Z);"
+ "CC = %3 == %4;"
+ "if ! cc jump 3f;"
+ "CC = %3;"
+ "if ! cc jump 4f;"
+ "%2 += -1;"
+ "CC = %2 == 0;"
+ "if ! cc jump 1b;"
+ "2:"
+ "%3 = 0;"
+ "jump.s 4f;"
+ "3:"
+ "%3 = %3 - %4;"
+ "4:"
+ : "+&a" (cs), "+&a" (ct), "+&da" (count), "=&d" (__res1), "=&d" (__res2)
+ :
+ : "memory", "CC");
+
+ return __res1;
+}
+
+
+extern void *memset(void *s, int c, size_t count);
+
+extern void *memcpy(void *d, const void *s, size_t count);
+
+extern int memcmp(const void *, const void *, __kernel_size_t);
+
+extern void *memchr(const void *s, int c, size_t n);
+
+extern void *memmove(void *dest, const void *src, size_t count);
+# 22 "include/linux/string.h" 2
+# 30 "include/linux/string.h"
+size_t strlcpy(char *, const char *, size_t);
+
+
+extern char * strcat(char *, const char *);
+
+
+extern char * strncat(char *, const char *, __kernel_size_t);
+
+
+extern size_t strlcat(char *, const char *, __kernel_size_t);
+# 48 "include/linux/string.h"
+extern int strnicmp(const char *, const char *, __kernel_size_t);
+
+
+extern int strcasecmp(const char *s1, const char *s2);
+
+
+extern int strncasecmp(const char *s1, const char *s2, size_t n);
+
+
+extern char * strchr(const char *,int);
+
+
+extern char * strnchr(const char *, size_t, int);
+
+
+extern char * strrchr(const char *,int);
+
+extern char * __attribute__((warn_unused_result)) strstrip(char *);
+
+extern char * strstr(const char *,const char *);
+
+
+extern __kernel_size_t strlen(const char *);
+
+
+extern __kernel_size_t strnlen(const char *,__kernel_size_t);
+
+
+extern char * strpbrk(const char *,const char *);
+
+
+extern char * strsep(char **,const char *);
+
+
+extern __kernel_size_t strspn(const char *,const char *);
+
+
+extern __kernel_size_t strcspn(const char *,const char *);
+# 98 "include/linux/string.h"
+extern void * memscan(void *,int,__kernel_size_t);
+# 107 "include/linux/string.h"
+extern char *kstrdup(const char *s, gfp_t gfp);
+extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
+extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
+
+extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
+extern void argv_free(char **argv);
+
+extern bool sysfs_streq(const char *s1, const char *s2);
+
+
+int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
+int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __attribute__((format(printf,3,4)));
+
+
+extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
+ const void *from, size_t available);
+
+
+
+
+
+
+static inline __attribute__((always_inline)) bool strstarts(const char *str, const char *prefix)
+{
+ return strncmp(str, prefix, strlen(prefix)) == 0;
+}
+# 9 "include/linux/bitmap.h" 2
+# 87 "include/linux/bitmap.h"
+extern int __bitmap_empty(const unsigned long *bitmap, int bits);
+extern int __bitmap_full(const unsigned long *bitmap, int bits);
+extern int __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
+ int bits);
+extern void __bitmap_shift_right(unsigned long *dst,
+ const unsigned long *src, int shift, int bits);
+extern void __bitmap_shift_left(unsigned long *dst,
+ const unsigned long *src, int shift, int bits);
+extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern int __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern int __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits);
+extern int __bitmap_weight(const unsigned long *bitmap, int bits);
+
+extern int bitmap_scnprintf(char *buf, unsigned int len,
+ const unsigned long *src, int nbits);
+extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
+ unsigned long *dst, int nbits);
+extern int bitmap_parse_user(const char *ubuf, unsigned int ulen,
+ unsigned long *dst, int nbits);
+extern int bitmap_scnlistprintf(char *buf, unsigned int len,
+ const unsigned long *src, int nbits);
+extern int bitmap_parselist(const char *buf, unsigned long *maskp,
+ int nmaskbits);
+extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
+ const unsigned long *old, const unsigned long *new, int bits);
+extern int bitmap_bitremap(int oldbit,
+ const unsigned long *old, const unsigned long *new, int bits);
+extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+ const unsigned long *relmap, int bits);
+extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+ int sz, int bits);
+extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
+extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
+extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
+extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
+# 143 "include/linux/bitmap.h"
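+/*
+ * Every helper below special-cases a compile-time-constant nbits that fits
+ * in one 32-bit word (BITS_PER_LONG here), so small bitmaps reduce to plain
+ * word operations; anything larger is handed to the __bitmap_*() library
+ * routines.
+ */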
+static inline __attribute__((always_inline)) void bitmap_zero(unsigned long *dst, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ *dst = 0UL;
+ else {
+ int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+
+static inline __attribute__((always_inline)) void bitmap_fill(unsigned long *dst, int nbits)
+{
+ size_t nlongs = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)));
+ if (!(__builtin_constant_p(nbits) && (nbits) <= 32)) {
+ int len = (nlongs - 1) * sizeof(unsigned long);
+ memset(dst, 0xff, len);
+ }
+ dst[nlongs - 1] = ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
+}
+
+static inline __attribute__((always_inline)) void bitmap_copy(unsigned long *dst, const unsigned long *src,
+ int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ *dst = *src;
+ else {
+ int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long);
+ memcpy(dst, src, len);
+ }
+}
+
+static inline __attribute__((always_inline)) int bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ return (*dst = *src1 & *src2) != 0;
+ return __bitmap_and(dst, src1, src2, nbits);
+}
+
+static inline __attribute__((always_inline)) void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ *dst = *src1 | *src2;
+ else
+ __bitmap_or(dst, src1, src2, nbits);
+}
+
+static inline __attribute__((always_inline)) void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ *dst = *src1 ^ *src2;
+ else
+ __bitmap_xor(dst, src1, src2, nbits);
+}
+
+static inline __attribute__((always_inline)) int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ return (*dst = *src1 & ~(*src2)) != 0;
+ return __bitmap_andnot(dst, src1, src2, nbits);
+}
+
+static inline __attribute__((always_inline)) void bitmap_complement(unsigned long *dst, const unsigned long *src,
+ int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ *dst = ~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
+ else
+ __bitmap_complement(dst, src, nbits);
+}
+
+static inline __attribute__((always_inline)) int bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ return ! ((*src1 ^ *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
+ else
+ return __bitmap_equal(src1, src2, nbits);
+}
+
+static inline __attribute__((always_inline)) int bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ return ((*src1 & *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )) != 0;
+ else
+ return __bitmap_intersects(src1, src2, nbits);
+}
+
+static inline __attribute__((always_inline)) int bitmap_subset(const unsigned long *src1,
+ const unsigned long *src2, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ return ! ((*src1 & ~(*src2)) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
+ else
+ return __bitmap_subset(src1, src2, nbits);
+}
+
+static inline __attribute__((always_inline)) int bitmap_empty(const unsigned long *src, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ return ! (*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
+ else
+ return __bitmap_empty(src, nbits);
+}
+
+static inline __attribute__((always_inline)) int bitmap_full(const unsigned long *src, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ return ! (~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
+ else
+ return __bitmap_full(src, nbits);
+}
+
+static inline __attribute__((always_inline)) int bitmap_weight(const unsigned long *src, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ return hweight_long(*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
+ return __bitmap_weight(src, nbits);
+}
+
+static inline __attribute__((always_inline)) void bitmap_shift_right(unsigned long *dst,
+ const unsigned long *src, int n, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ *dst = *src >> n;
+ else
+ __bitmap_shift_right(dst, src, n, nbits);
+}
+
+static inline __attribute__((always_inline)) void bitmap_shift_left(unsigned long *dst,
+ const unsigned long *src, int n, int nbits)
+{
+ if ((__builtin_constant_p(nbits) && (nbits) <= 32))
+ *dst = (*src << n) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
+ else
+ __bitmap_shift_left(dst, src, n, nbits);
+}
+
+static inline __attribute__((always_inline)) int bitmap_parse(const char *buf, unsigned int buflen,
+ unsigned long *maskp, int nmaskbits)
+{
+ return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
+}
+# 96 "include/linux/nodemask.h" 2
+
+
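+/*
+ * nodemask_t wraps a bitmap of MAX_NUMNODES bits.  With a single memory
+ * node configured here (1 << 0), most of the wrappers below collapse to
+ * trivial single-word operations.
+ */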
+typedef struct { unsigned long bits[((((1 << 0)) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } nodemask_t;
+extern nodemask_t _unused_nodemask_arg_;
+
+
+static inline __attribute__((always_inline)) void __node_set(int node, volatile nodemask_t *dstp)
+{
+ set_bit(node, dstp->bits);
+}
+
+
+static inline __attribute__((always_inline)) void __node_clear(int node, volatile nodemask_t *dstp)
+{
+ clear_bit(node, dstp->bits);
+}
+
+
+static inline __attribute__((always_inline)) void __nodes_setall(nodemask_t *dstp, int nbits)
+{
+ bitmap_fill(dstp->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) void __nodes_clear(nodemask_t *dstp, int nbits)
+{
+ bitmap_zero(dstp->bits, nbits);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int __node_test_and_set(int node, nodemask_t *addr)
+{
+ return test_and_set_bit(node, addr->bits);
+}
+
+
+
+static inline __attribute__((always_inline)) void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) void __nodes_complement(nodemask_t *dstp,
+ const nodemask_t *srcp, int nbits)
+{
+ bitmap_complement(dstp->bits, srcp->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) int __nodes_equal(const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ return bitmap_equal(src1p->bits, src2p->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) int __nodes_intersects(const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) int __nodes_subset(const nodemask_t *src1p,
+ const nodemask_t *src2p, int nbits)
+{
+ return bitmap_subset(src1p->bits, src2p->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) int __nodes_empty(const nodemask_t *srcp, int nbits)
+{
+ return bitmap_empty(srcp->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) int __nodes_full(const nodemask_t *srcp, int nbits)
+{
+ return bitmap_full(srcp->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) int __nodes_weight(const nodemask_t *srcp, int nbits)
+{
+ return bitmap_weight(srcp->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) void __nodes_shift_right(nodemask_t *dstp,
+ const nodemask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) void __nodes_shift_left(nodemask_t *dstp,
+ const nodemask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) int __first_node(const nodemask_t *srcp)
+{
+ return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_bit((srcp->bits), ((1 << 0)), 0)); __min1 < __min2 ? __min1: __min2; });
+}
+
+
+static inline __attribute__((always_inline)) int __next_node(int n, const nodemask_t *srcp)
+{
+ return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_bit(srcp->bits, (1 << 0), n+1)); __min1 < __min2 ? __min1: __min2; });
+}
+# 261 "include/linux/nodemask.h"
+static inline __attribute__((always_inline)) int __first_unset_node(const nodemask_t *maskp)
+{
+ return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_zero_bit((maskp->bits), ((1 << 0)), 0)); __min1 < __min2 ? __min1: __min2; });
+
+}
+# 295 "include/linux/nodemask.h"
+static inline __attribute__((always_inline)) int __nodemask_scnprintf(char *buf, int len,
+ const nodemask_t *srcp, int nbits)
+{
+ return bitmap_scnprintf(buf, len, srcp->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) int __nodemask_parse_user(const char *buf, int len,
+ nodemask_t *dstp, int nbits)
+{
+ return bitmap_parse_user(buf, len, dstp->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) int __nodelist_scnprintf(char *buf, int len,
+ const nodemask_t *srcp, int nbits)
+{
+ return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
+{
+ return bitmap_parselist(buf, dstp->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) int __node_remap(int oldbit,
+ const nodemask_t *oldp, const nodemask_t *newp, int nbits)
+{
+ return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
+ const nodemask_t *oldp, const nodemask_t *newp, int nbits)
+{
+ bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
+ const nodemask_t *relmapp, int nbits)
+{
+ bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
+ int sz, int nbits)
+{
+ bitmap_fold(dstp->bits, origp->bits, sz, nbits);
+}
+# 369 "include/linux/nodemask.h"
+enum node_states {
+ N_POSSIBLE,
+ N_ONLINE,
+ N_NORMAL_MEMORY,
+
+
+
+ N_HIGH_MEMORY = N_NORMAL_MEMORY,
+
+ N_CPU,
+ NR_NODE_STATES
+};
+
+
+
+
+
+
+extern nodemask_t node_states[NR_NODE_STATES];
+# 432 "include/linux/nodemask.h"
+static inline __attribute__((always_inline)) int node_state(int node, enum node_states state)
+{
+ return node == 0;
+}
+
+static inline __attribute__((always_inline)) void node_set_state(int node, enum node_states state)
+{
+}
+
+static inline __attribute__((always_inline)) void node_clear_state(int node, enum node_states state)
+{
+}
+
+static inline __attribute__((always_inline)) int num_node_state(enum node_states state)
+{
+ return 1;
+}
+# 495 "include/linux/nodemask.h"
+struct nodemask_scratch {
+ nodemask_t mask1;
+ nodemask_t mask2;
+};
+# 17 "include/linux/mmzone.h" 2
+# 1 "include/linux/pageblock-flags.h" 1
+# 29 "include/linux/pageblock-flags.h"
+enum pageblock_bits {
+ PB_migrate,
+ PB_migrate_end = PB_migrate + 3 - 1,
+
+ NR_PAGEBLOCK_BITS
+};
+# 60 "include/linux/pageblock-flags.h"
+struct page;
+
+
+unsigned long get_pageblock_flags_group(struct page *page,
+ int start_bitidx, int end_bitidx);
+void set_pageblock_flags_group(struct page *page, unsigned long flags,
+ int start_bitidx, int end_bitidx);
+# 18 "include/linux/mmzone.h" 2
+# 1 "include/linux/bounds.h" 1
+# 19 "include/linux/mmzone.h" 2
+# 50 "include/linux/mmzone.h"
+extern int page_group_by_mobility_disabled;
+
+static inline __attribute__((always_inline)) int get_pageblock_migratetype(struct page *page)
+{
+ return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
+}
+
+struct free_area {
+ struct list_head free_list[5];
+ unsigned long nr_free;
+};
+
+struct pglist_data;
+# 79 "include/linux/mmzone.h"
+enum zone_stat_item {
+
+ NR_FREE_PAGES,
+ NR_LRU_BASE,
+ NR_INACTIVE_ANON = NR_LRU_BASE,
+ NR_ACTIVE_ANON,
+ NR_INACTIVE_FILE,
+ NR_ACTIVE_FILE,
+ NR_UNEVICTABLE,
+ NR_MLOCK,
+ NR_ANON_PAGES,
+ NR_FILE_MAPPED,
+
+ NR_FILE_PAGES,
+ NR_FILE_DIRTY,
+ NR_WRITEBACK,
+ NR_SLAB_RECLAIMABLE,
+ NR_SLAB_UNRECLAIMABLE,
+ NR_PAGETABLE,
+ NR_KERNEL_STACK,
+
+ NR_UNSTABLE_NFS,
+ NR_BOUNCE,
+ NR_VMSCAN_WRITE,
+ NR_WRITEBACK_TEMP,
+ NR_ISOLATED_ANON,
+ NR_ISOLATED_FILE,
+ NR_SHMEM,
+# 115 "include/linux/mmzone.h"
+ NR_VM_ZONE_STAT_ITEMS };
+# 130 "include/linux/mmzone.h"
+enum lru_list {
+ LRU_INACTIVE_ANON = 0,
+ LRU_ACTIVE_ANON = 0 + 1,
+ LRU_INACTIVE_FILE = 0 + 2,
+ LRU_ACTIVE_FILE = 0 + 2 + 1,
+ LRU_UNEVICTABLE,
+ NR_LRU_LISTS
+};
+
+
+
+
+
+static inline __attribute__((always_inline)) int is_file_lru(enum lru_list l)
+{
+ return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
+}
+
+static inline __attribute__((always_inline)) int is_active_lru(enum lru_list l)
+{
+ return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
+}
+
+static inline __attribute__((always_inline)) int is_unevictable_lru(enum lru_list l)
+{
+ return (l == LRU_UNEVICTABLE);
+}
+
+enum zone_watermarks {
+ WMARK_MIN,
+ WMARK_LOW,
+ WMARK_HIGH,
+ NR_WMARK
+};
+
+
+
+
+
+struct per_cpu_pages {
+ int count;
+ int high;
+ int batch;
+
+
+ struct list_head lists[3];
+};
+
+struct per_cpu_pageset {
+ struct per_cpu_pages pcp;
+
+
+
+
+
+
+
+};
+# 197 "include/linux/mmzone.h"
+enum zone_type {
+# 217 "include/linux/mmzone.h"
+ ZONE_DMA,
+# 232 "include/linux/mmzone.h"
+ ZONE_NORMAL,
+# 244 "include/linux/mmzone.h"
+ ZONE_MOVABLE,
+ __MAX_NR_ZONES
+};
+# 268 "include/linux/mmzone.h"
+struct zone_reclaim_stat {
+# 277 "include/linux/mmzone.h"
+ unsigned long recent_rotated[2];
+ unsigned long recent_scanned[2];
+
+
+
+
+ unsigned long nr_saved_scan[NR_LRU_LISTS];
+};
+
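+/*
+ * Per-zone memory management state: allocation watermarks, the buddy
+ * free_area[] lists and their lock, the per-CPU page caches, the LRU lists
+ * with their reclaim statistics, and the vm_stat counters.
+ */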
+struct zone {
+
+
+
+ unsigned long watermark[NR_WMARK];
+# 300 "include/linux/mmzone.h"
+ unsigned long lowmem_reserve[3];
+# 311 "include/linux/mmzone.h"
+ struct per_cpu_pageset pageset[1];
+
+
+
+
+ spinlock_t lock;
+
+
+
+
+ struct free_area free_area[14];
+
+
+
+
+
+
+ unsigned long *pageblock_flags;
+
+
+
+
+
+
+ spinlock_t lru_lock;
+ struct zone_lru {
+ struct list_head list;
+ } lru[NR_LRU_LISTS];
+
+ struct zone_reclaim_stat reclaim_stat;
+
+ unsigned long pages_scanned;
+ unsigned long flags;
+
+
+ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+# 361 "include/linux/mmzone.h"
+ int prev_priority;
+
+
+
+
+
+ unsigned int inactive_ratio;
+
+
+
+# 397 "include/linux/mmzone.h"
+ wait_queue_head_t * wait_table;
+ unsigned long wait_table_hash_nr_entries;
+ unsigned long wait_table_bits;
+
+
+
+
+ struct pglist_data *zone_pgdat;
+
+ unsigned long zone_start_pfn;
+# 418 "include/linux/mmzone.h"
+ unsigned long spanned_pages;
+ unsigned long present_pages;
+
+
+
+
+ const char *name;
+};
+
+typedef enum {
+ ZONE_ALL_UNRECLAIMABLE,
+ ZONE_RECLAIM_LOCKED,
+ ZONE_OOM_LOCKED,
+} zone_flags_t;
+
+static inline __attribute__((always_inline)) void zone_set_flag(struct zone *zone, zone_flags_t flag)
+{
+ set_bit(flag, &zone->flags);
+}
+
+static inline __attribute__((always_inline)) int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
+{
+ return test_and_set_bit(flag, &zone->flags);
+}
+
+static inline __attribute__((always_inline)) void zone_clear_flag(struct zone *zone, zone_flags_t flag)
+{
+ clear_bit(flag, &zone->flags);
+}
+
+static inline __attribute__((always_inline)) int zone_is_all_unreclaimable(const struct zone *zone)
+{
+ return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
+}
+
+static inline __attribute__((always_inline)) int zone_is_reclaim_locked(const struct zone *zone)
+{
+ return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
+}
+
+static inline __attribute__((always_inline)) int zone_is_oom_locked(const struct zone *zone)
+{
+ return test_bit(ZONE_OOM_LOCKED, &zone->flags);
+}
+# 551 "include/linux/mmzone.h"
+struct zonelist_cache;
+
+
+
+
+
+
+struct zoneref {
+ struct zone *zone;
+ int zone_idx;
+};
+# 580 "include/linux/mmzone.h"
+struct zonelist {
+ struct zonelist_cache *zlcache_ptr;
+ struct zoneref _zonerefs[((1 << 0) * 3) + 1];
+
+
+
+};
+# 598 "include/linux/mmzone.h"
+extern struct page *mem_map;
+# 612 "include/linux/mmzone.h"
+struct bootmem_data;
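+/*
+ * The per-node memory descriptor.  On this non-NUMA configuration there is
+ * exactly one instance, contig_page_data, declared near the end of mmzone.h.
+ */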
+typedef struct pglist_data {
+ struct zone node_zones[3];
+ struct zonelist node_zonelists[1];
+ int nr_zones;
+
+ struct page *node_mem_map;
+
+
+
+
+ struct bootmem_data *bdata;
+# 634 "include/linux/mmzone.h"
+ unsigned long node_start_pfn;
+ unsigned long node_present_pages;
+ unsigned long node_spanned_pages;
+
+ int node_id;
+ wait_queue_head_t kswapd_wait;
+ struct task_struct *kswapd;
+ int kswapd_max_order;
+} pg_data_t;
+# 653 "include/linux/mmzone.h"
+# 1 "include/linux/memory_hotplug.h" 1
+
+
+
+# 1 "include/linux/mmzone.h" 1
+# 5 "include/linux/memory_hotplug.h" 2
+
+# 1 "include/linux/notifier.h" 1
+# 12 "include/linux/notifier.h"
+# 1 "include/linux/errno.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/errno.h" 1
+# 1 "include/asm-generic/errno.h" 1
+
+
+
+# 1 "include/asm-generic/errno-base.h" 1
+# 5 "include/asm-generic/errno.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/errno.h" 2
+# 5 "include/linux/errno.h" 2
+# 13 "include/linux/notifier.h" 2
+# 1 "include/linux/mutex.h" 1
+# 48 "include/linux/mutex.h"
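+/*
+ * The sleeping mutex: count is 1 when unlocked, 0 when locked and negative
+ * when locked with waiters queued on wait_list (hence mutex_is_locked()
+ * simply tests count != 1).
+ */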
+struct mutex {
+
+ atomic_t count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+
+ struct thread_info *owner;
+
+
+ const char *name;
+ void *magic;
+
+
+ struct lockdep_map dep_map;
+
+};
+
+
+
+
+
+struct mutex_waiter {
+ struct list_head list;
+ struct task_struct *task;
+
+ void *magic;
+
+};
+
+
+# 1 "include/linux/mutex-debug.h" 1
+# 21 "include/linux/mutex-debug.h"
+extern void mutex_destroy(struct mutex *lock);
+# 79 "include/linux/mutex.h" 2
+# 107 "include/linux/mutex.h"
+extern void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key);
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int mutex_is_locked(struct mutex *lock)
+{
+ return ((&lock->count)->counter) != 1;
+}
+
+
+
+
+
+
+extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
+extern int __attribute__((warn_unused_result)) mutex_lock_interruptible_nested(struct mutex *lock,
+ unsigned int subclass);
+extern int __attribute__((warn_unused_result)) mutex_lock_killable_nested(struct mutex *lock,
+ unsigned int subclass);
+# 151 "include/linux/mutex.h"
+extern int mutex_trylock(struct mutex *lock);
+extern void mutex_unlock(struct mutex *lock);
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+# 14 "include/linux/notifier.h" 2
+# 1 "include/linux/rwsem.h" 1
+# 17 "include/linux/rwsem.h"
+struct rw_semaphore;
+
+
+# 1 "include/linux/rwsem-spinlock.h" 1
+# 22 "include/linux/rwsem-spinlock.h"
+struct rwsem_waiter;
+# 31 "include/linux/rwsem-spinlock.h"
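+/*
+ * The generic spinlock-based rwsem: 'activity' counts active readers, or is
+ * -1 while a writer holds the semaphore.
+ */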
+struct rw_semaphore {
+ __s32 activity;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+
+ struct lockdep_map dep_map;
+
+};
+# 53 "include/linux/rwsem-spinlock.h"
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+# 63 "include/linux/rwsem-spinlock.h"
+extern void __down_read(struct rw_semaphore *sem);
+extern int __down_read_trylock(struct rw_semaphore *sem);
+extern void __down_write(struct rw_semaphore *sem);
+extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
+extern int __down_write_trylock(struct rw_semaphore *sem);
+extern void __up_read(struct rw_semaphore *sem);
+extern void __up_write(struct rw_semaphore *sem);
+extern void __downgrade_write(struct rw_semaphore *sem);
+
+static inline __attribute__((always_inline)) int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->activity != 0);
+}
+# 21 "include/linux/rwsem.h" 2
+
+
+
+
+
+
+
+extern void down_read(struct rw_semaphore *sem);
+
+
+
+
+extern int down_read_trylock(struct rw_semaphore *sem);
+
+
+
+
+extern void down_write(struct rw_semaphore *sem);
+
+
+
+
+extern int down_write_trylock(struct rw_semaphore *sem);
+
+
+
+
+extern void up_read(struct rw_semaphore *sem);
+
+
+
+
+extern void up_write(struct rw_semaphore *sem);
+
+
+
+
+extern void downgrade_write(struct rw_semaphore *sem);
+# 74 "include/linux/rwsem.h"
+extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+
+
+
+
+
+
+extern void down_read_non_owner(struct rw_semaphore *sem);
+extern void up_read_non_owner(struct rw_semaphore *sem);
+# 15 "include/linux/notifier.h" 2
+# 1 "include/linux/srcu.h" 1
+# 30 "include/linux/srcu.h"
+struct srcu_struct_array {
+ int c[2];
+};
+
+struct srcu_struct {
+ int completed;
+ struct srcu_struct_array *per_cpu_ref;
+ struct mutex mutex;
+};
+
+
+
+
+
+
+
+int init_srcu_struct(struct srcu_struct *sp);
+void cleanup_srcu_struct(struct srcu_struct *sp);
+int srcu_read_lock(struct srcu_struct *sp);
+void srcu_read_unlock(struct srcu_struct *sp, int idx);
+void synchronize_srcu(struct srcu_struct *sp);
+long srcu_batches_completed(struct srcu_struct *sp);
+# 16 "include/linux/notifier.h" 2
+# 50 "include/linux/notifier.h"
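+/*
+ * The four notifier chain flavours below differ only in how the callback
+ * list walk is serialized: atomic (spinlock, callbacks must not sleep),
+ * blocking (rwsem), raw (caller-provided locking) and SRCU.
+ */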
+struct notifier_block {
+ int (*notifier_call)(struct notifier_block *, unsigned long, void *);
+ struct notifier_block *next;
+ int priority;
+};
+
+struct atomic_notifier_head {
+ spinlock_t lock;
+ struct notifier_block *head;
+};
+
+struct blocking_notifier_head {
+ struct rw_semaphore rwsem;
+ struct notifier_block *head;
+};
+
+struct raw_notifier_head {
+ struct notifier_block *head;
+};
+
+struct srcu_notifier_head {
+ struct mutex mutex;
+ struct srcu_struct srcu;
+ struct notifier_block *head;
+};
+# 89 "include/linux/notifier.h"
+extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
+# 115 "include/linux/notifier.h"
+extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+ struct notifier_block *nb);
+extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+ struct notifier_block *nb);
+extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
+ struct notifier_block *nb);
+extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
+ struct notifier_block *nb);
+
+extern int blocking_notifier_chain_cond_register(
+ struct blocking_notifier_head *nh,
+ struct notifier_block *nb);
+
+extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
+ struct notifier_block *nb);
+extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
+ struct notifier_block *nb);
+extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
+ struct notifier_block *nb);
+extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
+ struct notifier_block *nb);
+
+extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+ unsigned long val, void *v);
+extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
+extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+ unsigned long val, void *v);
+extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
+extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
+ unsigned long val, void *v);
+extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
+extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+ unsigned long val, void *v);
+extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
+# 165 "include/linux/notifier.h"
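+/*
+ * Round-trip a negative errno through a notifier return value: the errno is
+ * biased by NOTIFY_OK (0x0001) and tagged with NOTIFY_STOP_MASK (0x8000);
+ * notifier_to_errno() undoes this and yields 0 for ordinary returns.
+ */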
+static inline __attribute__((always_inline)) int notifier_from_errno(int err)
+{
+ return 0x8000 | (0x0001 - err);
+}
+
+
+static inline __attribute__((always_inline)) int notifier_to_errno(int ret)
+{
+ ret &= ~0x8000;
+ return ret > 0x0001 ? 0x0001 - ret : 0;
+}
+# 261 "include/linux/notifier.h"
+extern struct blocking_notifier_head reboot_notifier_list;
+# 7 "include/linux/memory_hotplug.h" 2
+
+struct page;
+struct zone;
+struct pglist_data;
+struct mem_section;
+# 165 "include/linux/memory_hotplug.h"
+static inline __attribute__((always_inline)) void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
+static inline __attribute__((always_inline)) void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
+static inline __attribute__((always_inline)) void pgdat_resize_init(struct pglist_data *pgdat) {}
+
+static inline __attribute__((always_inline)) unsigned zone_span_seqbegin(struct zone *zone)
+{
+ return 0;
+}
+static inline __attribute__((always_inline)) int zone_span_seqretry(struct zone *zone, unsigned iv)
+{
+ return 0;
+}
+static inline __attribute__((always_inline)) void zone_span_writelock(struct zone *zone) {}
+static inline __attribute__((always_inline)) void zone_span_writeunlock(struct zone *zone) {}
+static inline __attribute__((always_inline)) void zone_seqlock_init(struct zone *zone) {}
+
+static inline __attribute__((always_inline)) int mhp_notimplemented(const char *func)
+{
+ printk("<4>" "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
+ dump_stack();
+ return -38;
+}
+
+static inline __attribute__((always_inline)) void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+# 199 "include/linux/memory_hotplug.h"
+static inline __attribute__((always_inline)) int is_mem_section_removable(unsigned long pfn,
+ unsigned long nr_pages)
+{
+ return 0;
+}
+
+
+extern int add_memory(int nid, u64 start, u64 size);
+extern int arch_add_memory(int nid, u64 start, u64 size);
+extern int remove_memory(u64 start, u64 size);
+extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+ int nr_pages);
+extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
+extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
+ unsigned long pnum);
+# 654 "include/linux/mmzone.h" 2
+
+void get_zone_counts(unsigned long *active, unsigned long *inactive,
+ unsigned long *free);
+void build_all_zonelists(void);
+void wakeup_kswapd(struct zone *zone, int order);
+int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+ int classzone_idx, int alloc_flags);
+enum memmap_context {
+ MEMMAP_EARLY,
+ MEMMAP_HOTPLUG,
+};
+extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
+ unsigned long size,
+ enum memmap_context context);
+
+
+
+
+static inline __attribute__((always_inline)) void memory_present(int nid, unsigned long start, unsigned long end) {}
+# 684 "include/linux/mmzone.h"
+static inline __attribute__((always_inline)) int populated_zone(struct zone *zone)
+{
+ return (!!zone->present_pages);
+}
+
+extern int movable_zone;
+
+static inline __attribute__((always_inline)) int zone_movable_is_highmem(void)
+{
+
+
+
+ return 0;
+
+}
+
+static inline __attribute__((always_inline)) int is_highmem_idx(enum zone_type idx)
+{
+
+
+
+
+ return 0;
+
+}
+
+static inline __attribute__((always_inline)) int is_normal_idx(enum zone_type idx)
+{
+ return (idx == ZONE_NORMAL);
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int is_highmem(struct zone *zone)
+{
+
+
+
+
+
+
+ return 0;
+
+}
+
+static inline __attribute__((always_inline)) int is_normal(struct zone *zone)
+{
+ return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
+}
+
+static inline __attribute__((always_inline)) int is_dma32(struct zone *zone)
+{
+
+
+
+ return 0;
+
+}
+
+static inline __attribute__((always_inline)) int is_dma(struct zone *zone)
+{
+
+ return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+
+
+
+}
+
+
+struct ctl_table;
+int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+extern int sysctl_lowmem_reserve_ratio[3 -1];
+int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+
+extern int numa_zonelist_order_handler(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+extern char numa_zonelist_order[];
+
+
+
+
+extern struct pglist_data contig_page_data;
+# 787 "include/linux/mmzone.h"
+extern struct pglist_data *first_online_pgdat(void);
+extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
+extern struct zone *next_zone(struct zone *zone);
+# 819 "include/linux/mmzone.h"
+static inline __attribute__((always_inline)) struct zone *zonelist_zone(struct zoneref *zoneref)
+{
+ return zoneref->zone;
+}
+
+static inline __attribute__((always_inline)) int zonelist_zone_idx(struct zoneref *zoneref)
+{
+ return zoneref->zone_idx;
+}
+
+static inline __attribute__((always_inline)) int zonelist_node_idx(struct zoneref *zoneref)
+{
+
+
+
+
+ return 0;
+
+}
+# 852 "include/linux/mmzone.h"
+struct zoneref *next_zones_zonelist(struct zoneref *z,
+ enum zone_type highest_zoneidx,
+ nodemask_t *nodes,
+ struct zone **zone);
+# 869 "include/linux/mmzone.h"
+static inline __attribute__((always_inline)) struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
+ enum zone_type highest_zoneidx,
+ nodemask_t *nodes,
+ struct zone **zone)
+{
+ return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
+ zone);
+}
+# 912 "include/linux/mmzone.h"
+static inline __attribute__((always_inline)) unsigned long early_pfn_to_nid(unsigned long pfn)
+{
+ return 0;
+}
+# 1093 "include/linux/mmzone.h"
+void memory_present(int nid, unsigned long start, unsigned long end);
+unsigned long __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) node_memmap_size_bytes(int, unsigned long, unsigned long);
+# 1127 "include/linux/mmzone.h"
+static inline __attribute__((always_inline)) int memmap_valid_within(unsigned long pfn,
+ struct page *page, struct zone *zone)
+{
+ return 1;
+}
+# 5 "include/linux/gfp.h" 2
+
+
+# 1 "include/linux/topology.h" 1
+# 30 "include/linux/topology.h"
+# 1 "include/linux/cpumask.h" 1
+# 13 "include/linux/cpumask.h"
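+/*
+ * With NR_CPUS == 1 a cpumask is a single long, so the iteration helpers
+ * below degenerate to constants: cpumask_first() is always 0 and
+ * cpumask_next() simply returns n + 1.
+ */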
+typedef struct cpumask { unsigned long bits[(((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } cpumask_t;
+# 78 "include/linux/cpumask.h"
+extern const struct cpumask *const cpu_possible_mask;
+extern const struct cpumask *const cpu_online_mask;
+extern const struct cpumask *const cpu_present_mask;
+extern const struct cpumask *const cpu_active_mask;
+# 102 "include/linux/cpumask.h"
+static inline __attribute__((always_inline)) unsigned int cpumask_check(unsigned int cpu)
+{
+
+
+
+ return cpu;
+}
+
+
+
+static inline __attribute__((always_inline)) unsigned int cpumask_first(const struct cpumask *srcp)
+{
+ return 0;
+}
+
+
+static inline __attribute__((always_inline)) unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+ return n+1;
+}
+
+static inline __attribute__((always_inline)) unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+{
+ return n+1;
+}
+
+static inline __attribute__((always_inline)) unsigned int cpumask_next_and(int n,
+ const struct cpumask *srcp,
+ const struct cpumask *andp)
+{
+ return n+1;
+}
+
+
+static inline __attribute__((always_inline)) unsigned int cpumask_any_but(const struct cpumask *mask,
+ unsigned int cpu)
+{
+ return 1;
+}
+# 238 "include/linux/cpumask.h"
+static inline __attribute__((always_inline)) void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+{
+ set_bit(cpumask_check(cpu), ((dstp)->bits));
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+ clear_bit(cpumask_check(cpu), ((dstp)->bits));
+}
+# 270 "include/linux/cpumask.h"
+static inline __attribute__((always_inline)) int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+{
+ return test_and_set_bit(cpumask_check(cpu), ((cpumask)->bits));
+}
+# 282 "include/linux/cpumask.h"
+static inline __attribute__((always_inline)) int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+{
+ return test_and_clear_bit(cpumask_check(cpu), ((cpumask)->bits));
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) void cpumask_setall(struct cpumask *dstp)
+{
+ bitmap_fill(((dstp)->bits), 1);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) void cpumask_clear(struct cpumask *dstp)
+{
+ bitmap_zero(((dstp)->bits), 1);
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int cpumask_and(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_and(((dstp)->bits), ((src1p)->bits),
+ ((src2p)->bits), 1);
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ bitmap_or(((dstp)->bits), ((src1p)->bits),
+ ((src2p)->bits), 1);
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void cpumask_xor(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ bitmap_xor(((dstp)->bits), ((src1p)->bits),
+ ((src2p)->bits), 1);
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int cpumask_andnot(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_andnot(((dstp)->bits), ((src1p)->bits),
+ ((src2p)->bits), 1);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void cpumask_complement(struct cpumask *dstp,
+ const struct cpumask *srcp)
+{
+ bitmap_complement(((dstp)->bits), ((srcp)->bits),
+ 1);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) bool cpumask_equal(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_equal(((src1p)->bits), ((src2p)->bits),
+ 1);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) bool cpumask_intersects(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_intersects(((src1p)->bits), ((src2p)->bits),
+ 1);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int cpumask_subset(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_subset(((src1p)->bits), ((src2p)->bits),
+ 1);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) bool cpumask_empty(const struct cpumask *srcp)
+{
+ return bitmap_empty(((srcp)->bits), 1);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) bool cpumask_full(const struct cpumask *srcp)
+{
+ return bitmap_full(((srcp)->bits), 1);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) unsigned int cpumask_weight(const struct cpumask *srcp)
+{
+ return bitmap_weight(((srcp)->bits), 1);
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void cpumask_shift_right(struct cpumask *dstp,
+ const struct cpumask *srcp, int n)
+{
+ bitmap_shift_right(((dstp)->bits), ((srcp)->bits), n,
+ 1);
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void cpumask_shift_left(struct cpumask *dstp,
+ const struct cpumask *srcp, int n)
+{
+ bitmap_shift_left(((dstp)->bits), ((srcp)->bits), n,
+ 1);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void cpumask_copy(struct cpumask *dstp,
+ const struct cpumask *srcp)
+{
+ bitmap_copy(((dstp)->bits), ((srcp)->bits), 1);
+}
+# 513 "include/linux/cpumask.h"
+static inline __attribute__((always_inline)) int cpumask_scnprintf(char *buf, int len,
+ const struct cpumask *srcp)
+{
+ return bitmap_scnprintf(buf, len, ((srcp)->bits), 1);
+}
+# 527 "include/linux/cpumask.h"
+static inline __attribute__((always_inline)) int cpumask_parse_user(const char *buf, int len,
+ struct cpumask *dstp)
+{
+ return bitmap_parse_user(buf, len, ((dstp)->bits), 1);
+}
+# 542 "include/linux/cpumask.h"
+static inline __attribute__((always_inline)) int cpulist_scnprintf(char *buf, int len,
+ const struct cpumask *srcp)
+{
+ return bitmap_scnlistprintf(buf, len, ((srcp)->bits),
+ 1);
+}
+# 557 "include/linux/cpumask.h"
+static inline __attribute__((always_inline)) int cpulist_parse(const char *buf, struct cpumask *dstp)
+{
+ return bitmap_parselist(buf, ((dstp)->bits), 1);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) size_t cpumask_size(void)
+{
+
+
+ return (((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(long);
+}
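+/*
+ * Note: in this build nr_cpumask_bits is 1 (uniprocessor configuration), so
+ * every bitmap_*() call above operates on a single bit and the iteration
+ * helpers (cpumask_first, cpumask_next, ...) collapse to trivial constants.
+ * cpumask_size() therefore reports exactly one unsigned long.
+ */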
+# 602 "include/linux/cpumask.h"
+typedef struct cpumask cpumask_var_t[1];
+
+static inline __attribute__((always_inline)) bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return true;
+}
+
+static inline __attribute__((always_inline)) bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+ int node)
+{
+ return true;
+}
+
+static inline __attribute__((always_inline)) bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ cpumask_clear(*mask);
+ return true;
+}
+
+static inline __attribute__((always_inline)) bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+ int node)
+{
+ cpumask_clear(*mask);
+ return true;
+}
+
+static inline __attribute__((always_inline)) void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+{
+}
+
+static inline __attribute__((always_inline)) void free_cpumask_var(cpumask_var_t mask)
+{
+}
+
+static inline __attribute__((always_inline)) void free_bootmem_cpumask_var(cpumask_var_t mask)
+{
+}
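+/*
+ * cpumask_var_t is a one-element array here (the off-stack cpumask variant
+ * is not enabled), so the alloc/free helpers are no-ops that at most zero
+ * the mask.  Illustrative use only, not taken from this file:
+ *
+ *     cpumask_var_t mask;
+ *     if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ *             return -ENOMEM;
+ *     cpumask_set_cpu(0, mask);
+ *     free_cpumask_var(mask);
+ */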
+
+
+
+
+extern const unsigned long cpu_all_bits[(((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
+# 654 "include/linux/cpumask.h"
+void set_cpu_possible(unsigned int cpu, bool possible);
+void set_cpu_present(unsigned int cpu, bool present);
+void set_cpu_online(unsigned int cpu, bool online);
+void set_cpu_active(unsigned int cpu, bool active);
+void init_cpu_present(const struct cpumask *src);
+void init_cpu_possible(const struct cpumask *src);
+void init_cpu_online(const struct cpumask *src);
+# 676 "include/linux/cpumask.h"
+static inline __attribute__((always_inline)) int __check_is_bitmap(const unsigned long *bitmap)
+{
+ return 1;
+}
+# 688 "include/linux/cpumask.h"
+extern const unsigned long
+ cpu_bit_bitmap[32 +1][(((1) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
+
+static inline __attribute__((always_inline)) const struct cpumask *get_cpu_mask(unsigned int cpu)
+{
+ const unsigned long *p = cpu_bit_bitmap[1 + cpu % 32];
+ p -= cpu / 32;
+ return ((struct cpumask *)(1 ? (p) : (void *)sizeof(__check_is_bitmap(p))));
+}
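+/*
+ * get_cpu_mask() appears to rely on the layout of cpu_bit_bitmap[]: row
+ * (1 + b) has only bit b set in its first word and the rows are contiguous,
+ * so backing the pointer up by cpu / BITS_PER_LONG words yields a read-only
+ * bitmap whose single set bit is 'cpu'.  The __check_is_bitmap() ternary is
+ * only a compile-time type check; the sizeof arm is never evaluated.
+ */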
+# 795 "include/linux/cpumask.h"
+static inline __attribute__((always_inline)) void __cpu_set(int cpu, volatile cpumask_t *dstp)
+{
+ set_bit(cpu, dstp->bits);
+}
+
+
+static inline __attribute__((always_inline)) void __cpu_clear(int cpu, volatile cpumask_t *dstp)
+{
+ clear_bit(cpu, dstp->bits);
+}
+
+
+static inline __attribute__((always_inline)) void __cpus_setall(cpumask_t *dstp, int nbits)
+{
+ bitmap_fill(dstp->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) void __cpus_clear(cpumask_t *dstp, int nbits)
+{
+ bitmap_zero(dstp->bits, nbits);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) int __cpu_test_and_set(int cpu, cpumask_t *addr)
+{
+ return test_and_set_bit(cpu, addr->bits);
+}
+
+
+static inline __attribute__((always_inline)) int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) int __cpus_equal(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_equal(src1p->bits, src2p->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) int __cpus_intersects(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) int __cpus_subset(const cpumask_t *src1p,
+ const cpumask_t *src2p, int nbits)
+{
+ return bitmap_subset(src1p->bits, src2p->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) int __cpus_empty(const cpumask_t *srcp, int nbits)
+{
+ return bitmap_empty(srcp->bits, nbits);
+}
+
+
+static inline __attribute__((always_inline)) int __cpus_weight(const cpumask_t *srcp, int nbits)
+{
+ return bitmap_weight(srcp->bits, nbits);
+}
+
+
+
+static inline __attribute__((always_inline)) void __cpus_shift_left(cpumask_t *dstp,
+ const cpumask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+}
+# 31 "include/linux/topology.h" 2
+
+
+# 1 "include/linux/smp.h" 1
+# 14 "include/linux/smp.h"
+extern void cpu_idle(void);
+
+struct call_single_data {
+ struct list_head list;
+ void (*func) (void *info);
+ void *info;
+ u16 flags;
+ u16 priv;
+};
+
+
+extern unsigned int total_cpus;
+
+int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
+ int wait);
+# 116 "include/linux/smp.h"
+static inline __attribute__((always_inline)) void smp_send_stop(void) { }
+
+
+
+
+
+static inline __attribute__((always_inline)) int up_smp_call_function(void (*func)(void *), void *info)
+{
+ return 0;
+}
+# 135 "include/linux/smp.h"
+static inline __attribute__((always_inline)) void smp_send_reschedule(int cpu) { }
+
+
+
+
+static inline __attribute__((always_inline)) void init_call_single_data(void)
+{
+}
+# 174 "include/linux/smp.h"
+extern void arch_disable_smp_support(void);
+
+void smp_setup_processor_id(void);
+# 34 "include/linux/topology.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/topology.h" 1
+# 1 "include/asm-generic/topology.h" 1
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/topology.h" 2
+# 35 "include/linux/topology.h" 2
+# 48 "include/linux/topology.h"
+int arch_update_cpu_topology(void);
+# 8 "include/linux/gfp.h" 2
+# 1 "include/linux/mmdebug.h" 1
+
+
+
+# 1 "include/linux/autoconf.h" 1
+# 5 "include/linux/mmdebug.h" 2
+# 9 "include/linux/gfp.h" 2
+
+struct vm_area_struct;
+# 119 "include/linux/gfp.h"
+static inline __attribute__((always_inline)) int allocflags_to_migratetype(gfp_t gfp_flags)
+{
+ ({ int __ret_warn_on = !!((gfp_flags & ((( gfp_t)0x80000u)|(( gfp_t)0x08u))) == ((( gfp_t)0x80000u)|(( gfp_t)0x08u))); if (__builtin_expect(!!(__ret_warn_on), 0)) asm volatile( "1: .hword %0\n" " .section __bug_table,\"a\",@progbits\n" "2: .long 1b\n" " .long %1\n" " .short %2\n" " .short %3\n" " .org 2b + %4\n" " .previous" : : "i"(0xefcd), "i"("include/linux/gfp.h"), "i"(121), "i"((1<<0)), "i"(sizeof(struct bug_entry))); __builtin_expect(!!(__ret_warn_on), 0); });
+
+ if (__builtin_expect(!!(page_group_by_mobility_disabled), 0))
+ return 0;
+
+
+ return (((gfp_flags & (( gfp_t)0x08u)) != 0) << 1) |
+ ((gfp_flags & (( gfp_t)0x80000u)) != 0);
+}
+# 214 "include/linux/gfp.h"
+static inline __attribute__((always_inline)) enum zone_type gfp_zone(gfp_t flags)
+{
+ enum zone_type z;
+ int bit = flags & ((( gfp_t)0x01u)|(( gfp_t)0x02u)|(( gfp_t)0x04u)|(( gfp_t)0x08u));
+
+ z = (( (ZONE_NORMAL << 0 * 2) | (ZONE_DMA << (( gfp_t)0x01u) * 2) | (ZONE_NORMAL << (( gfp_t)0x02u) * 2) | (ZONE_NORMAL << (( gfp_t)0x04u) * 2) | (ZONE_NORMAL << (( gfp_t)0x08u) * 2) | (ZONE_DMA << ((( gfp_t)0x08u) | (( gfp_t)0x01u)) * 2) | (ZONE_MOVABLE << ((( gfp_t)0x08u) | (( gfp_t)0x02u)) * 2) | (ZONE_NORMAL << ((( gfp_t)0x08u) | (( gfp_t)0x04u)) * 2)) >> (bit * 2)) &
+ ((1 << 2) - 1);
+
+ if (__builtin_constant_p(bit))
+ ((void)sizeof(char[1 - 2 * !!((( 1 << ((( gfp_t)0x01u) | (( gfp_t)0x02u)) | 1 << ((( gfp_t)0x01u) | (( gfp_t)0x04u)) | 1 << ((( gfp_t)0x04u) | (( gfp_t)0x02u)) | 1 << ((( gfp_t)0x01u) | (( gfp_t)0x04u) | (( gfp_t)0x02u)) | 1 << ((( gfp_t)0x08u) | (( gfp_t)0x02u) | (( gfp_t)0x01u)) | 1 << ((( gfp_t)0x08u) | (( gfp_t)0x04u) | (( gfp_t)0x01u)) | 1 << ((( gfp_t)0x08u) | (( gfp_t)0x04u) | (( gfp_t)0x02u)) | 1 << ((( gfp_t)0x08u) | (( gfp_t)0x04u) | (( gfp_t)0x01u) | (( gfp_t)0x02u))) >> bit) & 1)]));
+ else {
+
+
+
+ }
+ return z;
+}
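+/*
+ * gfp_zone(): the low zone-modifier bits of the gfp mask index into a
+ * compile-time constant in which each modifier combination occupies two
+ * bits encoding the target zone (DMA, NORMAL or MOVABLE in this
+ * configuration).  The sizeof(char[1 - 2*!!(...)]) construct is a
+ * build-time assertion that rejects the combinations marked invalid when
+ * the modifier bits are compile-time constants.
+ */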
+# 239 "include/linux/gfp.h"
+static inline __attribute__((always_inline)) int gfp_zonelist(gfp_t flags)
+{
+ if (0 && __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 0))
+ return 1;
+
+ return 0;
+}
+# 256 "include/linux/gfp.h"
+static inline __attribute__((always_inline)) struct zonelist *node_zonelist(int nid, gfp_t flags)
+{
+ return (&contig_page_data)->node_zonelists + gfp_zonelist(flags);
+}
+
+
+static inline __attribute__((always_inline)) void arch_free_page(struct page *page, int order) { }
+
+
+static inline __attribute__((always_inline)) void arch_alloc_page(struct page *page, int order) { }
+
+
+struct page *
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, nodemask_t *nodemask);
+
+static inline __attribute__((always_inline)) struct page *
+__alloc_pages(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist)
+{
+ return __alloc_pages_nodemask(gfp_mask, order, zonelist, ((void *)0));
+}
+
+static inline __attribute__((always_inline)) struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
+ unsigned int order)
+{
+
+ if (nid < 0)
+ nid = (((void)(0),0));
+
+ return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+}
+
+static inline __attribute__((always_inline)) struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+ unsigned int order)
+{
+ do { } while (0);
+
+ return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+}
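+/*
+ * alloc_pages_node(): a negative nid means "allocate on the current node",
+ * which on this !NUMA build is simply node 0 (the (((void)(0),0)) blob is
+ * apparently the expansion of numa_node_id()); the request is then routed
+ * through that node's zonelist.  Illustrative only:
+ *
+ *     struct page *page = alloc_pages_node(-1, GFP_KERNEL, 0);
+ *     if (page)
+ *             __free_pages(page, 0);
+ */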
+# 314 "include/linux/gfp.h"
+extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
+extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
+void free_pages_exact(void *virt, size_t size);
+
+
+
+
+
+
+
+extern void __free_pages(struct page *page, unsigned int order);
+extern void free_pages(unsigned long addr, unsigned int order);
+extern void free_hot_page(struct page *page);
+
+
+
+
+void page_alloc_init(void);
+void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
+void drain_all_pages(void);
+void drain_local_pages(void *dummy);
+
+extern gfp_t gfp_allowed_mask;
+
+static inline __attribute__((always_inline)) void set_gfp_allowed_mask(gfp_t mask)
+{
+ gfp_allowed_mask = mask;
+}
+# 23 "include/linux/kmod.h" 2
+# 32 "include/linux/kmod.h"
+extern int __request_module(bool wait, const char *name, ...) __attribute__((format(printf, 2, 3)));
+# 45 "include/linux/kmod.h"
+struct key;
+struct file;
+struct subprocess_info;
+
+
+struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
+ char **envp, gfp_t gfp_mask);
+
+
+void call_usermodehelper_setkeys(struct subprocess_info *info,
+ struct key *session_keyring);
+int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
+ struct file **filp);
+void call_usermodehelper_setcleanup(struct subprocess_info *info,
+ void (*cleanup)(char **argv, char **envp));
+
+enum umh_wait {
+ UMH_NO_WAIT = -1,
+ UMH_WAIT_EXEC = 0,
+ UMH_WAIT_PROC = 1,
+};
+
+
+int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
+
+
+
+void call_usermodehelper_freeinfo(struct subprocess_info *info);
+
+static inline __attribute__((always_inline)) int
+call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
+{
+ struct subprocess_info *info;
+ gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? ((( gfp_t)0x20u)) : ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u));
+
+ info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
+ if (info == ((void *)0))
+ return -12;
+ return call_usermodehelper_exec(info, wait);
+}
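+/*
+ * call_usermodehelper(): allocate a subprocess_info, then hand it to
+ * call_usermodehelper_exec().  GFP_ATOMIC is used for UMH_NO_WAIT callers
+ * (they may run in atomic context), GFP_KERNEL otherwise; -12 is -ENOMEM.
+ * Illustrative only:
+ *
+ *     char *argv[] = { "/sbin/hotplug", NULL };
+ *     char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
+ *     call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+ */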
+
+static inline __attribute__((always_inline)) int
+call_usermodehelper_keys(char *path, char **argv, char **envp,
+ struct key *session_keyring, enum umh_wait wait)
+{
+ struct subprocess_info *info;
+ gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? ((( gfp_t)0x20u)) : ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u));
+
+ info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
+ if (info == ((void *)0))
+ return -12;
+
+ call_usermodehelper_setkeys(info, session_keyring);
+ return call_usermodehelper_exec(info, wait);
+}
+
+extern void usermodehelper_init(void);
+
+struct file;
+extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[],
+ struct file **filp);
+
+extern int usermodehelper_disable(void);
+extern void usermodehelper_enable(void);
+# 14 "include/linux/module.h" 2
+# 1 "include/linux/elf.h" 1
+
+
+
+
+# 1 "include/linux/elf-em.h" 1
+# 6 "include/linux/elf.h" 2
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/elf.h" 1
+# 15 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/elf.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/user.h" 1
+# 1 "include/asm-generic/user.h" 1
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/user.h" 2
+# 16 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/elf.h" 2
+# 25 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/elf.h"
+typedef unsigned long elf_greg_t;
+
+
+typedef elf_greg_t elf_gregset_t[40];
+
+typedef struct { } elf_fpregset_t;
+# 131 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/elf.h"
+struct mm_struct;
+struct elf_fdpic_params;
+struct elf32_phdr;
+extern int elf_fdpic_plat_process_phdr(struct mm_struct *, struct elf_fdpic_params *,
+ struct elf32_phdr *, unsigned long *, unsigned long *);
+# 8 "include/linux/elf.h" 2
+
+
+struct file;
+# 20 "include/linux/elf.h"
+typedef __u32 Elf32_Addr;
+typedef __u16 Elf32_Half;
+typedef __u32 Elf32_Off;
+typedef __s32 Elf32_Sword;
+typedef __u32 Elf32_Word;
+
+
+typedef __u64 Elf64_Addr;
+typedef __u16 Elf64_Half;
+typedef __s16 Elf64_SHalf;
+typedef __u64 Elf64_Off;
+typedef __s32 Elf64_Sword;
+typedef __u32 Elf64_Word;
+typedef __u64 Elf64_Xword;
+typedef __s64 Elf64_Sxword;
+# 127 "include/linux/elf.h"
+typedef struct dynamic{
+ Elf32_Sword d_tag;
+ union{
+ Elf32_Sword d_val;
+ Elf32_Addr d_ptr;
+ } d_un;
+} Elf32_Dyn;
+
+typedef struct {
+ Elf64_Sxword d_tag;
+ union {
+ Elf64_Xword d_val;
+ Elf64_Addr d_ptr;
+ } d_un;
+} Elf64_Dyn;
+# 150 "include/linux/elf.h"
+typedef struct elf32_rel {
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+} Elf32_Rel;
+
+typedef struct elf64_rel {
+ Elf64_Addr r_offset;
+ Elf64_Xword r_info;
+} Elf64_Rel;
+
+typedef struct elf32_rela{
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+ Elf32_Sword r_addend;
+} Elf32_Rela;
+
+typedef struct elf64_rela {
+ Elf64_Addr r_offset;
+ Elf64_Xword r_info;
+ Elf64_Sxword r_addend;
+} Elf64_Rela;
+
+typedef struct elf32_sym{
+ Elf32_Word st_name;
+ Elf32_Addr st_value;
+ Elf32_Word st_size;
+ unsigned char st_info;
+ unsigned char st_other;
+ Elf32_Half st_shndx;
+} Elf32_Sym;
+
+typedef struct elf64_sym {
+ Elf64_Word st_name;
+ unsigned char st_info;
+ unsigned char st_other;
+ Elf64_Half st_shndx;
+ Elf64_Addr st_value;
+ Elf64_Xword st_size;
+} Elf64_Sym;
+
+
+
+
+typedef struct elf32_hdr{
+ unsigned char e_ident[16];
+ Elf32_Half e_type;
+ Elf32_Half e_machine;
+ Elf32_Word e_version;
+ Elf32_Addr e_entry;
+ Elf32_Off e_phoff;
+ Elf32_Off e_shoff;
+ Elf32_Word e_flags;
+ Elf32_Half e_ehsize;
+ Elf32_Half e_phentsize;
+ Elf32_Half e_phnum;
+ Elf32_Half e_shentsize;
+ Elf32_Half e_shnum;
+ Elf32_Half e_shstrndx;
+} Elf32_Ehdr;
+
+typedef struct elf64_hdr {
+ unsigned char e_ident[16];
+ Elf64_Half e_type;
+ Elf64_Half e_machine;
+ Elf64_Word e_version;
+ Elf64_Addr e_entry;
+ Elf64_Off e_phoff;
+ Elf64_Off e_shoff;
+ Elf64_Word e_flags;
+ Elf64_Half e_ehsize;
+ Elf64_Half e_phentsize;
+ Elf64_Half e_phnum;
+ Elf64_Half e_shentsize;
+ Elf64_Half e_shnum;
+ Elf64_Half e_shstrndx;
+} Elf64_Ehdr;
+
+
+
+
+
+
+
+typedef struct elf32_phdr{
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+ Elf32_Addr p_vaddr;
+ Elf32_Addr p_paddr;
+ Elf32_Word p_filesz;
+ Elf32_Word p_memsz;
+ Elf32_Word p_flags;
+ Elf32_Word p_align;
+} Elf32_Phdr;
+
+typedef struct elf64_phdr {
+ Elf64_Word p_type;
+ Elf64_Word p_flags;
+ Elf64_Off p_offset;
+ Elf64_Addr p_vaddr;
+ Elf64_Addr p_paddr;
+ Elf64_Xword p_filesz;
+ Elf64_Xword p_memsz;
+ Elf64_Xword p_align;
+} Elf64_Phdr;
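+/*
+ * Note the field-order difference mandated by the ELF specifications: the
+ * 64-bit program header keeps p_flags right after p_type (for natural
+ * alignment of the following 64-bit members), whereas the 32-bit header
+ * carries p_flags near the end.
+ */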
+# 289 "include/linux/elf.h"
+typedef struct {
+ Elf32_Word sh_name;
+ Elf32_Word sh_type;
+ Elf32_Word sh_flags;
+ Elf32_Addr sh_addr;
+ Elf32_Off sh_offset;
+ Elf32_Word sh_size;
+ Elf32_Word sh_link;
+ Elf32_Word sh_info;
+ Elf32_Word sh_addralign;
+ Elf32_Word sh_entsize;
+} Elf32_Shdr;
+
+typedef struct elf64_shdr {
+ Elf64_Word sh_name;
+ Elf64_Word sh_type;
+ Elf64_Xword sh_flags;
+ Elf64_Addr sh_addr;
+ Elf64_Off sh_offset;
+ Elf64_Xword sh_size;
+ Elf64_Word sh_link;
+ Elf64_Word sh_info;
+ Elf64_Xword sh_addralign;
+ Elf64_Xword sh_entsize;
+} Elf64_Shdr;
+# 368 "include/linux/elf.h"
+typedef struct elf32_note {
+ Elf32_Word n_namesz;
+ Elf32_Word n_descsz;
+ Elf32_Word n_type;
+} Elf32_Nhdr;
+
+
+typedef struct elf64_note {
+ Elf64_Word n_namesz;
+ Elf64_Word n_descsz;
+ Elf64_Word n_type;
+} Elf64_Nhdr;
+
+
+
+
+extern Elf32_Dyn _DYNAMIC [];
+# 402 "include/linux/elf.h"
+static inline __attribute__((always_inline)) int elf_coredump_extra_notes_size(void) { return 0; }
+static inline __attribute__((always_inline)) int elf_coredump_extra_notes_write(struct file *file,
+ loff_t *foffset) { return 0; }
+# 15 "include/linux/module.h" 2
+
+# 1 "include/linux/kobject.h" 1
+# 21 "include/linux/kobject.h"
+# 1 "include/linux/sysfs.h" 1
+# 20 "include/linux/sysfs.h"
+struct kobject;
+struct module;
+
+
+
+
+
+
+struct attribute {
+ const char *name;
+ struct module *owner;
+ mode_t mode;
+};
+
+struct attribute_group {
+ const char *name;
+ mode_t (*is_visible)(struct kobject *,
+ struct attribute *, int);
+ struct attribute **attrs;
+};
+# 63 "include/linux/sysfs.h"
+struct vm_area_struct;
+
+struct bin_attribute {
+ struct attribute attr;
+ size_t size;
+ void *private;
+ ssize_t (*read)(struct kobject *, struct bin_attribute *,
+ char *, loff_t, size_t);
+ ssize_t (*write)(struct kobject *, struct bin_attribute *,
+ char *, loff_t, size_t);
+ int (*mmap)(struct kobject *, struct bin_attribute *attr,
+ struct vm_area_struct *vma);
+};
+
+struct sysfs_ops {
+ ssize_t (*show)(struct kobject *, struct attribute *,char *);
+ ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
+};
+
+struct sysfs_dirent;
+
+
+
+int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
+ void *data, struct module *owner);
+
+int __attribute__((warn_unused_result)) sysfs_create_dir(struct kobject *kobj);
+void sysfs_remove_dir(struct kobject *kobj);
+int __attribute__((warn_unused_result)) sysfs_rename_dir(struct kobject *kobj, const char *new_name);
+int __attribute__((warn_unused_result)) sysfs_move_dir(struct kobject *kobj,
+ struct kobject *new_parent_kobj);
+
+int __attribute__((warn_unused_result)) sysfs_create_file(struct kobject *kobj,
+ const struct attribute *attr);
+int __attribute__((warn_unused_result)) sysfs_chmod_file(struct kobject *kobj, struct attribute *attr,
+ mode_t mode);
+void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr);
+
+int __attribute__((warn_unused_result)) sysfs_create_bin_file(struct kobject *kobj,
+ struct bin_attribute *attr);
+void sysfs_remove_bin_file(struct kobject *kobj, struct bin_attribute *attr);
+
+int __attribute__((warn_unused_result)) sysfs_create_link(struct kobject *kobj, struct kobject *target,
+ const char *name);
+int __attribute__((warn_unused_result)) sysfs_create_link_nowarn(struct kobject *kobj,
+ struct kobject *target,
+ const char *name);
+void sysfs_remove_link(struct kobject *kobj, const char *name);
+
+int __attribute__((warn_unused_result)) sysfs_create_group(struct kobject *kobj,
+ const struct attribute_group *grp);
+int sysfs_update_group(struct kobject *kobj,
+ const struct attribute_group *grp);
+void sysfs_remove_group(struct kobject *kobj,
+ const struct attribute_group *grp);
+int sysfs_add_file_to_group(struct kobject *kobj,
+ const struct attribute *attr, const char *group);
+void sysfs_remove_file_from_group(struct kobject *kobj,
+ const struct attribute *attr, const char *group);
+
+void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
+void sysfs_notify_dirent(struct sysfs_dirent *sd);
+struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
+ const unsigned char *name);
+struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
+void sysfs_put(struct sysfs_dirent *sd);
+void sysfs_printk_last_file(void);
+int __attribute__((warn_unused_result)) sysfs_init(void);
+# 22 "include/linux/kobject.h" 2
+
+
+# 1 "include/linux/kref.h" 1
+# 20 "include/linux/kref.h"
+struct kref {
+ atomic_t refcount;
+};
+
+void kref_set(struct kref *kref, int num);
+void kref_init(struct kref *kref);
+void kref_get(struct kref *kref);
+int kref_put(struct kref *kref, void (*release) (struct kref *kref));
+# 25 "include/linux/kobject.h" 2
+# 34 "include/linux/kobject.h"
+extern char uevent_helper[];
+
+
+extern u64 uevent_seqnum;
+# 49 "include/linux/kobject.h"
+enum kobject_action {
+ KOBJ_ADD,
+ KOBJ_REMOVE,
+ KOBJ_CHANGE,
+ KOBJ_MOVE,
+ KOBJ_ONLINE,
+ KOBJ_OFFLINE,
+ KOBJ_MAX
+};
+
+struct kobject {
+ const char *name;
+ struct list_head entry;
+ struct kobject *parent;
+ struct kset *kset;
+ struct kobj_type *ktype;
+ struct sysfs_dirent *sd;
+ struct kref kref;
+ unsigned int state_initialized:1;
+ unsigned int state_in_sysfs:1;
+ unsigned int state_add_uevent_sent:1;
+ unsigned int state_remove_uevent_sent:1;
+ unsigned int uevent_suppress:1;
+};
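+/*
+ * A kobject is always embedded in a larger structure; it provides reference
+ * counting (kref), the sysfs directory entry (sd), and parent/kset linkage.
+ * The containing object is recovered with container_of() and is freed from
+ * its ktype->release() callback once the last reference is dropped, e.g.
+ * (illustrative only):
+ *
+ *     struct foo { int val; struct kobject kobj; };
+ *     #define to_foo(k) container_of(k, struct foo, kobj)
+ */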
+
+extern int kobject_set_name(struct kobject *kobj, const char *name, ...)
+ __attribute__((format(printf, 2, 3)));
+extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
+ va_list vargs);
+
+static inline __attribute__((always_inline)) const char *kobject_name(const struct kobject *kobj)
+{
+ return kobj->name;
+}
+
+extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
+extern int __attribute__((warn_unused_result)) kobject_add(struct kobject *kobj,
+ struct kobject *parent,
+ const char *fmt, ...);
+extern int __attribute__((warn_unused_result)) kobject_init_and_add(struct kobject *kobj,
+ struct kobj_type *ktype,
+ struct kobject *parent,
+ const char *fmt, ...);
+
+extern void kobject_del(struct kobject *kobj);
+
+extern struct kobject * __attribute__((warn_unused_result)) kobject_create(void);
+extern struct kobject * __attribute__((warn_unused_result)) kobject_create_and_add(const char *name,
+ struct kobject *parent);
+
+extern int __attribute__((warn_unused_result)) kobject_rename(struct kobject *, const char *new_name);
+extern int __attribute__((warn_unused_result)) kobject_move(struct kobject *, struct kobject *);
+
+extern struct kobject *kobject_get(struct kobject *kobj);
+extern void kobject_put(struct kobject *kobj);
+
+extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
+
+struct kobj_type {
+ void (*release)(struct kobject *kobj);
+ struct sysfs_ops *sysfs_ops;
+ struct attribute **default_attrs;
+};
+
+struct kobj_uevent_env {
+ char *envp[32];
+ int envp_idx;
+ char buf[2048];
+ int buflen;
+};
+
+struct kset_uevent_ops {
+ int (*filter)(struct kset *kset, struct kobject *kobj);
+ const char *(*name)(struct kset *kset, struct kobject *kobj);
+ int (*uevent)(struct kset *kset, struct kobject *kobj,
+ struct kobj_uevent_env *env);
+};
+
+struct kobj_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count);
+};
+
+extern struct sysfs_ops kobj_sysfs_ops;
+# 154 "include/linux/kobject.h"
+struct kset {
+ struct list_head list;
+ spinlock_t list_lock;
+ struct kobject kobj;
+ struct kset_uevent_ops *uevent_ops;
+};
+
+extern void kset_init(struct kset *kset);
+extern int __attribute__((warn_unused_result)) kset_register(struct kset *kset);
+extern void kset_unregister(struct kset *kset);
+extern struct kset * __attribute__((warn_unused_result)) kset_create_and_add(const char *name,
+ struct kset_uevent_ops *u,
+ struct kobject *parent_kobj);
+
+static inline __attribute__((always_inline)) struct kset *to_kset(struct kobject *kobj)
+{
+ return kobj ? ({ const typeof( ((struct kset *)0)->kobj ) *__mptr = (kobj); (struct kset *)( (char *)__mptr - __builtin_offsetof(struct kset,kobj) );}) : ((void *)0);
+}
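+/*
+ * The expression above is the open-coded expansion of
+ * container_of(kobj, struct kset, kobj): subtract the member offset from
+ * the member pointer to get back to the enclosing kset.
+ */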
+
+static inline __attribute__((always_inline)) struct kset *kset_get(struct kset *k)
+{
+ return k ? to_kset(kobject_get(&k->kobj)) : ((void *)0);
+}
+
+static inline __attribute__((always_inline)) void kset_put(struct kset *k)
+{
+ kobject_put(&k->kobj);
+}
+
+static inline __attribute__((always_inline)) struct kobj_type *get_ktype(struct kobject *kobj)
+{
+ return kobj->ktype;
+}
+
+extern struct kobject *kset_find_obj(struct kset *, const char *);
+
+
+extern struct kobject *kernel_kobj;
+
+extern struct kobject *mm_kobj;
+
+extern struct kobject *hypervisor_kobj;
+
+extern struct kobject *power_kobj;
+
+extern struct kobject *firmware_kobj;
+# 212 "include/linux/kobject.h"
+static inline __attribute__((always_inline)) int kobject_uevent(struct kobject *kobj,
+ enum kobject_action action)
+{ return 0; }
+static inline __attribute__((always_inline)) int kobject_uevent_env(struct kobject *kobj,
+ enum kobject_action action,
+ char *envp[])
+{ return 0; }
+
+static inline __attribute__((always_inline)) int add_uevent_var(struct kobj_uevent_env *env,
+ const char *format, ...)
+{ return 0; }
+
+static inline __attribute__((always_inline)) int kobject_action_type(const char *buf, size_t count,
+ enum kobject_action *type)
+{ return -22; }
+# 17 "include/linux/module.h" 2
+# 1 "include/linux/moduleparam.h" 1
+# 32 "include/linux/moduleparam.h"
+struct kernel_param;
+
+
+typedef int (*param_set_fn)(const char *val, struct kernel_param *kp);
+
+typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp);
+
+
+
+
+struct kernel_param {
+ const char *name;
+ u16 perm;
+ u16 flags;
+ param_set_fn set;
+ param_get_fn get;
+ union {
+ void *arg;
+ const struct kparam_string *str;
+ const struct kparam_array *arr;
+ };
+};
+
+
+struct kparam_string {
+ unsigned int maxlen;
+ char *string;
+};
+
+
+struct kparam_array
+{
+ unsigned int max;
+ unsigned int *num;
+ param_set_fn set;
+ param_get_fn get;
+ unsigned int elemsize;
+ void *elem;
+};
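+/*
+ * Each module_param()/module_param_named() use (a macro, hence not visible
+ * in this preprocessed output) emits one struct kernel_param wired to the
+ * matching param_set_xxx/param_get_xxx callbacks declared below;
+ * kparam_string and kparam_array back string and array parameters.
+ */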
+# 143 "include/linux/moduleparam.h"
+extern int parse_args(const char *name,
+ char *args,
+ struct kernel_param *params,
+ unsigned num,
+ int (*unknown)(char *param, char *val));
+
+
+
+extern void destroy_params(const struct kernel_param *params, unsigned num);
+# 165 "include/linux/moduleparam.h"
+extern int param_set_byte(const char *val, struct kernel_param *kp);
+extern int param_get_byte(char *buffer, struct kernel_param *kp);
+
+
+extern int param_set_short(const char *val, struct kernel_param *kp);
+extern int param_get_short(char *buffer, struct kernel_param *kp);
+
+
+extern int param_set_ushort(const char *val, struct kernel_param *kp);
+extern int param_get_ushort(char *buffer, struct kernel_param *kp);
+
+
+extern int param_set_int(const char *val, struct kernel_param *kp);
+extern int param_get_int(char *buffer, struct kernel_param *kp);
+
+
+extern int param_set_uint(const char *val, struct kernel_param *kp);
+extern int param_get_uint(char *buffer, struct kernel_param *kp);
+
+
+extern int param_set_long(const char *val, struct kernel_param *kp);
+extern int param_get_long(char *buffer, struct kernel_param *kp);
+
+
+extern int param_set_ulong(const char *val, struct kernel_param *kp);
+extern int param_get_ulong(char *buffer, struct kernel_param *kp);
+
+
+extern int param_set_charp(const char *val, struct kernel_param *kp);
+extern int param_get_charp(char *buffer, struct kernel_param *kp);
+
+
+
+extern int param_set_bool(const char *val, struct kernel_param *kp);
+extern int param_get_bool(char *buffer, struct kernel_param *kp);
+# 208 "include/linux/moduleparam.h"
+extern int param_set_invbool(const char *val, struct kernel_param *kp);
+extern int param_get_invbool(char *buffer, struct kernel_param *kp);
+# 226 "include/linux/moduleparam.h"
+extern int param_array_set(const char *val, struct kernel_param *kp);
+extern int param_array_get(char *buffer, struct kernel_param *kp);
+
+extern int param_set_copystring(const char *val, struct kernel_param *kp);
+extern int param_get_string(char *buffer, struct kernel_param *kp);
+
+
+
+struct module;
+
+
+extern int module_param_sysfs_setup(struct module *mod,
+ struct kernel_param *kparam,
+ unsigned int num_params);
+
+extern void module_param_sysfs_remove(struct module *mod);
+# 18 "include/linux/module.h" 2
+# 1 "include/linux/tracepoint.h" 1
+# 18 "include/linux/tracepoint.h"
+# 1 "include/linux/rcupdate.h" 1
+# 42 "include/linux/rcupdate.h"
+# 1 "include/linux/completion.h" 1
+# 25 "include/linux/completion.h"
+struct completion {
+ unsigned int done;
+ wait_queue_head_t wait;
+};
+# 73 "include/linux/completion.h"
+static inline __attribute__((always_inline)) void init_completion(struct completion *x)
+{
+ x->done = 0;
+ do { static struct lock_class_key __key; __init_waitqueue_head((&x->wait), &__key); } while (0);
+}
+
+extern void wait_for_completion(struct completion *);
+extern int wait_for_completion_interruptible(struct completion *x);
+extern int wait_for_completion_killable(struct completion *x);
+extern unsigned long wait_for_completion_timeout(struct completion *x,
+ unsigned long timeout);
+extern unsigned long wait_for_completion_interruptible_timeout(
+ struct completion *x, unsigned long timeout);
+extern bool try_wait_for_completion(struct completion *x);
+extern bool completion_done(struct completion *x);
+
+extern void complete(struct completion *);
+extern void complete_all(struct completion *);
+# 43 "include/linux/rcupdate.h" 2
+
+
+
+
+
+
+struct rcu_head {
+ struct rcu_head *next;
+ void (*func)(struct rcu_head *head);
+};
+
+
+
+
+
+
+
+extern void synchronize_rcu_bh(void);
+extern void synchronize_sched(void);
+extern void rcu_barrier(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
+extern void synchronize_sched_expedited(void);
+extern int sched_expedited_torture_stats(char *page);
+
+
+extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
+extern int rcu_needs_cpu(int cpu);
+extern int rcu_scheduler_active;
+
+
+# 1 "include/linux/rcutree.h" 1
+# 33 "include/linux/rcutree.h"
+struct notifier_block;
+
+extern void rcu_sched_qs(int cpu);
+extern void rcu_bh_qs(int cpu);
+extern int rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu);
+extern int rcu_needs_cpu(int cpu);
+extern int rcu_expedited_torture_stats(char *page);
+# 50 "include/linux/rcutree.h"
+static inline __attribute__((always_inline)) void __rcu_read_lock(void)
+{
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) void __rcu_read_unlock(void)
+{
+ do { } while (0);
+}
+
+
+
+static inline __attribute__((always_inline)) void exit_rcu(void)
+{
+}
+
+
+
+static inline __attribute__((always_inline)) void __rcu_read_lock_bh(void)
+{
+ local_bh_disable();
+}
+static inline __attribute__((always_inline)) void __rcu_read_unlock_bh(void)
+{
+ local_bh_enable();
+}
+
+extern void call_rcu_sched(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu));
+extern void synchronize_rcu_expedited(void);
+
+static inline __attribute__((always_inline)) void synchronize_rcu_bh_expedited(void)
+{
+ synchronize_sched_expedited();
+}
+
+extern void __rcu_init(void);
+extern void rcu_check_callbacks(int cpu, int user);
+
+extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
+extern long rcu_batches_completed_sched(void);
+
+
+void rcu_enter_nohz(void);
+void rcu_exit_nohz(void);
+# 106 "include/linux/rcutree.h"
+static inline __attribute__((always_inline)) int rcu_blocking_is_gp(void)
+{
+ return 1 == 1;
+}
+# 76 "include/linux/rcupdate.h" 2
+# 87 "include/linux/rcupdate.h"
+extern struct lockdep_map rcu_lock_map;
+# 125 "include/linux/rcupdate.h"
+static inline __attribute__((always_inline)) void rcu_read_lock(void)
+{
+ __rcu_read_lock();
+ (void)0;
+ lock_acquire(&rcu_lock_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
+}
+# 147 "include/linux/rcupdate.h"
+static inline __attribute__((always_inline)) void rcu_read_unlock(void)
+{
+ lock_release(&rcu_lock_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
+ (void)0;
+ __rcu_read_unlock();
+}
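+/*
+ * Typical RCU read side (illustrative only):
+ *
+ *     rcu_read_lock();
+ *     p = rcu_dereference(gp);
+ *     if (p)
+ *             use(p);
+ *     rcu_read_unlock();
+ *
+ * The lock_acquire/lock_release calls only feed lockdep; on this
+ * non-preemptible configuration __rcu_read_lock() itself (see rcutree.h
+ * above) expands to nothing.
+ */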
+# 165 "include/linux/rcupdate.h"
+static inline __attribute__((always_inline)) void rcu_read_lock_bh(void)
+{
+ __rcu_read_lock_bh();
+ (void)0;
+ lock_acquire(&rcu_lock_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void rcu_read_unlock_bh(void)
+{
+ lock_release(&rcu_lock_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
+ (void)0;
+ __rcu_read_unlock_bh();
+}
+# 193 "include/linux/rcupdate.h"
+static inline __attribute__((always_inline)) void rcu_read_lock_sched(void)
+{
+ do { } while (0);
+ (void)0;
+ lock_acquire(&rcu_lock_map, 0, 0, 2, 1, ((void *)0), ({ __label__ __here; __here: (unsigned long)&&__here; }));
+}
+
+
+static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void rcu_read_lock_sched_notrace(void)
+{
+ do { } while (0);
+ (void)0;
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void rcu_read_unlock_sched(void)
+{
+ lock_release(&rcu_lock_map, 1, ({ __label__ __here; __here: (unsigned long)&&__here; }));
+ (void)0;
+ do { } while (0);
+}
+
+
+static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void rcu_read_unlock_sched_notrace(void)
+{
+ (void)0;
+ do { } while (0);
+}
+# 266 "include/linux/rcupdate.h"
+struct rcu_synchronize {
+ struct rcu_head head;
+ struct completion completion;
+};
+
+extern void wakeme_after_rcu(struct rcu_head *head);
+# 284 "include/linux/rcupdate.h"
+extern void call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
+# 305 "include/linux/rcupdate.h"
+extern void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
+# 19 "include/linux/tracepoint.h" 2
+
+struct module;
+struct tracepoint;
+
+struct tracepoint {
+ const char *name;
+ int state;
+ void (*regfunc)(void);
+ void (*unregfunc)(void);
+ void **funcs;
+} __attribute__((aligned(32)));
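+/*
+ * A tracepoint carries its name, an on/off state flipped by probe
+ * (un)registration, and a NULL-terminated array of probe callbacks in
+ * 'funcs'.  The DEFINE_TRACE-style expansions further down (e.g.
+ * __tracepoint_kmalloc) instantiate one of these per event plus inline
+ * trace_xxx() wrappers that walk 'funcs' under rcu_read_lock_sched_notrace().
+ */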
+# 99 "include/linux/tracepoint.h"
+extern void tracepoint_update_probe_range(struct tracepoint *begin,
+ struct tracepoint *end);
+# 132 "include/linux/tracepoint.h"
+extern int tracepoint_probe_register(const char *name, void *probe);
+
+
+
+
+
+extern int tracepoint_probe_unregister(const char *name, void *probe);
+
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern void tracepoint_probe_update_all(void);
+
+struct tracepoint_iter {
+ struct module *module;
+ struct tracepoint *tracepoint;
+};
+
+extern void tracepoint_iter_start(struct tracepoint_iter *iter);
+extern void tracepoint_iter_next(struct tracepoint_iter *iter);
+extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
+extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
+extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
+ struct tracepoint *begin, struct tracepoint *end);
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void tracepoint_synchronize_unregister(void)
+{
+ synchronize_sched();
+}
+# 19 "include/linux/module.h" 2
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/local.h" 1
+# 1 "include/asm-generic/local.h" 1
+
+
+
+# 1 "include/linux/percpu.h" 1
+
+
+
+
+# 1 "include/linux/slab.h" 1
+# 93 "include/linux/slab.h"
+void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init(void);
+int slab_is_available(void);
+
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+ unsigned long,
+ void (*)(void *));
+void kmem_cache_destroy(struct kmem_cache *);
+int kmem_cache_shrink(struct kmem_cache *);
+void kmem_cache_free(struct kmem_cache *, void *);
+unsigned int kmem_cache_size(struct kmem_cache *);
+const char *kmem_cache_name(struct kmem_cache *);
+int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
+# 136 "include/linux/slab.h"
+void * __attribute__((warn_unused_result)) __krealloc(const void *, size_t, gfp_t);
+void * __attribute__((warn_unused_result)) krealloc(const void *, size_t, gfp_t);
+void kfree(const void *);
+void kzfree(const void *);
+size_t ksize(const void *);
+# 162 "include/linux/slab.h"
+# 1 "include/linux/slub_def.h" 1
+# 11 "include/linux/slub_def.h"
+# 1 "include/linux/workqueue.h" 1
+
+
+
+
+
+
+
+# 1 "include/linux/timer.h" 1
+
+
+
+
+# 1 "include/linux/ktime.h" 1
+# 25 "include/linux/ktime.h"
+# 1 "include/linux/jiffies.h" 1
+
+
+
+
+
+
+
+# 1 "include/linux/timex.h" 1
+# 64 "include/linux/timex.h"
+struct timex {
+ unsigned int modes;
+ long offset;
+ long freq;
+ long maxerror;
+ long esterror;
+ int status;
+ long constant;
+ long precision;
+ long tolerance;
+
+
+ struct timeval time;
+ long tick;
+
+ long ppsfreq;
+ long jitter;
+ int shift;
+ long stabil;
+ long jitcnt;
+ long calcnt;
+ long errcnt;
+ long stbcnt;
+
+ int tai;
+
+ int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32;
+};
+# 170 "include/linux/timex.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/timex.h" 1
+# 14 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/timex.h"
+typedef unsigned long long cycles_t;
+
+static inline __attribute__((always_inline)) cycles_t get_cycles(void)
+{
+ unsigned long tmp, tmp2;
+ __asm__ __volatile__("%0 = cycles; %1 = cycles2;" : "=d"(tmp), "=d"(tmp2));
+ return tmp | ((cycles_t)tmp2 << 32);
+}
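+/*
+ * Blackfin 64-bit cycle counter: CYCLES holds the low 32 bits and CYCLES2
+ * the high 32 bits; reading CYCLES is documented to latch CYCLES2, so
+ * reading both back-to-back in one asm statement yields a coherent value.
+ */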
+# 171 "include/linux/timex.h" 2
+# 230 "include/linux/timex.h"
+extern unsigned long tick_usec;
+extern unsigned long tick_nsec;
+extern int tickadj;
+
+
+
+
+extern int time_status;
+extern long time_maxerror;
+extern long time_esterror;
+
+extern long time_adjust;
+
+extern void ntp_init(void);
+extern void ntp_clear(void);
+
+
+
+
+
+static inline __attribute__((always_inline)) int ntp_synced(void)
+{
+ return !(time_status & 0x0040);
+}
+# 272 "include/linux/timex.h"
+extern u64 tick_length;
+
+extern void second_overflow(void);
+extern void update_ntp_one_tick(void);
+extern int do_adjtimex(struct timex *);
+
+
+
+
+int read_current_timer(unsigned long *timer_val);
+# 9 "include/linux/jiffies.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/param.h" 1
+# 10 "include/linux/jiffies.h" 2
+# 81 "include/linux/jiffies.h"
+extern u64 __attribute__((section(".data"))) jiffies_64;
+extern unsigned long volatile __attribute__((section(".data"))) jiffies;
+
+
+u64 get_jiffies_64(void);
+# 183 "include/linux/jiffies.h"
+extern unsigned long preset_lpj;
+# 296 "include/linux/jiffies.h"
+extern unsigned int jiffies_to_msecs(const unsigned long j);
+extern unsigned int jiffies_to_usecs(const unsigned long j);
+extern unsigned long msecs_to_jiffies(const unsigned int m);
+extern unsigned long usecs_to_jiffies(const unsigned int u);
+extern unsigned long timespec_to_jiffies(const struct timespec *value);
+extern void jiffies_to_timespec(const unsigned long jiffies,
+ struct timespec *value);
+extern unsigned long timeval_to_jiffies(const struct timeval *value);
+extern void jiffies_to_timeval(const unsigned long jiffies,
+ struct timeval *value);
+extern clock_t jiffies_to_clock_t(long x);
+extern unsigned long clock_t_to_jiffies(unsigned long x);
+extern u64 jiffies_64_to_clock_t(u64 x);
+extern u64 nsec_to_clock_t(u64 x);
+# 26 "include/linux/ktime.h" 2
+# 46 "include/linux/ktime.h"
+union ktime {
+ s64 tv64;
+
+ struct {
+
+
+
+ s32 nsec, sec;
+
+ } tv;
+
+};
+
+typedef union ktime ktime_t;
+# 151 "include/linux/ktime.h"
+static inline __attribute__((always_inline)) ktime_t ktime_set(const long secs, const unsigned long nsecs)
+{
+ return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } };
+}
+# 163 "include/linux/ktime.h"
+static inline __attribute__((always_inline)) ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
+{
+ ktime_t res;
+
+ res.tv64 = lhs.tv64 - rhs.tv64;
+ if (res.tv.nsec < 0)
+ res.tv.nsec += 1000000000L;
+
+ return res;
+}
+# 181 "include/linux/ktime.h"
+static inline __attribute__((always_inline)) ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
+{
+ ktime_t res;
+
+ res.tv64 = add1.tv64 + add2.tv64;
+# 194 "include/linux/ktime.h"
+ if (res.tv.nsec >= 1000000000L)
+ res.tv64 += (u32)-1000000000L;
+
+ return res;
+}
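+/*
+ * On this 32-bit little-endian configuration ktime_t packs nsec (low half)
+ * and sec (high half) as two s32 fields sharing one 64-bit word, so plain
+ * tv64 arithmetic is used for add/sub and only the nsec half needs a
+ * NSEC_PER_SEC correction afterwards: the (u32)-1000000000L addition above
+ * subtracts a second from nsec while the carry adds one to sec.
+ */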
+# 207 "include/linux/ktime.h"
+extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
+# 216 "include/linux/ktime.h"
+extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec);
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) ktime_t timespec_to_ktime(const struct timespec ts)
+{
+ return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec,
+ .nsec = (s32)ts.tv_nsec } };
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) ktime_t timeval_to_ktime(const struct timeval tv)
+{
+ return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec,
+ .nsec = (s32)tv.tv_usec * 1000 } };
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) struct timespec ktime_to_timespec(const ktime_t kt)
+{
+ return (struct timespec) { .tv_sec = (time_t) kt.tv.sec,
+ .tv_nsec = (long) kt.tv.nsec };
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) struct timeval ktime_to_timeval(const ktime_t kt)
+{
+ return (struct timeval) {
+ .tv_sec = (time_t) kt.tv.sec,
+ .tv_usec = (suseconds_t) (kt.tv.nsec / 1000L) };
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) s64 ktime_to_ns(const ktime_t kt)
+{
+ return (s64) kt.tv.sec * 1000000000L + kt.tv.nsec;
+}
+# 287 "include/linux/ktime.h"
+static inline __attribute__((always_inline)) int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
+{
+ return cmp1.tv64 == cmp2.tv64;
+}
+
+static inline __attribute__((always_inline)) s64 ktime_to_us(const ktime_t kt)
+{
+ struct timeval tv = ktime_to_timeval(kt);
+ return (s64) tv.tv_sec * 1000000L + tv.tv_usec;
+}
+
+static inline __attribute__((always_inline)) s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
+{
+ return ktime_to_us(ktime_sub(later, earlier));
+}
+
+static inline __attribute__((always_inline)) ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
+{
+ return ktime_add_ns(kt, usec * 1000);
+}
+
+static inline __attribute__((always_inline)) ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
+{
+ return ktime_sub_ns(kt, usec * 1000);
+}
+
+extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
+# 325 "include/linux/ktime.h"
+extern void ktime_get_ts(struct timespec *ts);
+
+
+
+
+static inline __attribute__((always_inline)) ktime_t ns_to_ktime(u64 ns)
+{
+ static const ktime_t ktime_zero = { .tv64 = 0 };
+ return ktime_add_ns(ktime_zero, ns);
+}
+# 6 "include/linux/timer.h" 2
+
+# 1 "include/linux/debugobjects.h" 1
+
+
+
+
+
+
+enum debug_obj_state {
+ ODEBUG_STATE_NONE,
+ ODEBUG_STATE_INIT,
+ ODEBUG_STATE_INACTIVE,
+ ODEBUG_STATE_ACTIVE,
+ ODEBUG_STATE_DESTROYED,
+ ODEBUG_STATE_NOTAVAILABLE,
+ ODEBUG_STATE_MAX,
+};
+
+struct debug_obj_descr;
+# 26 "include/linux/debugobjects.h"
+struct debug_obj {
+ struct hlist_node node;
+ enum debug_obj_state state;
+ void *object;
+ struct debug_obj_descr *descr;
+};
+# 45 "include/linux/debugobjects.h"
+struct debug_obj_descr {
+ const char *name;
+
+ int (*fixup_init) (void *addr, enum debug_obj_state state);
+ int (*fixup_activate) (void *addr, enum debug_obj_state state);
+ int (*fixup_destroy) (void *addr, enum debug_obj_state state);
+ int (*fixup_free) (void *addr, enum debug_obj_state state);
+};
+
+
+extern void debug_object_init (void *addr, struct debug_obj_descr *descr);
+extern void
+debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
+extern void debug_object_activate (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
+extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
+
+extern void debug_objects_early_init(void);
+extern void debug_objects_mem_init(void);
+# 86 "include/linux/debugobjects.h"
+static inline __attribute__((always_inline)) void
+debug_check_no_obj_freed(const void *address, unsigned long size) { }
+# 8 "include/linux/timer.h" 2
+
+
+struct tvec_base;
+
+struct timer_list {
+ struct list_head entry;
+ unsigned long expires;
+
+ void (*function)(unsigned long);
+ unsigned long data;
+
+ struct tvec_base *base;
+
+ void *start_site;
+ char start_comm[16];
+ int start_pid;
+
+
+ struct lockdep_map lockdep_map;
+
+};
+
+extern struct tvec_base boot_tvec_bases;
+# 59 "include/linux/timer.h"
+void init_timer_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key);
+void init_timer_deferrable_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key);
+# 111 "include/linux/timer.h"
+extern void init_timer_on_stack_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key);
+extern void destroy_timer_on_stack(struct timer_list *timer);
+# 125 "include/linux/timer.h"
+static inline __attribute__((always_inline)) void setup_timer_key(struct timer_list * timer,
+ const char *name,
+ struct lock_class_key *key,
+ void (*function)(unsigned long),
+ unsigned long data)
+{
+ timer->function = function;
+ timer->data = data;
+ init_timer_key(timer, name, key);
+}
+
+static inline __attribute__((always_inline)) void setup_timer_on_stack_key(struct timer_list *timer,
+ const char *name,
+ struct lock_class_key *key,
+ void (*function)(unsigned long),
+ unsigned long data)
+{
+ timer->function = function;
+ timer->data = data;
+ init_timer_on_stack_key(timer, name, key);
+}
+# 157 "include/linux/timer.h"
+static inline __attribute__((always_inline)) int timer_pending(const struct timer_list * timer)
+{
+ return timer->entry.next != ((void *)0);
+}
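+/*
+ * A detached timer has entry.next == NULL, so the list pointer doubles as
+ * the "pending" flag.  Typical arming sequence (illustrative only; the
+ * setup_timer()/init_timer() front ends are macros and therefore not
+ * visible in this preprocessed listing):
+ *
+ *     setup_timer(&t, my_timeout_fn, 0);
+ *     mod_timer(&t, jiffies + msecs_to_jiffies(100));
+ */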
+
+extern void add_timer_on(struct timer_list *timer, int cpu);
+extern int del_timer(struct timer_list * timer);
+extern int mod_timer(struct timer_list *timer, unsigned long expires);
+extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
+extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
+# 181 "include/linux/timer.h"
+extern unsigned long get_next_timer_interrupt(unsigned long now);
+
+
+
+
+
+
+extern int timer_stats_active;
+
+
+
+extern void init_timer_stats(void);
+
+extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
+ void *timerf, char *comm,
+ unsigned int timer_flag);
+
+extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
+ void *addr);
+
+static inline __attribute__((always_inline)) void timer_stats_timer_set_start_info(struct timer_list *timer)
+{
+ if (__builtin_expect(!!(!timer_stats_active), 1))
+ return;
+ __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
+}
+
+static inline __attribute__((always_inline)) void timer_stats_timer_clear_start_info(struct timer_list *timer)
+{
+ timer->start_site = ((void *)0);
+}
+# 226 "include/linux/timer.h"
+extern void add_timer(struct timer_list *timer);
+# 238 "include/linux/timer.h"
+extern void init_timers(void);
+extern void run_local_timers(void);
+struct hrtimer;
+extern enum hrtimer_restart it_real_fn(struct hrtimer *);
+
+unsigned long __round_jiffies(unsigned long j, int cpu);
+unsigned long __round_jiffies_relative(unsigned long j, int cpu);
+unsigned long round_jiffies(unsigned long j);
+unsigned long round_jiffies_relative(unsigned long j);
+
+unsigned long __round_jiffies_up(unsigned long j, int cpu);
+unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
+unsigned long round_jiffies_up(unsigned long j);
+unsigned long round_jiffies_up_relative(unsigned long j);
+# 9 "include/linux/workqueue.h" 2
+
+
+
+
+
+struct workqueue_struct;
+
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *work);
+
+
+
+
+
+
+
+struct work_struct {
+ atomic_long_t data;
+
+
+
+ struct list_head entry;
+ work_func_t func;
+
+ struct lockdep_map lockdep_map;
+
+};
+
+
+
+struct delayed_work {
+ struct work_struct work;
+ struct timer_list timer;
+};
+
+static inline __attribute__((always_inline)) struct delayed_work *to_delayed_work(struct work_struct *work)
+{
+ return ({ const typeof( ((struct delayed_work *)0)->work ) *__mptr = (work); (struct delayed_work *)( (char *)__mptr - __builtin_offsetof(struct delayed_work,work) );});
+}
+
+struct execute_work {
+ struct work_struct work;
+};
+# 167 "include/linux/workqueue.h"
+extern struct workqueue_struct *
+__create_workqueue_key(const char *name, int singlethread,
+ int freezeable, int rt, struct lock_class_key *key,
+ const char *lock_name);
+# 198 "include/linux/workqueue.h"
+extern void destroy_workqueue(struct workqueue_struct *wq);
+
+extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
+extern int queue_work_on(int cpu, struct workqueue_struct *wq,
+ struct work_struct *work);
+extern int queue_delayed_work(struct workqueue_struct *wq,
+ struct delayed_work *work, unsigned long delay);
+extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ struct delayed_work *work, unsigned long delay);
+
+extern void flush_workqueue(struct workqueue_struct *wq);
+extern void flush_scheduled_work(void);
+extern void flush_delayed_work(struct delayed_work *work);
+
+extern int schedule_work(struct work_struct *work);
+extern int schedule_work_on(int cpu, struct work_struct *work);
+extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
+ unsigned long delay);
+extern int schedule_on_each_cpu(work_func_t func);
+extern int current_is_keventd(void);
+extern int keventd_up(void);
+
+extern void init_workqueues(void);
+int execute_in_process_context(work_func_t fn, struct execute_work *);
+
+extern int flush_work(struct work_struct *work);
+
+extern int cancel_work_sync(struct work_struct *work);
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int cancel_delayed_work(struct delayed_work *work)
+{
+ int ret;
+
+ ret = del_timer(&work->timer);
+ if (ret)
+ clear_bit(0, ((unsigned long *)(&(&work->work)->data)));
+ return ret;
+}
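+/*
+ * cancel_delayed_work(): a non-zero del_timer() return means the timer was
+ * still pending, so the work never reached a queue and its pending bit
+ * (bit 0 of work->data) can be cleared directly.  Unlike
+ * cancel_delayed_work_sync() this does not wait for a handler that is
+ * already executing.
+ */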
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int __cancel_delayed_work(struct delayed_work *work)
+{
+ int ret;
+
+ ret = del_timer(&work->timer);
+ if (ret)
+ clear_bit(0, ((unsigned long *)(&(&work->work)->data)));
+ return ret;
+}
+
+extern int cancel_delayed_work_sync(struct delayed_work *work);
+
+
+static inline __attribute__((always_inline))
+void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
+ struct delayed_work *work)
+{
+ cancel_delayed_work_sync(work);
+}
+
+
+static inline __attribute__((always_inline))
+void cancel_rearming_delayed_work(struct delayed_work *work)
+{
+ cancel_delayed_work_sync(work);
+}
+
+
+static inline __attribute__((always_inline)) long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+{
+ return fn(arg);
+}
+# 12 "include/linux/slub_def.h" 2
+
+# 1 "include/linux/kmemtrace.h" 1
+# 12 "include/linux/kmemtrace.h"
+# 1 "include/trace/events/kmem.h" 1
+
+
+
+
+
+
+
+# 1 "include/linux/tracepoint.h" 1
+# 9 "include/trace/events/kmem.h" 2
+# 47 "include/trace/events/kmem.h"
+extern struct tracepoint __tracepoint_kmalloc; static inline __attribute__((always_inline)) void trace_kmalloc(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags) { if (__builtin_expect(!!(__tracepoint_kmalloc.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_kmalloc)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_kmalloc)->funcs) *)&((&__tracepoint_kmalloc)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags))(*it_func))(call_site, ptr, bytes_req, bytes_alloc, gfp_flags); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_kmalloc(void (*probe)(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags)) { return tracepoint_probe_register("kmalloc", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_kmalloc(void (*probe)(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags)) { return tracepoint_probe_unregister("kmalloc", (void *)probe); };
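+/*
+ * The single-line blobs above and below are the preprocessed form of the
+ * kmem trace event declarations: each declares the per-event tracepoint
+ * object, an inline trace_xxx() hook that walks the registered probes under
+ * rcu_read_lock_sched_notrace(), and register/unregister helpers wrapping
+ * tracepoint_probe_register() and tracepoint_probe_unregister().
+ */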
+# 81 "include/trace/events/kmem.h"
+extern struct tracepoint __tracepoint_kmem_cache_alloc; static inline __attribute__((always_inline)) void trace_kmem_cache_alloc(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags) { if (__builtin_expect(!!(__tracepoint_kmem_cache_alloc.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_kmem_cache_alloc)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_kmem_cache_alloc)->funcs) *)&((&__tracepoint_kmem_cache_alloc)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags))(*it_func))(call_site, ptr, bytes_req, bytes_alloc, gfp_flags); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_kmem_cache_alloc(void (*probe)(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags)) { return tracepoint_probe_register("kmem_cache_alloc", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_kmem_cache_alloc(void (*probe)(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags)) { return tracepoint_probe_unregister("kmem_cache_alloc", (void *)probe); };
+# 115 "include/trace/events/kmem.h"
+extern struct tracepoint __tracepoint_kmalloc_node; static inline __attribute__((always_inline)) void trace_kmalloc_node(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node) { if (__builtin_expect(!!(__tracepoint_kmalloc_node.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_kmalloc_node)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_kmalloc_node)->funcs) *)&((&__tracepoint_kmalloc_node)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node))(*it_func))(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_kmalloc_node(void (*probe)(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node)) { return tracepoint_probe_register("kmalloc_node", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_kmalloc_node(void (*probe)(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node)) { return tracepoint_probe_unregister("kmalloc_node", (void *)probe); };
+# 153 "include/trace/events/kmem.h"
+extern struct tracepoint __tracepoint_kmem_cache_alloc_node; static inline __attribute__((always_inline)) void trace_kmem_cache_alloc_node(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node) { if (__builtin_expect(!!(__tracepoint_kmem_cache_alloc_node.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_kmem_cache_alloc_node)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_kmem_cache_alloc_node)->funcs) *)&((&__tracepoint_kmem_cache_alloc_node)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node))(*it_func))(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_kmem_cache_alloc_node(void (*probe)(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node)) { return tracepoint_probe_register("kmem_cache_alloc_node", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_kmem_cache_alloc_node(void (*probe)(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node)) { return tracepoint_probe_unregister("kmem_cache_alloc_node", (void *)probe); };
+# 191 "include/trace/events/kmem.h"
+extern struct tracepoint __tracepoint_kfree; static inline __attribute__((always_inline)) void trace_kfree(unsigned long call_site, const void *ptr) { if (__builtin_expect(!!(__tracepoint_kfree.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_kfree)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_kfree)->funcs) *)&((&__tracepoint_kfree)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(unsigned long call_site, const void *ptr))(*it_func))(call_site, ptr); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_kfree(void (*probe)(unsigned long call_site, const void *ptr)) { return tracepoint_probe_register("kfree", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_kfree(void (*probe)(unsigned long call_site, const void *ptr)) { return tracepoint_probe_unregister("kfree", (void *)probe); };
+# 210 "include/trace/events/kmem.h"
+extern struct tracepoint __tracepoint_kmem_cache_free; static inline __attribute__((always_inline)) void trace_kmem_cache_free(unsigned long call_site, const void *ptr) { if (__builtin_expect(!!(__tracepoint_kmem_cache_free.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_kmem_cache_free)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_kmem_cache_free)->funcs) *)&((&__tracepoint_kmem_cache_free)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(unsigned long call_site, const void *ptr))(*it_func))(call_site, ptr); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_kmem_cache_free(void (*probe)(unsigned long call_site, const void *ptr)) { return tracepoint_probe_register("kmem_cache_free", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_kmem_cache_free(void (*probe)(unsigned long call_site, const void *ptr)) { return tracepoint_probe_unregister("kmem_cache_free", (void *)probe); };
+# 229 "include/trace/events/kmem.h"
+extern struct tracepoint __tracepoint_mm_page_free_direct; static inline __attribute__((always_inline)) void trace_mm_page_free_direct(struct page *page, unsigned int order) { if (__builtin_expect(!!(__tracepoint_mm_page_free_direct.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_mm_page_free_direct)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_mm_page_free_direct)->funcs) *)&((&__tracepoint_mm_page_free_direct)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(struct page *page, unsigned int order))(*it_func))(page, order); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_mm_page_free_direct(void (*probe)(struct page *page, unsigned int order)) { return tracepoint_probe_register("mm_page_free_direct", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_mm_page_free_direct(void (*probe)(struct page *page, unsigned int order)) { return tracepoint_probe_unregister("mm_page_free_direct", (void *)probe); };
+# 251 "include/trace/events/kmem.h"
+extern struct tracepoint __tracepoint_mm_pagevec_free; static inline __attribute__((always_inline)) void trace_mm_pagevec_free(struct page *page, int cold) { if (__builtin_expect(!!(__tracepoint_mm_pagevec_free.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_mm_pagevec_free)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_mm_pagevec_free)->funcs) *)&((&__tracepoint_mm_pagevec_free)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(struct page *page, int cold))(*it_func))(page, cold); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_mm_pagevec_free(void (*probe)(struct page *page, int cold)) { return tracepoint_probe_register("mm_pagevec_free", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_mm_pagevec_free(void (*probe)(struct page *page, int cold)) { return tracepoint_probe_unregister("mm_pagevec_free", (void *)probe); };
+# 273 "include/trace/events/kmem.h"
+extern struct tracepoint __tracepoint_mm_page_alloc; static inline __attribute__((always_inline)) void trace_mm_page_alloc(struct page *page, unsigned int order, gfp_t gfp_flags, int migratetype) { if (__builtin_expect(!!(__tracepoint_mm_page_alloc.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_mm_page_alloc)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_mm_page_alloc)->funcs) *)&((&__tracepoint_mm_page_alloc)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(struct page *page, unsigned int order, gfp_t gfp_flags, int migratetype))(*it_func))(page, order, gfp_flags, migratetype); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_mm_page_alloc(void (*probe)(struct page *page, unsigned int order, gfp_t gfp_flags, int migratetype)) { return tracepoint_probe_register("mm_page_alloc", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_mm_page_alloc(void (*probe)(struct page *page, unsigned int order, gfp_t gfp_flags, int migratetype)) { return tracepoint_probe_unregister("mm_page_alloc", (void *)probe); };
+# 302 "include/trace/events/kmem.h"
+extern struct tracepoint __tracepoint_mm_page_alloc_zone_locked; static inline __attribute__((always_inline)) void trace_mm_page_alloc_zone_locked(struct page *page, unsigned int order, int migratetype) { if (__builtin_expect(!!(__tracepoint_mm_page_alloc_zone_locked.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_mm_page_alloc_zone_locked)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_mm_page_alloc_zone_locked)->funcs) *)&((&__tracepoint_mm_page_alloc_zone_locked)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(struct page *page, unsigned int order, int migratetype))(*it_func))(page, order, migratetype); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_mm_page_alloc_zone_locked(void (*probe)(struct page *page, unsigned int order, int migratetype)) { return tracepoint_probe_register("mm_page_alloc_zone_locked", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_mm_page_alloc_zone_locked(void (*probe)(struct page *page, unsigned int order, int migratetype)) { return tracepoint_probe_unregister("mm_page_alloc_zone_locked", (void *)probe); };
+# 328 "include/trace/events/kmem.h"
+extern struct tracepoint __tracepoint_mm_page_pcpu_drain; static inline __attribute__((always_inline)) void trace_mm_page_pcpu_drain(struct page *page, int order, int migratetype) { if (__builtin_expect(!!(__tracepoint_mm_page_pcpu_drain.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_mm_page_pcpu_drain)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_mm_page_pcpu_drain)->funcs) *)&((&__tracepoint_mm_page_pcpu_drain)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(struct page *page, int order, int migratetype))(*it_func))(page, order, migratetype); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_mm_page_pcpu_drain(void (*probe)(struct page *page, int order, int migratetype)) { return tracepoint_probe_register("mm_page_pcpu_drain", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_mm_page_pcpu_drain(void (*probe)(struct page *page, int order, int migratetype)) { return tracepoint_probe_unregister("mm_page_pcpu_drain", (void *)probe); };
+# 353 "include/trace/events/kmem.h"
+extern struct tracepoint __tracepoint_mm_page_alloc_extfrag; static inline __attribute__((always_inline)) void trace_mm_page_alloc_extfrag(struct page *page, int alloc_order, int fallback_order, int alloc_migratetype, int fallback_migratetype) { if (__builtin_expect(!!(__tracepoint_mm_page_alloc_extfrag.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_mm_page_alloc_extfrag)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_mm_page_alloc_extfrag)->funcs) *)&((&__tracepoint_mm_page_alloc_extfrag)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(struct page *page, int alloc_order, int fallback_order, int alloc_migratetype, int fallback_migratetype))(*it_func))(page, alloc_order, fallback_order, alloc_migratetype, fallback_migratetype); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_mm_page_alloc_extfrag(void (*probe)(struct page *page, int alloc_order, int fallback_order, int alloc_migratetype, int fallback_migratetype)) { return tracepoint_probe_register("mm_page_alloc_extfrag", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_mm_page_alloc_extfrag(void (*probe)(struct page *page, int alloc_order, int fallback_order, int alloc_migratetype, int fallback_migratetype)) { return tracepoint_probe_unregister("mm_page_alloc_extfrag", (void *)probe); };
+# 394 "include/trace/events/kmem.h"
+# 1 "include/trace/define_trace.h" 1
+# 394 "include/trace/events/kmem.h" 2
+# 13 "include/linux/kmemtrace.h" 2
+
+
+
+
+static inline __attribute__((always_inline)) void kmemtrace_init(void)
+{
+}
+# 14 "include/linux/slub_def.h" 2
+# 1 "include/linux/kmemleak.h" 1
+# 60 "include/linux/kmemleak.h"
+static inline __attribute__((always_inline)) void kmemleak_init(void)
+{
+}
+static inline __attribute__((always_inline)) void kmemleak_alloc(const void *ptr, size_t size, int min_count,
+ gfp_t gfp)
+{
+}
+static inline __attribute__((always_inline)) void kmemleak_alloc_recursive(const void *ptr, size_t size,
+ int min_count, unsigned long flags,
+ gfp_t gfp)
+{
+}
+static inline __attribute__((always_inline)) void kmemleak_free(const void *ptr)
+{
+}
+static inline __attribute__((always_inline)) void kmemleak_free_part(const void *ptr, size_t size)
+{
+}
+static inline __attribute__((always_inline)) void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+{
+}
+static inline __attribute__((always_inline)) void kmemleak_not_leak(const void *ptr)
+{
+}
+static inline __attribute__((always_inline)) void kmemleak_ignore(const void *ptr)
+{
+}
+static inline __attribute__((always_inline)) void kmemleak_scan_area(const void *ptr, unsigned long offset,
+ size_t length, gfp_t gfp)
+{
+}
+static inline __attribute__((always_inline)) void kmemleak_erase(void **ptr)
+{
+}
+static inline __attribute__((always_inline)) void kmemleak_no_scan(const void *ptr)
+{
+}
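+/*
+ * All kmemleak_*() helpers above are empty always-inline stubs, i.e. this
+ * configuration was built without kmemleak support, so a call such as
+ *
+ *   kmemleak_not_leak(ptr);   // report an object as intentionally unreferenced
+ *
+ * compiles away to nothing here.  (Illustrative note, not from the dump.)
+ */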
+# 15 "include/linux/slub_def.h" 2
+
+enum stat_item {
+ ALLOC_FASTPATH,
+ ALLOC_SLOWPATH,
+ FREE_FASTPATH,
+ FREE_SLOWPATH,
+ FREE_FROZEN,
+ FREE_ADD_PARTIAL,
+ FREE_REMOVE_PARTIAL,
+ ALLOC_FROM_PARTIAL,
+ ALLOC_SLAB,
+ ALLOC_REFILL,
+ FREE_SLAB,
+ CPUSLAB_FLUSH,
+ DEACTIVATE_FULL,
+ DEACTIVATE_EMPTY,
+ DEACTIVATE_TO_HEAD,
+ DEACTIVATE_TO_TAIL,
+ DEACTIVATE_REMOTE_FREES,
+ ORDER_FALLBACK,
+ NR_SLUB_STAT_ITEMS };
+
+struct kmem_cache_cpu {
+ void **freelist;
+ struct page *page;
+ int node;
+ unsigned int offset;
+ unsigned int objsize;
+
+ unsigned stat[NR_SLUB_STAT_ITEMS];
+
+};
+
+struct kmem_cache_node {
+ spinlock_t list_lock;
+ unsigned long nr_partial;
+ struct list_head partial;
+
+ atomic_long_t nr_slabs;
+ atomic_long_t total_objects;
+ struct list_head full;
+
+};
+
+
+
+
+
+
+struct kmem_cache_order_objects {
+ unsigned long x;
+};
+
+
+
+
+struct kmem_cache {
+
+ unsigned long flags;
+ int size;
+ int objsize;
+ int offset;
+ struct kmem_cache_order_objects oo;
+
+
+
+
+
+ struct kmem_cache_node local_node;
+
+
+ struct kmem_cache_order_objects max;
+ struct kmem_cache_order_objects min;
+ gfp_t allocflags;
+ int refcount;
+ void (*ctor)(void *);
+ int inuse;
+ int align;
+ unsigned long min_partial;
+ const char *name;
+ struct list_head list;
+
+ struct kobject kobj;
+# 110 "include/linux/slub_def.h"
+ struct kmem_cache_cpu cpu_slab;
+
+};
+# 142 "include/linux/slub_def.h"
+extern struct kmem_cache kmalloc_caches[(12 + 2)];
+
+
+
+
+
+static inline __attribute__((always_inline)) __attribute__((always_inline)) int kmalloc_index(size_t size)
+{
+ if (!size)
+ return 0;
+
+ if (size <= 8)
+ return ( __builtin_constant_p(8) ? ( (8) < 1 ? ____ilog2_NaN() : (8) & (1ULL << 63) ? 63 : (8) & (1ULL << 62) ? 62 : (8) & (1ULL << 61) ? 61 : (8) & (1ULL << 60) ? 60 : (8) & (1ULL << 59) ? 59 : (8) & (1ULL << 58) ? 58 : (8) & (1ULL << 57) ? 57 : (8) & (1ULL << 56) ? 56 : (8) & (1ULL << 55) ? 55 : (8) & (1ULL << 54) ? 54 : (8) & (1ULL << 53) ? 53 : (8) & (1ULL << 52) ? 52 : (8) & (1ULL << 51) ? 51 : (8) & (1ULL << 50) ? 50 : (8) & (1ULL << 49) ? 49 : (8) & (1ULL << 48) ? 48 : (8) & (1ULL << 47) ? 47 : (8) & (1ULL << 46) ? 46 : (8) & (1ULL << 45) ? 45 : (8) & (1ULL << 44) ? 44 : (8) & (1ULL << 43) ? 43 : (8) & (1ULL << 42) ? 42 : (8) & (1ULL << 41) ? 41 : (8) & (1ULL << 40) ? 40 : (8) & (1ULL << 39) ? 39 : (8) & (1ULL << 38) ? 38 : (8) & (1ULL << 37) ? 37 : (8) & (1ULL << 36) ? 36 : (8) & (1ULL << 35) ? 35 : (8) & (1ULL << 34) ? 34 : (8) & (1ULL << 33) ? 33 : (8) & (1ULL << 32) ? 32 : (8) & (1ULL << 31) ? 31 : (8) & (1ULL << 30) ? 30 : (8) & (1ULL << 29) ? 29 : (8) & (1ULL << 28) ? 28 : (8) & (1ULL << 27) ? 27 : (8) & (1ULL << 26) ? 26 : (8) & (1ULL << 25) ? 25 : (8) & (1ULL << 24) ? 24 : (8) & (1ULL << 23) ? 23 : (8) & (1ULL << 22) ? 22 : (8) & (1ULL << 21) ? 21 : (8) & (1ULL << 20) ? 20 : (8) & (1ULL << 19) ? 19 : (8) & (1ULL << 18) ? 18 : (8) & (1ULL << 17) ? 17 : (8) & (1ULL << 16) ? 16 : (8) & (1ULL << 15) ? 15 : (8) & (1ULL << 14) ? 14 : (8) & (1ULL << 13) ? 13 : (8) & (1ULL << 12) ? 12 : (8) & (1ULL << 11) ? 11 : (8) & (1ULL << 10) ? 10 : (8) & (1ULL << 9) ? 9 : (8) & (1ULL << 8) ? 8 : (8) & (1ULL << 7) ? 7 : (8) & (1ULL << 6) ? 6 : (8) & (1ULL << 5) ? 5 : (8) & (1ULL << 4) ? 4 : (8) & (1ULL << 3) ? 3 : (8) & (1ULL << 2) ? 2 : (8) & (1ULL << 1) ? 1 : (8) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(8) <= 4) ? __ilog2_u32(8) : __ilog2_u64(8) );
+
+ if (8 <= 32 && size > 64 && size <= 96)
+ return 1;
+ if (8 <= 64 && size > 128 && size <= 192)
+ return 2;
+ if (size <= 8) return 3;
+ if (size <= 16) return 4;
+ if (size <= 32) return 5;
+ if (size <= 64) return 6;
+ if (size <= 128) return 7;
+ if (size <= 256) return 8;
+ if (size <= 512) return 9;
+ if (size <= 1024) return 10;
+ if (size <= 2 * 1024) return 11;
+ if (size <= 4 * 1024) return 12;
+
+
+
+
+ if (size <= 8 * 1024) return 13;
+ if (size <= 16 * 1024) return 14;
+ if (size <= 32 * 1024) return 15;
+ if (size <= 64 * 1024) return 16;
+ if (size <= 128 * 1024) return 17;
+ if (size <= 256 * 1024) return 18;
+ if (size <= 512 * 1024) return 19;
+ if (size <= 1024 * 1024) return 20;
+ if (size <= 2 * 1024 * 1024) return 21;
+ return -1;
+# 192 "include/linux/slub_def.h"
+}
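+/*
+ * Worked examples for kmalloc_index() above (illustrative, not part of the
+ * dump): a size maps to the smallest fitting kmalloc cache, with the special
+ * 96- and 192-byte caches checked before the power-of-two ladder, e.g.
+ *
+ *   kmalloc_index(8)    == 3    (8-byte cache)
+ *   kmalloc_index(96)   == 1    (dedicated 96-byte cache)
+ *   kmalloc_index(100)  == 7    (rounded up to the 128-byte cache)
+ *   kmalloc_index(4096) == 12   (4 KiB cache)
+ */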
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) __attribute__((always_inline)) struct kmem_cache *kmalloc_slab(size_t size)
+{
+ int index = kmalloc_index(size);
+
+ if (index == 0)
+ return ((void *)0);
+
+ return &kmalloc_caches[index];
+}
+# 217 "include/linux/slub_def.h"
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *__kmalloc(size_t size, gfp_t flags);
+
+
+
+
+static inline __attribute__((always_inline)) __attribute__((always_inline)) void *
+kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+ return kmem_cache_alloc(s, gfpflags);
+}
+
+
+static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc_large(size_t size, gfp_t flags)
+{
+ unsigned int order = get_order(size);
+ void *ret = (void *) __get_free_pages(flags | (( gfp_t)0x4000u), order);
+
+ kmemleak_alloc(ret, size, 1, flags);
+ trace_kmalloc(({ __label__ __here; __here: (unsigned long)&&__here; }), ret, size, (1UL << 12) << order, flags);
+
+ return ret;
+}
+
+static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc(size_t size, gfp_t flags)
+{
+ void *ret;
+
+ if (__builtin_constant_p(size)) {
+ if (size > (2 * (1UL << 12)))
+ return kmalloc_large(size, flags);
+
+ if (!(flags & (( gfp_t)0x01u))) {
+ struct kmem_cache *s = kmalloc_slab(size);
+
+ if (!s)
+ return ((void *)16);
+
+ ret = kmem_cache_alloc_notrace(s, flags);
+
+ trace_kmalloc(({ __label__ __here; __here: (unsigned long)&&__here; }), ret, size, s->size, flags);
+
+ return ret;
+ }
+ }
+ return __kmalloc(size, flags);
+}
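+/*
+ * Hedged sketch of how the SLUB kmalloc() fast path above folds at compile
+ * time when the size is a constant ("buf" is a hypothetical variable):
+ *
+ *   void *buf = kmalloc(64, GFP_KERNEL);   // GFP_KERNEL == 0xd0 here
+ *
+ * 64 is not above 2 * PAGE_SIZE and GFP_DMA (0x01u) is clear, so the call
+ * becomes kmem_cache_alloc_notrace(&kmalloc_caches[6], flags).  Constant
+ * sizes above two pages (8 KiB on this config) go straight to
+ * kmalloc_large(), and non-constant sizes fall back to __kmalloc().
+ */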
+# 163 "include/linux/slab.h" 2
+# 220 "include/linux/slab.h"
+static inline __attribute__((always_inline)) void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+ if (size != 0 && n > (~0UL) / size)
+ return ((void *)0);
+ return __kmalloc(n * size, flags | (( gfp_t)0x8000u));
+}
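+/*
+ * The n > (~0UL)/size test above is the multiplication-overflow guard: on
+ * this 32-bit target, kcalloc(0x20000000, 16, GFP_KERNEL) would wrap
+ * 0x20000000 * 16 around to 0, so the check returns NULL instead of handing
+ * back an undersized buffer.  (Illustrative arithmetic, not from the dump.)
+ */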
+# 238 "include/linux/slab.h"
+static inline __attribute__((always_inline)) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ return kmalloc(size, flags);
+}
+
+static inline __attribute__((always_inline)) void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ return __kmalloc(size, flags);
+}
+
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+
+static inline __attribute__((always_inline)) void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+ gfp_t flags, int node)
+{
+ return kmem_cache_alloc(cachep, flags);
+}
+# 266 "include/linux/slab.h"
+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
+# 303 "include/linux/slab.h"
+static inline __attribute__((always_inline)) void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
+{
+ return kmem_cache_alloc(k, flags | (( gfp_t)0x8000u));
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void *kzalloc(size_t size, gfp_t flags)
+{
+ return kmalloc(size, flags | (( gfp_t)0x8000u));
+}
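+/*
+ * kzalloc() above is simply kmalloc() with __GFP_ZERO (0x8000u) OR-ed in;
+ * a typical caller pattern ("struct foo" is hypothetical, not from this
+ * dump):
+ *
+ *   struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ *   if (!p)
+ *           return -ENOMEM;
+ */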
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void *kzalloc_node(size_t size, gfp_t flags, int node)
+{
+ return kmalloc_node(size, flags | (( gfp_t)0x8000u), node);
+}
+
+void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init_late(void);
+# 6 "include/linux/percpu.h" 2
+
+
+# 1 "include/linux/pfn.h" 1
+# 9 "include/linux/percpu.h" 2
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/percpu.h" 1
+# 1 "include/asm-generic/percpu.h" 1
+
+
+
+
+
+# 1 "include/linux/percpu-defs.h" 1
+# 7 "include/asm-generic/percpu.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/percpu.h" 2
+# 11 "include/linux/percpu.h" 2
+# 166 "include/linux/percpu.h"
+static inline __attribute__((always_inline)) void *__alloc_percpu(size_t size, size_t align)
+{
+
+
+
+
+
+ ({ static int __warned; int __ret_warn_once = !!(align > (1 << 5)); if (__builtin_expect(!!(__ret_warn_once), 0)) if (({ int __ret_warn_on = !!(!__warned); if (__builtin_expect(!!(__ret_warn_on), 0)) asm volatile( "1: .hword %0\n" " .section __bug_table,\"a\",@progbits\n" "2: .long 1b\n" " .long %1\n" " .short %2\n" " .short %3\n" " .org 2b + %4\n" " .previous" : : "i"(0xefcd), "i"("include/linux/percpu.h"), "i"(173), "i"((1<<0)), "i"(sizeof(struct bug_entry))); __builtin_expect(!!(__ret_warn_on), 0); })) __warned = 1; __builtin_expect(!!(__ret_warn_once), 0); });
+ return kzalloc(size, ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
+}
+
+static inline __attribute__((always_inline)) void free_percpu(void *p)
+{
+ kfree(p);
+}
+
+static inline __attribute__((always_inline)) void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_per_cpu_areas(void) { }
+
+static inline __attribute__((always_inline)) void *pcpu_lpage_remapped(void *kaddr)
+{
+ return ((void *)0);
+}
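+/*
+ * On this uniprocessor configuration the per-CPU allocator above collapses
+ * to the regular slab allocator: __alloc_percpu() is kzalloc(GFP_KERNEL)
+ * plus a WARN_ON_ONCE() if the requested alignment exceeds 32 bytes, and
+ * free_percpu() is kfree().  Illustrative call (hypothetical variable):
+ *
+ *   u32 *ctr = __alloc_percpu(sizeof(u32), __alignof__(u32));  // == kzalloc here
+ */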
+# 5 "include/asm-generic/local.h" 2
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 7 "include/asm-generic/local.h" 2
+# 21 "include/asm-generic/local.h"
+typedef struct
+{
+ atomic_long_t a;
+} local_t;
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/local.h" 2
+# 21 "include/linux/module.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/module.h" 1
+# 16 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/module.h"
+struct mod_arch_specific {
+ Elf32_Shdr *text_l1;
+ Elf32_Shdr *data_a_l1;
+ Elf32_Shdr *bss_a_l1;
+ Elf32_Shdr *data_b_l1;
+ Elf32_Shdr *bss_b_l1;
+ Elf32_Shdr *text_l2;
+ Elf32_Shdr *data_l2;
+ Elf32_Shdr *bss_l2;
+};
+# 22 "include/linux/module.h" 2
+
+# 1 "include/trace/events/module.h" 1
+
+
+
+
+
+
+# 1 "include/linux/tracepoint.h" 1
+# 8 "include/trace/events/module.h" 2
+
+
+
+struct module;
+
+
+
+
+
+
+extern struct tracepoint __tracepoint_module_load; static inline __attribute__((always_inline)) void trace_module_load(struct module *mod) { if (__builtin_expect(!!(__tracepoint_module_load.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_module_load)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_module_load)->funcs) *)&((&__tracepoint_module_load)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(struct module *mod))(*it_func))(mod); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_module_load(void (*probe)(struct module *mod)) { return tracepoint_probe_register("module_load", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_module_load(void (*probe)(struct module *mod)) { return tracepoint_probe_unregister("module_load", (void *)probe); };
+# 37 "include/trace/events/module.h"
+extern struct tracepoint __tracepoint_module_free; static inline __attribute__((always_inline)) void trace_module_free(struct module *mod) { if (__builtin_expect(!!(__tracepoint_module_free.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_module_free)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_module_free)->funcs) *)&((&__tracepoint_module_free)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(struct module *mod))(*it_func))(mod); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_module_free(void (*probe)(struct module *mod)) { return tracepoint_probe_register("module_free", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_module_free(void (*probe)(struct module *mod)) { return tracepoint_probe_unregister("module_free", (void *)probe); };
+# 54 "include/trace/events/module.h"
+extern struct tracepoint __tracepoint_module_get; static inline __attribute__((always_inline)) void trace_module_get(struct module *mod, unsigned long ip, int refcnt) { if (__builtin_expect(!!(__tracepoint_module_get.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_module_get)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_module_get)->funcs) *)&((&__tracepoint_module_get)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(struct module *mod, unsigned long ip, int refcnt))(*it_func))(mod, ip, refcnt); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_module_get(void (*probe)(struct module *mod, unsigned long ip, int refcnt)) { return tracepoint_probe_register("module_get", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_module_get(void (*probe)(struct module *mod, unsigned long ip, int refcnt)) { return tracepoint_probe_unregister("module_get", (void *)probe); };
+# 76 "include/trace/events/module.h"
+extern struct tracepoint __tracepoint_module_put; static inline __attribute__((always_inline)) void trace_module_put(struct module *mod, unsigned long ip, int refcnt) { if (__builtin_expect(!!(__tracepoint_module_put.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_module_put)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_module_put)->funcs) *)&((&__tracepoint_module_put)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(struct module *mod, unsigned long ip, int refcnt))(*it_func))(mod, ip, refcnt); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_module_put(void (*probe)(struct module *mod, unsigned long ip, int refcnt)) { return tracepoint_probe_register("module_put", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_module_put(void (*probe)(struct module *mod, unsigned long ip, int refcnt)) { return tracepoint_probe_unregister("module_put", (void *)probe); };
+# 98 "include/trace/events/module.h"
+extern struct tracepoint __tracepoint_module_request; static inline __attribute__((always_inline)) void trace_module_request(char *name, bool wait, unsigned long ip) { if (__builtin_expect(!!(__tracepoint_module_request.state), 0)) do { void **it_func; rcu_read_lock_sched_notrace(); it_func = ({ typeof((&__tracepoint_module_request)->funcs) _________p1 = (*(volatile typeof((&__tracepoint_module_request)->funcs) *)&((&__tracepoint_module_request)->funcs)); do { } while(0); (_________p1); }); if (it_func) { do { ((void(*)(char *name, bool wait, unsigned long ip))(*it_func))(name, wait, ip); } while (*(++it_func)); } rcu_read_unlock_sched_notrace(); } while (0); } static inline __attribute__((always_inline)) int register_trace_module_request(void (*probe)(char *name, bool wait, unsigned long ip)) { return tracepoint_probe_register("module_request", (void *)probe); } static inline __attribute__((always_inline)) int unregister_trace_module_request(void (*probe)(char *name, bool wait, unsigned long ip)) { return tracepoint_probe_unregister("module_request", (void *)probe); };
+# 125 "include/trace/events/module.h"
+# 1 "include/trace/define_trace.h" 1
+# 126 "include/trace/events/module.h" 2
+# 24 "include/linux/module.h" 2
+# 35 "include/linux/module.h"
+struct kernel_symbol
+{
+ unsigned long value;
+ const char *name;
+};
+
+struct modversion_info
+{
+ unsigned long crc;
+ char name[(64 - sizeof(unsigned long))];
+};
+
+struct module;
+
+struct module_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct module_attribute *, struct module *, char *);
+ ssize_t (*store)(struct module_attribute *, struct module *,
+ const char *, size_t count);
+ void (*setup)(struct module *, const char *);
+ int (*test)(struct module *);
+ void (*free)(struct module *);
+};
+
+struct module_kobject
+{
+ struct kobject kobj;
+ struct module *mod;
+ struct kobject *drivers_dir;
+ struct module_param_attrs *mp;
+};
+
+
+extern int init_module(void);
+extern void cleanup_module(void);
+
+
+struct exception_table_entry;
+
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value);
+void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish);
+void sort_main_extable(void);
+void trim_init_extable(struct module *m);
+
+
+
+
+
+
+extern struct module __this_module;
+# 170 "include/linux/module.h"
+const struct exception_table_entry *search_exception_tables(unsigned long add);
+
+struct notifier_block;
+
+
+
+
+void *__symbol_get(const char *symbol);
+void *__symbol_get_gpl(const char *symbol);
+# 227 "include/linux/module.h"
+enum module_state
+{
+ MODULE_STATE_LIVE,
+ MODULE_STATE_COMING,
+ MODULE_STATE_GOING,
+};
+
+struct module
+{
+ enum module_state state;
+
+
+ struct list_head list;
+
+
+ char name[(64 - sizeof(unsigned long))];
+
+
+ struct module_kobject mkobj;
+ struct module_attribute *modinfo_attrs;
+ const char *version;
+ const char *srcversion;
+ struct kobject *holders_dir;
+
+
+ const struct kernel_symbol *syms;
+ const unsigned long *crcs;
+ unsigned int num_syms;
+
+
+ struct kernel_param *kp;
+ unsigned int num_kp;
+
+
+ unsigned int num_gpl_syms;
+ const struct kernel_symbol *gpl_syms;
+ const unsigned long *gpl_crcs;
+
+
+
+ const struct kernel_symbol *unused_syms;
+ const unsigned long *unused_crcs;
+ unsigned int num_unused_syms;
+
+
+ unsigned int num_unused_gpl_syms;
+ const struct kernel_symbol *unused_gpl_syms;
+ const unsigned long *unused_gpl_crcs;
+
+
+
+ const struct kernel_symbol *gpl_future_syms;
+ const unsigned long *gpl_future_crcs;
+ unsigned int num_gpl_future_syms;
+
+
+ unsigned int num_exentries;
+ struct exception_table_entry *extable;
+
+
+ int (*init)(void);
+
+
+ void *module_init;
+
+
+ void *module_core;
+
+
+ unsigned int init_size, core_size;
+
+
+ unsigned int init_text_size, core_text_size;
+
+
+ struct mod_arch_specific arch;
+
+ unsigned int taints;
+
+
+
+ unsigned num_bugs;
+ struct list_head bug_list;
+ struct bug_entry *bug_table;
+# 319 "include/linux/module.h"
+ Elf32_Sym *symtab, *core_symtab;
+ unsigned int num_symtab, core_num_syms;
+ char *strtab, *core_strtab;
+
+
+ struct module_sect_attrs *sect_attrs;
+
+
+ struct module_notes_attrs *notes_attrs;
+
+
+
+ void *percpu;
+
+
+
+ char *args;
+
+ struct tracepoint *tracepoints;
+ unsigned int num_tracepoints;
+
+
+
+ const char **trace_bprintk_fmt_start;
+ unsigned int num_trace_bprintk_fmt;
+
+
+ struct ftrace_event_call *trace_events;
+ unsigned int num_trace_events;
+# 356 "include/linux/module.h"
+ struct list_head modules_which_use_me;
+
+
+ struct task_struct *waiter;
+
+
+ void (*exit)(void);
+
+
+
+
+ local_t ref;
+
+
+
+
+
+ ctor_fn_t *ctors;
+ unsigned int num_ctors;
+
+};
+
+
+
+
+extern struct mutex module_mutex;
+
+
+
+
+static inline __attribute__((always_inline)) int module_is_live(struct module *mod)
+{
+ return mod->state != MODULE_STATE_GOING;
+}
+
+struct module *__module_text_address(unsigned long addr);
+struct module *__module_address(unsigned long addr);
+bool is_module_address(unsigned long addr);
+bool is_module_text_address(unsigned long addr);
+
+static inline __attribute__((always_inline)) int within_module_core(unsigned long addr, struct module *mod)
+{
+ return (unsigned long)mod->module_core <= addr &&
+ addr < (unsigned long)mod->module_core + mod->core_size;
+}
+
+static inline __attribute__((always_inline)) int within_module_init(unsigned long addr, struct module *mod)
+{
+ return (unsigned long)mod->module_init <= addr &&
+ addr < (unsigned long)mod->module_init + mod->init_size;
+}
+
+
+struct module *find_module(const char *name);
+
+struct symsearch {
+ const struct kernel_symbol *start, *stop;
+ const unsigned long *crcs;
+ enum {
+ NOT_GPL_ONLY,
+ GPL_ONLY,
+ WILL_BE_GPL_ONLY,
+ } licence;
+ bool unused;
+};
+
+
+const struct kernel_symbol *find_symbol(const char *name,
+ struct module **owner,
+ const unsigned long **crc,
+ bool gplok,
+ bool warn);
+
+
+bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
+ unsigned int symnum, void *data), void *data);
+
+
+
+int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *name, char *module_name, int *exported);
+
+
+unsigned long module_kallsyms_lookup_name(const char *name);
+
+int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *, unsigned long),
+ void *data);
+
+extern void __module_put_and_exit(struct module *mod, long code)
+ __attribute__((noreturn));
+
+
+
+unsigned int module_refcount(struct module *mod);
+void __symbol_put(const char *symbol);
+
+void symbol_put_addr(void *addr);
+
+static inline __attribute__((always_inline)) local_t *__module_ref_addr(struct module *mod, int cpu)
+{
+
+
+
+ return &mod->ref;
+
+}
+
+
+
+static inline __attribute__((always_inline)) void __module_get(struct module *module)
+{
+ if (module) {
+ unsigned int cpu = ({ do { } while (0); 0; });
+ atomic_long_inc(&(__module_ref_addr(module, cpu))->a);
+ trace_module_get(module, ({ __label__ __here; __here: (unsigned long)&&__here; }),
+ atomic_long_read(&(__module_ref_addr(module, cpu))->a));
+ do { } while (0);
+ }
+}
+
+static inline __attribute__((always_inline)) int try_module_get(struct module *module)
+{
+ int ret = 1;
+
+ if (module) {
+ unsigned int cpu = ({ do { } while (0); 0; });
+ if (__builtin_expect(!!(module_is_live(module)), 1)) {
+ atomic_long_inc(&(__module_ref_addr(module, cpu))->a);
+ trace_module_get(module, ({ __label__ __here; __here: (unsigned long)&&__here; }),
+ atomic_long_read(&(__module_ref_addr(module, cpu))->a));
+ }
+ else
+ ret = 0;
+ do { } while (0);
+ }
+ return ret;
+}
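+/*
+ * try_module_get()/module_put() keep an owning module from unloading while
+ * its code is in use; the usual pattern is (illustrative, "owner" being some
+ * struct module * such as a file_operations .owner field):
+ *
+ *   if (!try_module_get(owner))
+ *           return -ENODEV;          // module is already on its way out
+ *   ...call into the module...
+ *   module_put(owner);
+ */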
+
+extern void module_put(struct module *module);
+# 512 "include/linux/module.h"
+int use_module(struct module *a, struct module *b);
+# 524 "include/linux/module.h"
+const char *module_address_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname,
+ char *namebuf);
+int lookup_module_symbol_name(unsigned long addr, char *symname);
+int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
+
+
+const struct exception_table_entry *search_module_extables(unsigned long addr);
+
+int register_module_notifier(struct notifier_block * nb);
+int unregister_module_notifier(struct notifier_block * nb);
+
+extern void print_modules(void);
+
+extern void module_update_tracepoints(void);
+extern int module_get_iter_tracepoints(struct tracepoint_iter *iter);
+# 665 "include/linux/module.h"
+struct device_driver;
+
+struct module;
+
+extern struct kset *module_kset;
+extern struct kobj_type module_ktype;
+extern int module_sysfs_initialized;
+
+int mod_sysfs_init(struct module *mod);
+int mod_sysfs_setup(struct module *mod,
+ struct kernel_param *kparam,
+ unsigned int num_params);
+int module_add_modinfo_attrs(struct module *mod);
+void module_remove_modinfo_attrs(struct module *mod);
+# 712 "include/linux/module.h"
+int module_bug_finalize(const Elf32_Ehdr *, const Elf32_Shdr *,
+ struct module *);
+void module_bug_cleanup(struct module *);
+# 24 "fs/sysv/super.c" 2
+
+
+# 1 "include/linux/buffer_head.h" 1
+# 11 "include/linux/buffer_head.h"
+# 1 "include/linux/fs.h" 1
+# 9 "include/linux/fs.h"
+# 1 "include/linux/limits.h" 1
+# 10 "include/linux/fs.h" 2
+# 1 "include/linux/ioctl.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ioctl.h" 1
+# 1 "include/asm-generic/ioctl.h" 1
+# 73 "include/asm-generic/ioctl.h"
+extern unsigned int __invalid_size_argument_for_IOC;
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ioctl.h" 2
+# 5 "include/linux/ioctl.h" 2
+# 11 "include/linux/fs.h" 2
+# 35 "include/linux/fs.h"
+struct files_stat_struct {
+ int nr_files;
+ int nr_free_files;
+ int max_files;
+};
+
+struct inodes_stat_t {
+ int nr_inodes;
+ int nr_unused;
+ int dummy[5];
+};
+# 365 "include/linux/fs.h"
+# 1 "include/linux/kdev_t.h" 1
+# 21 "include/linux/kdev_t.h"
+static inline __attribute__((always_inline)) int old_valid_dev(dev_t dev)
+{
+ return ((unsigned int) ((dev) >> 20)) < 256 && ((unsigned int) ((dev) & ((1U << 20) - 1))) < 256;
+}
+
+static inline __attribute__((always_inline)) u16 old_encode_dev(dev_t dev)
+{
+ return (((unsigned int) ((dev) >> 20)) << 8) | ((unsigned int) ((dev) & ((1U << 20) - 1)));
+}
+
+static inline __attribute__((always_inline)) dev_t old_decode_dev(u16 val)
+{
+ return ((((val >> 8) & 255) << 20) | (val & 255));
+}
+
+static inline __attribute__((always_inline)) int new_valid_dev(dev_t dev)
+{
+ return 1;
+}
+
+static inline __attribute__((always_inline)) u32 new_encode_dev(dev_t dev)
+{
+ unsigned major = ((unsigned int) ((dev) >> 20));
+ unsigned minor = ((unsigned int) ((dev) & ((1U << 20) - 1)));
+ return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
+}
+
+static inline __attribute__((always_inline)) dev_t new_decode_dev(u32 dev)
+{
+ unsigned major = (dev & 0xfff00) >> 8;
+ unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
+ return (((major) << 20) | (minor));
+}
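+/*
+ * Worked example for the 32-bit dev_t packing above (illustrative): a device
+ * with major 8, minor 1 is MKDEV(8, 1) == (8 << 20) | 1, and
+ *
+ *   new_encode_dev(MKDEV(8, 1)) == 0x00000801
+ *   new_decode_dev(0x00000801)  == MKDEV(8, 1)
+ *
+ * i.e. the low 8 minor bits land in bits 0-7, the major in bits 8-19, and
+ * any minor bits above 0xff are shifted up to bit 20 and above.
+ */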
+
+static inline __attribute__((always_inline)) int huge_valid_dev(dev_t dev)
+{
+ return 1;
+}
+
+static inline __attribute__((always_inline)) u64 huge_encode_dev(dev_t dev)
+{
+ return new_encode_dev(dev);
+}
+
+static inline __attribute__((always_inline)) dev_t huge_decode_dev(u64 dev)
+{
+ return new_decode_dev(dev);
+}
+
+static inline __attribute__((always_inline)) int sysv_valid_dev(dev_t dev)
+{
+ return ((unsigned int) ((dev) >> 20)) < (1<<14) && ((unsigned int) ((dev) & ((1U << 20) - 1))) < (1<<18);
+}
+
+static inline __attribute__((always_inline)) u32 sysv_encode_dev(dev_t dev)
+{
+ return ((unsigned int) ((dev) & ((1U << 20) - 1))) | (((unsigned int) ((dev) >> 20)) << 18);
+}
+
+static inline __attribute__((always_inline)) unsigned sysv_major(u32 dev)
+{
+ return (dev >> 18) & 0x3fff;
+}
+
+static inline __attribute__((always_inline)) unsigned sysv_minor(u32 dev)
+{
+ return dev & 0x3ffff;
+}
+# 366 "include/linux/fs.h" 2
+# 1 "include/linux/dcache.h" 1
+
+
+
+
+
+# 1 "include/linux/rculist.h" 1
+# 18 "include/linux/rculist.h"
+static inline __attribute__((always_inline)) void __list_add_rcu(struct list_head *new,
+ struct list_head *prev, struct list_head *next)
+{
+ new->next = next;
+ new->prev = prev;
+ ({ if (!__builtin_constant_p(new) || ((new) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (prev->next) = (new); });
+ next->prev = new;
+}
+# 43 "include/linux/rculist.h"
+static inline __attribute__((always_inline)) void list_add_rcu(struct list_head *new, struct list_head *head)
+{
+ __list_add_rcu(new, head, head->next);
+}
+# 64 "include/linux/rculist.h"
+static inline __attribute__((always_inline)) void list_add_tail_rcu(struct list_head *new,
+ struct list_head *head)
+{
+ __list_add_rcu(new, head->prev, head);
+}
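+/*
+ * Hedged usage sketch for the RCU list primitives above ("my_lock",
+ * "my_list", "p" and "new" are hypothetical): writers publish under a lock,
+ * readers walk the list inside an RCU read-side critical section:
+ *
+ *   spin_lock(&my_lock);
+ *   list_add_rcu(&new->node, &my_list);
+ *   spin_unlock(&my_lock);
+ *
+ *   rcu_read_lock();
+ *   list_for_each_entry_rcu(p, &my_list, node)
+ *           use(p);
+ *   rcu_read_unlock();
+ */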
+# 94 "include/linux/rculist.h"
+static inline __attribute__((always_inline)) void list_del_rcu(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->prev = ((void *) 0x00200200);
+}
+# 120 "include/linux/rculist.h"
+static inline __attribute__((always_inline)) void hlist_del_init_rcu(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ __hlist_del(n);
+ n->pprev = ((void *)0);
+ }
+}
+# 136 "include/linux/rculist.h"
+static inline __attribute__((always_inline)) void list_replace_rcu(struct list_head *old,
+ struct list_head *new)
+{
+ new->next = old->next;
+ new->prev = old->prev;
+ ({ if (!__builtin_constant_p(new) || ((new) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (new->prev->next) = (new); });
+ new->next->prev = new;
+ old->prev = ((void *) 0x00200200);
+}
+# 163 "include/linux/rculist.h"
+static inline __attribute__((always_inline)) void list_splice_init_rcu(struct list_head *list,
+ struct list_head *head,
+ void (*sync)(void))
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+ struct list_head *at = head->next;
+
+ if (list_empty(head))
+ return;
+
+
+
+ INIT_LIST_HEAD(list);
+# 185 "include/linux/rculist.h"
+ sync();
+# 195 "include/linux/rculist.h"
+ last->next = at;
+ ({ if (!__builtin_constant_p(first) || ((first) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (head->next) = (first); });
+ first->prev = head;
+ at->prev = last;
+}
+# 283 "include/linux/rculist.h"
+static inline __attribute__((always_inline)) void hlist_del_rcu(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->pprev = ((void *) 0x00200200);
+}
+# 296 "include/linux/rculist.h"
+static inline __attribute__((always_inline)) void hlist_replace_rcu(struct hlist_node *old,
+ struct hlist_node *new)
+{
+ struct hlist_node *next = old->next;
+
+ new->next = next;
+ new->pprev = old->pprev;
+ ({ if (!__builtin_constant_p(new) || ((new) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (*new->pprev) = (new); });
+ if (next)
+ new->next->pprev = &new->next;
+ old->pprev = ((void *) 0x00200200);
+}
+# 328 "include/linux/rculist.h"
+static inline __attribute__((always_inline)) void hlist_add_head_rcu(struct hlist_node *n,
+ struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+
+ n->next = first;
+ n->pprev = &h->first;
+ ({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (h->first) = (n); });
+ if (first)
+ first->pprev = &n->next;
+}
+# 358 "include/linux/rculist.h"
+static inline __attribute__((always_inline)) void hlist_add_before_rcu(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ n->pprev = next->pprev;
+ n->next = next;
+ ({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (*(n->pprev)) = (n); });
+ next->pprev = &n->next;
+}
+# 385 "include/linux/rculist.h"
+static inline __attribute__((always_inline)) void hlist_add_after_rcu(struct hlist_node *prev,
+ struct hlist_node *n)
+{
+ n->next = prev->next;
+ n->pprev = &prev->next;
+ ({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (prev->next) = (n); });
+ if (n->next)
+ n->next->pprev = &n->next;
+}
+# 7 "include/linux/dcache.h" 2
+
+
+
+
+struct nameidata;
+struct path;
+struct vfsmount;
+# 33 "include/linux/dcache.h"
+struct qstr {
+ unsigned int hash;
+ unsigned int len;
+ const unsigned char *name;
+};
+
+struct dentry_stat_t {
+ int nr_dentry;
+ int nr_unused;
+ int age_limit;
+ int want_pages;
+ int dummy[2];
+};
+extern struct dentry_stat_t dentry_stat;
+
+
+
+
+
+
+static inline __attribute__((always_inline)) unsigned long
+partial_name_hash(unsigned long c, unsigned long prevhash)
+{
+ return (prevhash + (c << 4) + (c >> 4)) * 11;
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) unsigned long end_name_hash(unsigned long hash)
+{
+ return (unsigned int) hash;
+}
+
+
+static inline __attribute__((always_inline)) unsigned int
+full_name_hash(const unsigned char *name, unsigned int len)
+{
+ unsigned long hash = 0;
+ while (len--)
+ hash = partial_name_hash(*name++, hash);
+ return end_name_hash(hash);
+}
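+/*
+ * The dentry name hash above folds one byte at a time:
+ * partial_name_hash(c, prev) = (prev + (c << 4) + (c >> 4)) * 11.  A small
+ * worked example (illustrative): full_name_hash("a", 1) processes c = 0x61,
+ * giving (0 + 0x610 + 0x6) * 11 = 17138.  This is the value normally stored
+ * in qstr.hash before d_lookup()/d_hash_and_lookup() are called.
+ */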
+# 89 "include/linux/dcache.h"
+struct dentry {
+ atomic_t d_count;
+ unsigned int d_flags;
+ spinlock_t d_lock;
+ int d_mounted;
+ struct inode *d_inode;
+
+
+
+
+
+ struct hlist_node d_hash;
+ struct dentry *d_parent;
+ struct qstr d_name;
+
+ struct list_head d_lru;
+
+
+
+ union {
+ struct list_head d_child;
+ struct rcu_head d_rcu;
+ } d_u;
+ struct list_head d_subdirs;
+ struct list_head d_alias;
+ unsigned long d_time;
+ const struct dentry_operations *d_op;
+ struct super_block *d_sb;
+ void *d_fsdata;
+
+ unsigned char d_iname[40];
+};
+
+
+
+
+
+
+
+enum dentry_d_lock_class
+{
+ DENTRY_D_LOCK_NORMAL,
+ DENTRY_D_LOCK_NESTED
+};
+
+struct dentry_operations {
+ int (*d_revalidate)(struct dentry *, struct nameidata *);
+ int (*d_hash) (struct dentry *, struct qstr *);
+ int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
+ int (*d_delete)(struct dentry *);
+ void (*d_release)(struct dentry *);
+ void (*d_iput)(struct dentry *, struct inode *);
+ char *(*d_dname)(struct dentry *, char *, int);
+};
+# 189 "include/linux/dcache.h"
+extern spinlock_t dcache_lock;
+extern seqlock_t rename_lock;
+# 208 "include/linux/dcache.h"
+static inline __attribute__((always_inline)) void __d_drop(struct dentry *dentry)
+{
+ if (!(dentry->d_flags & 0x0010)) {
+ dentry->d_flags |= 0x0010;
+ hlist_del_rcu(&dentry->d_hash);
+ }
+}
+
+static inline __attribute__((always_inline)) void d_drop(struct dentry *dentry)
+{
+ _spin_lock(&dcache_lock);
+ _spin_lock(&dentry->d_lock);
+ __d_drop(dentry);
+ _spin_unlock(&dentry->d_lock);
+ _spin_unlock(&dcache_lock);
+}
+
+static inline __attribute__((always_inline)) int dname_external(struct dentry *dentry)
+{
+ return dentry->d_name.name != dentry->d_iname;
+}
+
+
+
+
+extern void d_instantiate(struct dentry *, struct inode *);
+extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
+extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
+extern void d_delete(struct dentry *);
+
+
+extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
+extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
+extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+extern struct dentry * d_obtain_alias(struct inode *);
+extern void shrink_dcache_sb(struct super_block *);
+extern void shrink_dcache_parent(struct dentry *);
+extern void shrink_dcache_for_umount(struct super_block *);
+extern int d_invalidate(struct dentry *);
+
+
+extern struct dentry * d_alloc_root(struct inode *);
+
+
+extern void d_genocide(struct dentry *);
+
+extern struct dentry *d_find_alias(struct inode *);
+extern void d_prune_aliases(struct inode *);
+
+
+extern int have_submounts(struct dentry *);
+
+
+
+
+extern void d_rehash(struct dentry *);
+# 274 "include/linux/dcache.h"
+static inline __attribute__((always_inline)) void d_add(struct dentry *entry, struct inode *inode)
+{
+ d_instantiate(entry, inode);
+ d_rehash(entry);
+}
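+/*
+ * d_add() above is the usual way a filesystem ->lookup() publishes its
+ * result: it binds the inode to the dentry (d_instantiate) and hashes the
+ * dentry so later lookups hit the dcache (d_rehash).  Hedged sketch, with
+ * "my_iget" a hypothetical helper:
+ *
+ *   struct inode *inode = my_iget(dir->i_sb, ino);  // NULL makes a
+ *   d_add(dentry, inode);                           // negative dentry
+ *   return NULL;
+ */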
+# 288 "include/linux/dcache.h"
+static inline __attribute__((always_inline)) struct dentry *d_add_unique(struct dentry *entry, struct inode *inode)
+{
+ struct dentry *res;
+
+ res = d_instantiate_unique(entry, inode);
+ d_rehash(res != ((void *)0) ? res : entry);
+ return res;
+}
+
+
+extern void d_move(struct dentry *, struct dentry *);
+extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
+
+
+extern struct dentry * d_lookup(struct dentry *, struct qstr *);
+extern struct dentry * __d_lookup(struct dentry *, struct qstr *);
+extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *);
+
+
+extern int d_validate(struct dentry *, struct dentry *);
+
+
+
+
+extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+
+extern char *__d_path(const struct path *path, struct path *root, char *, int);
+extern char *d_path(const struct path *, char *, int);
+extern char *dentry_path(struct dentry *, char *, int);
+# 333 "include/linux/dcache.h"
+static inline __attribute__((always_inline)) struct dentry *dget(struct dentry *dentry)
+{
+ if (dentry) {
+ do { if (__builtin_expect(!!(!((&dentry->d_count)->counter)), 0)) do { asm volatile( "1: .hword %0\n" " .section __bug_table,\"a\",@progbits\n" "2: .long 1b\n" " .long %1\n" " .short %2\n" " .short %3\n" " .org 2b + %4\n" " .previous" : : "i"(0xefcd), "i"("include/linux/dcache.h"), "i"(336), "i"(0), "i"(sizeof(struct bug_entry))); for (;;); } while (0); } while(0);
+ atomic_inc(&dentry->d_count);
+ }
+ return dentry;
+}
+
+extern struct dentry * dget_locked(struct dentry *);
+# 351 "include/linux/dcache.h"
+static inline __attribute__((always_inline)) int d_unhashed(struct dentry *dentry)
+{
+ return (dentry->d_flags & 0x0010);
+}
+
+static inline __attribute__((always_inline)) int d_unlinked(struct dentry *dentry)
+{
+ return d_unhashed(dentry) && !((dentry) == (dentry)->d_parent);
+}
+
+static inline __attribute__((always_inline)) struct dentry *dget_parent(struct dentry *dentry)
+{
+ struct dentry *ret;
+
+ _spin_lock(&dentry->d_lock);
+ ret = dget(dentry->d_parent);
+ _spin_unlock(&dentry->d_lock);
+ return ret;
+}
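+/*
+ * dget()/dget_parent() above take dentry references that must each be
+ * balanced by a dput() once the caller is done, e.g. (illustrative):
+ *
+ *   struct dentry *parent = dget_parent(dentry);
+ *   ...use parent...
+ *   dput(parent);
+ */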
+
+extern void dput(struct dentry *);
+
+static inline __attribute__((always_inline)) int d_mountpoint(struct dentry *dentry)
+{
+ return dentry->d_mounted;
+}
+
+extern struct vfsmount *lookup_mnt(struct path *);
+extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
+
+extern int sysctl_vfs_cache_pressure;
+# 367 "include/linux/fs.h" 2
+# 1 "include/linux/path.h" 1
+
+
+
+struct dentry;
+struct vfsmount;
+
+struct path {
+ struct vfsmount *mnt;
+ struct dentry *dentry;
+};
+
+extern void path_get(struct path *);
+extern void path_put(struct path *);
+# 368 "include/linux/fs.h" 2
+
+
+
+
+# 1 "include/linux/radix-tree.h" 1
+# 41 "include/linux/radix-tree.h"
+static inline __attribute__((always_inline)) void *radix_tree_ptr_to_indirect(void *ptr)
+{
+ return (void *)((unsigned long)ptr | 1);
+}
+
+static inline __attribute__((always_inline)) void *radix_tree_indirect_to_ptr(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~1);
+}
+
+static inline __attribute__((always_inline)) int radix_tree_is_indirect_ptr(void *ptr)
+{
+ return (int)((unsigned long)ptr & 1);
+}
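+/*
+ * The three helpers above implement the radix tree's low-bit pointer tag:
+ * bit 0 of an rnode pointer marks an indirect (internal) node rather than a
+ * direct data item.  Illustrative round trip:
+ *
+ *   void *tagged = radix_tree_ptr_to_indirect(node);   // sets bit 0
+ *   radix_tree_is_indirect_ptr(tagged);                // -> 1
+ *   radix_tree_indirect_to_ptr(tagged);                // -> node again
+ */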
+
+
+
+
+
+
+struct radix_tree_root {
+ unsigned int height;
+ gfp_t gfp_mask;
+ struct radix_tree_node *rnode;
+};
+# 137 "include/linux/radix-tree.h"
+static inline __attribute__((always_inline)) void *radix_tree_deref_slot(void **pslot)
+{
+ void *ret = ({ typeof(*pslot) _________p1 = (*(volatile typeof(*pslot) *)&(*pslot)); do { } while(0); (_________p1); });
+ if (__builtin_expect(!!(radix_tree_is_indirect_ptr(ret)), 0))
+ ret = ((void *)-1UL);
+ return ret;
+}
+# 152 "include/linux/radix-tree.h"
+static inline __attribute__((always_inline)) void radix_tree_replace_slot(void **pslot, void *item)
+{
+ do { if (__builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 0)) do { asm volatile( "1: .hword %0\n" " .section __bug_table,\"a\",@progbits\n" "2: .long 1b\n" " .long %1\n" " .short %2\n" " .short %3\n" " .org 2b + %4\n" " .previous" : : "i"(0xefcd), "i"("include/linux/radix-tree.h"), "i"(154), "i"(0), "i"(sizeof(struct bug_entry))); for (;;); } while (0); } while(0);
+ ({ if (!__builtin_constant_p(item) || ((item) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (*pslot) = (item); });
+}
+
+int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
+void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
+void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
+void *radix_tree_delete(struct radix_tree_root *, unsigned long);
+unsigned int
+radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
+ unsigned long first_index, unsigned int max_items);
+unsigned int
+radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+ unsigned long first_index, unsigned int max_items);
+unsigned long radix_tree_next_hole(struct radix_tree_root *root,
+ unsigned long index, unsigned long max_scan);
+unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+ unsigned long index, unsigned long max_scan);
+int radix_tree_preload(gfp_t gfp_mask);
+void radix_tree_init(void);
+void *radix_tree_tag_set(struct radix_tree_root *root,
+ unsigned long index, unsigned int tag);
+void *radix_tree_tag_clear(struct radix_tree_root *root,
+ unsigned long index, unsigned int tag);
+int radix_tree_tag_get(struct radix_tree_root *root,
+ unsigned long index, unsigned int tag);
+unsigned int
+radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
+ unsigned long first_index, unsigned int max_items,
+ unsigned int tag);
+unsigned int
+radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
+ unsigned long first_index, unsigned int max_items,
+ unsigned int tag);
+int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
+
+static inline __attribute__((always_inline)) void radix_tree_preload_end(void)
+{
+ do { } while (0);
+}
+# 373 "include/linux/fs.h" 2
+# 1 "include/linux/prio_tree.h" 1
+# 14 "include/linux/prio_tree.h"
+struct raw_prio_tree_node {
+ struct prio_tree_node *left;
+ struct prio_tree_node *right;
+ struct prio_tree_node *parent;
+};
+
+struct prio_tree_node {
+ struct prio_tree_node *left;
+ struct prio_tree_node *right;
+ struct prio_tree_node *parent;
+ unsigned long start;
+ unsigned long last;
+};
+
+struct prio_tree_root {
+ struct prio_tree_node *prio_tree_node;
+ unsigned short index_bits;
+ unsigned short raw;
+
+
+
+
+};
+
+struct prio_tree_iter {
+ struct prio_tree_node *cur;
+ unsigned long mask;
+ unsigned long value;
+ int size_level;
+
+ struct prio_tree_root *root;
+ unsigned long r_index;
+ unsigned long h_index;
+};
+
+static inline __attribute__((always_inline)) void prio_tree_iter_init(struct prio_tree_iter *iter,
+ struct prio_tree_root *root, unsigned long r_index, unsigned long h_index)
+{
+ iter->root = root;
+ iter->r_index = r_index;
+ iter->h_index = h_index;
+ iter->cur = ((void *)0);
+}
+# 84 "include/linux/prio_tree.h"
+static inline __attribute__((always_inline)) int prio_tree_empty(const struct prio_tree_root *root)
+{
+ return root->prio_tree_node == ((void *)0);
+}
+
+static inline __attribute__((always_inline)) int prio_tree_root(const struct prio_tree_node *node)
+{
+ return node->parent == node;
+}
+
+static inline __attribute__((always_inline)) int prio_tree_left_empty(const struct prio_tree_node *node)
+{
+ return node->left == node;
+}
+
+static inline __attribute__((always_inline)) int prio_tree_right_empty(const struct prio_tree_node *node)
+{
+ return node->right == node;
+}
+
+
+struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root,
+ struct prio_tree_node *old, struct prio_tree_node *node);
+struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
+ struct prio_tree_node *node);
+void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node);
+struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter);
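+/*
+ * Editor's note: this is the heap-indexed priority search tree that backs
+ * address_space->i_mmap further down in fs.h; a query asks which stored
+ * [start, last] intervals overlap [r_index, h_index].  A hedged iteration
+ * sketch over an already-populated tree:
+ *
+ *   struct prio_tree_iter iter;
+ *   struct prio_tree_node *node;
+ *
+ *   prio_tree_iter_init(&iter, root, start_index, last_index);
+ *   while ((node = prio_tree_next(&iter)) != NULL)
+ *           ;       // node->start .. node->last overlaps the query range
+ */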
+# 374 "include/linux/fs.h" 2
+
+# 1 "include/linux/pid.h" 1
+
+
+
+
+
+enum pid_type
+{
+ PIDTYPE_PID,
+ PIDTYPE_PGID,
+ PIDTYPE_SID,
+ PIDTYPE_MAX
+};
+# 50 "include/linux/pid.h"
+struct upid {
+
+ int nr;
+ struct pid_namespace *ns;
+ struct hlist_node pid_chain;
+};
+
+struct pid
+{
+ atomic_t count;
+ unsigned int level;
+
+ struct hlist_head tasks[PIDTYPE_MAX];
+ struct rcu_head rcu;
+ struct upid numbers[1];
+};
+
+extern struct pid init_struct_pid;
+
+struct pid_link
+{
+ struct hlist_node node;
+ struct pid *pid;
+};
+
+static inline __attribute__((always_inline)) struct pid *get_pid(struct pid *pid)
+{
+ if (pid)
+ atomic_inc(&pid->count);
+ return pid;
+}
+
+extern void put_pid(struct pid *pid);
+extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
+extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);
+
+extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
+
+
+
+
+
+extern void attach_pid(struct task_struct *task, enum pid_type type,
+ struct pid *pid);
+extern void detach_pid(struct task_struct *task, enum pid_type);
+extern void change_pid(struct task_struct *task, enum pid_type,
+ struct pid *pid);
+extern void transfer_pid(struct task_struct *old, struct task_struct *new,
+ enum pid_type);
+
+struct pid_namespace;
+extern struct pid_namespace init_pid_ns;
+# 112 "include/linux/pid.h"
+extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns);
+extern struct pid *find_vpid(int nr);
+
+
+
+
+extern struct pid *find_get_pid(int nr);
+extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
+int next_pidmap(struct pid_namespace *pid_ns, int last);
+
+extern struct pid *alloc_pid(struct pid_namespace *ns);
+extern void free_pid(struct pid *pid);
+# 135 "include/linux/pid.h"
+static inline __attribute__((always_inline)) struct pid_namespace *ns_of_pid(struct pid *pid)
+{
+ struct pid_namespace *ns = ((void *)0);
+ if (pid)
+ ns = pid->numbers[pid->level].ns;
+ return ns;
+}
+# 154 "include/linux/pid.h"
+static inline __attribute__((always_inline)) pid_t pid_nr(struct pid *pid)
+{
+ pid_t nr = 0;
+ if (pid)
+ nr = pid->numbers[0].nr;
+ return nr;
+}
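+/*
+ * Editor's note: numbers[0] always holds the id in the initial pid
+ * namespace, so pid_nr() above returns the global pid value, whereas
+ * pid_vnr() declared below reports the id as seen from the caller's own
+ * namespace and pid_nr_ns() from an explicitly given one.
+ */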
+
+pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
+pid_t pid_vnr(struct pid *pid);
+# 376 "include/linux/fs.h" 2
+
+# 1 "include/linux/capability.h" 1
+# 18 "include/linux/capability.h"
+struct task_struct;
+# 40 "include/linux/capability.h"
+typedef struct __user_cap_header_struct {
+ __u32 version;
+ int pid;
+} *cap_user_header_t;
+
+typedef struct __user_cap_data_struct {
+ __u32 effective;
+ __u32 permitted;
+ __u32 inheritable;
+} *cap_user_data_t;
+# 72 "include/linux/capability.h"
+struct vfs_cap_data {
+ __le32 magic_etc;
+ struct {
+ __le32 permitted;
+ __le32 inheritable;
+ } data[2];
+};
+# 96 "include/linux/capability.h"
+extern int file_caps_enabled;
+
+
+typedef struct kernel_cap_struct {
+ __u32 cap[2];
+} kernel_cap_t;
+
+
+struct cpu_vfs_cap_data {
+ __u32 magic_etc;
+ kernel_cap_t permitted;
+ kernel_cap_t inheritable;
+};
+# 444 "include/linux/capability.h"
+static inline __attribute__((always_inline)) kernel_cap_t cap_combine(const kernel_cap_t a,
+ const kernel_cap_t b)
+{
+ kernel_cap_t dest;
+ do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] | b.cap[__capi]; } } while (0);
+ return dest;
+}
+
+static inline __attribute__((always_inline)) kernel_cap_t cap_intersect(const kernel_cap_t a,
+ const kernel_cap_t b)
+{
+ kernel_cap_t dest;
+ do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] & b.cap[__capi]; } } while (0);
+ return dest;
+}
+
+static inline __attribute__((always_inline)) kernel_cap_t cap_drop(const kernel_cap_t a,
+ const kernel_cap_t drop)
+{
+ kernel_cap_t dest;
+ do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] &~ drop.cap[__capi]; } } while (0);
+ return dest;
+}
+
+static inline __attribute__((always_inline)) kernel_cap_t cap_invert(const kernel_cap_t c)
+{
+ kernel_cap_t dest;
+ do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = ~ c.cap[__capi]; } } while (0);
+ return dest;
+}
+
+static inline __attribute__((always_inline)) int cap_isclear(const kernel_cap_t a)
+{
+ unsigned __capi;
+ for (__capi = 0; __capi < 2; ++__capi) {
+ if (a.cap[__capi] != 0)
+ return 0;
+ }
+ return 1;
+}
+# 492 "include/linux/capability.h"
+static inline __attribute__((always_inline)) int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
+{
+ kernel_cap_t dest;
+ dest = cap_drop(a, set);
+ return cap_isclear(dest);
+}
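+/*
+ * Editor's note: kernel_cap_t is a 64-bit capability set held as two
+ * 32-bit words, so the helpers above are plain per-word bit arithmetic:
+ * cap_combine() is OR, cap_intersect() is AND, cap_drop() is AND-NOT,
+ * and cap_issubset(a, set) tests that a & ~set is empty, i.e. every bit
+ * raised in 'a' is also raised in 'set'.
+ */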
+
+
+
+static inline __attribute__((always_inline)) int cap_is_fs_cap(int cap)
+{
+ const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
+ return !!((1 << ((cap) & 31)) & __cap_fs_set.cap[((cap) >> 5)]);
+}
+
+static inline __attribute__((always_inline)) kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
+{
+ const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
+ return cap_drop(a, __cap_fs_set);
+}
+
+static inline __attribute__((always_inline)) kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
+ const kernel_cap_t permitted)
+{
+ const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
+ return cap_combine(a,
+ cap_intersect(permitted, __cap_fs_set));
+}
+
+static inline __attribute__((always_inline)) kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
+{
+ const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } });
+ return cap_drop(a, __cap_fs_set);
+}
+
+static inline __attribute__((always_inline)) kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
+ const kernel_cap_t permitted)
+{
+ const kernel_cap_t __cap_nfsd_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } });
+ return cap_combine(a,
+ cap_intersect(permitted, __cap_nfsd_set));
+}
+
+extern const kernel_cap_t __cap_empty_set;
+extern const kernel_cap_t __cap_full_set;
+extern const kernel_cap_t __cap_init_eff_set;
+# 565 "include/linux/capability.h"
+extern int capable(int cap);
+
+
+struct dentry;
+extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+# 378 "include/linux/fs.h" 2
+# 1 "include/linux/semaphore.h" 1
+# 16 "include/linux/semaphore.h"
+struct semaphore {
+ spinlock_t lock;
+ unsigned int count;
+ struct list_head wait_list;
+};
+# 32 "include/linux/semaphore.h"
+static inline __attribute__((always_inline)) void sema_init(struct semaphore *sem, int val)
+{
+ static struct lock_class_key __key;
+ *sem = (struct semaphore) { .lock = (spinlock_t) { .raw_lock = { 1 }, .magic = 0xdead4ead, .owner = ((void *)-1L), .owner_cpu = -1, .dep_map = { .name = "(*sem).lock" } }, .count = val, .wait_list = { &((*sem).wait_list), &((*sem).wait_list) }, };
+ lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
+}
+
+
+
+
+extern void down(struct semaphore *sem);
+extern int __attribute__((warn_unused_result)) down_interruptible(struct semaphore *sem);
+extern int __attribute__((warn_unused_result)) down_killable(struct semaphore *sem);
+extern int __attribute__((warn_unused_result)) down_trylock(struct semaphore *sem);
+extern int __attribute__((warn_unused_result)) down_timeout(struct semaphore *sem, long jiffies);
+extern void up(struct semaphore *sem);
+# 379 "include/linux/fs.h" 2
+# 1 "include/linux/fiemap.h" 1
+# 16 "include/linux/fiemap.h"
+struct fiemap_extent {
+ __u64 fe_logical;
+
+ __u64 fe_physical;
+
+ __u64 fe_length;
+ __u64 fe_reserved64[2];
+ __u32 fe_flags;
+ __u32 fe_reserved[3];
+};
+
+struct fiemap {
+ __u64 fm_start;
+
+ __u64 fm_length;
+
+ __u32 fm_flags;
+ __u32 fm_mapped_extents;
+ __u32 fm_extent_count;
+ __u32 fm_reserved;
+ struct fiemap_extent fm_extents[0];
+};
+# 380 "include/linux/fs.h" 2
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h" 1
+# 383 "include/linux/fs.h" 2
+
+struct export_operations;
+struct hd_geometry;
+struct iovec;
+struct nameidata;
+struct kiocb;
+struct pipe_inode_info;
+struct poll_table_struct;
+struct kstatfs;
+struct vm_area_struct;
+struct vfsmount;
+struct cred;
+
+extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init(void);
+extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init_early(void);
+extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) files_init(unsigned long);
+
+extern struct files_stat_struct files_stat;
+extern int get_max_files(void);
+extern int sysctl_nr_open;
+extern struct inodes_stat_t inodes_stat;
+extern int leases_enable, lease_break_time;
+
+extern int dir_notify_enable;
+
+
+struct buffer_head;
+typedef int (get_block_t)(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create);
+typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
+ ssize_t bytes, void *private);
+# 446 "include/linux/fs.h"
+struct iattr {
+ unsigned int ia_valid;
+ umode_t ia_mode;
+ uid_t ia_uid;
+ gid_t ia_gid;
+ loff_t ia_size;
+ struct timespec ia_atime;
+ struct timespec ia_mtime;
+ struct timespec ia_ctime;
+
+
+
+
+
+
+ struct file *ia_file;
+};
+
+
+
+
+# 1 "include/linux/quota.h" 1
+# 86 "include/linux/quota.h"
+enum {
+ QIF_BLIMITS_B = 0,
+ QIF_SPACE_B,
+ QIF_ILIMITS_B,
+ QIF_INODES_B,
+ QIF_BTIME_B,
+ QIF_ITIME_B,
+};
+# 106 "include/linux/quota.h"
+struct if_dqblk {
+ __u64 dqb_bhardlimit;
+ __u64 dqb_bsoftlimit;
+ __u64 dqb_curspace;
+ __u64 dqb_ihardlimit;
+ __u64 dqb_isoftlimit;
+ __u64 dqb_curinodes;
+ __u64 dqb_btime;
+ __u64 dqb_itime;
+ __u32 dqb_valid;
+};
+# 127 "include/linux/quota.h"
+struct if_dqinfo {
+ __u64 dqi_bgrace;
+ __u64 dqi_igrace;
+ __u32 dqi_flags;
+ __u32 dqi_valid;
+};
+# 149 "include/linux/quota.h"
+enum {
+ QUOTA_NL_C_UNSPEC,
+ QUOTA_NL_C_WARNING,
+ __QUOTA_NL_C_MAX,
+};
+
+
+enum {
+ QUOTA_NL_A_UNSPEC,
+ QUOTA_NL_A_QTYPE,
+ QUOTA_NL_A_EXCESS_ID,
+ QUOTA_NL_A_WARNING,
+ QUOTA_NL_A_DEV_MAJOR,
+ QUOTA_NL_A_DEV_MINOR,
+ QUOTA_NL_A_CAUSED_ID,
+ __QUOTA_NL_A_MAX,
+};
+# 176 "include/linux/quota.h"
+# 1 "include/linux/dqblk_xfs.h" 1
+# 50 "include/linux/dqblk_xfs.h"
+typedef struct fs_disk_quota {
+ __s8 d_version;
+ __s8 d_flags;
+ __u16 d_fieldmask;
+ __u32 d_id;
+ __u64 d_blk_hardlimit;
+ __u64 d_blk_softlimit;
+ __u64 d_ino_hardlimit;
+ __u64 d_ino_softlimit;
+ __u64 d_bcount;
+ __u64 d_icount;
+ __s32 d_itimer;
+
+ __s32 d_btimer;
+ __u16 d_iwarns;
+ __u16 d_bwarns;
+ __s32 d_padding2;
+ __u64 d_rtb_hardlimit;
+ __u64 d_rtb_softlimit;
+ __u64 d_rtbcount;
+ __s32 d_rtbtimer;
+ __u16 d_rtbwarns;
+ __s16 d_padding3;
+ char d_padding4[8];
+} fs_disk_quota_t;
+# 137 "include/linux/dqblk_xfs.h"
+typedef struct fs_qfilestat {
+ __u64 qfs_ino;
+ __u64 qfs_nblks;
+ __u32 qfs_nextents;
+} fs_qfilestat_t;
+
+typedef struct fs_quota_stat {
+ __s8 qs_version;
+ __u16 qs_flags;
+ __s8 qs_pad;
+ fs_qfilestat_t qs_uquota;
+ fs_qfilestat_t qs_gquota;
+ __u32 qs_incoredqs;
+ __s32 qs_btimelimit;
+ __s32 qs_itimelimit;
+ __s32 qs_rtbtimelimit;
+ __u16 qs_bwarnlimit;
+ __u16 qs_iwarnlimit;
+} fs_quota_stat_t;
+# 177 "include/linux/quota.h" 2
+# 1 "include/linux/dqblk_v1.h" 1
+# 178 "include/linux/quota.h" 2
+# 1 "include/linux/dqblk_v2.h" 1
+
+
+
+
+
+
+
+# 1 "include/linux/dqblk_qtree.h" 1
+# 17 "include/linux/dqblk_qtree.h"
+struct dquot;
+
+
+struct qtree_fmt_operations {
+ void (*mem2disk_dqblk)(void *disk, struct dquot *dquot);
+ void (*disk2mem_dqblk)(struct dquot *dquot, void *disk);
+ int (*is_id)(void *disk, struct dquot *dquot);
+};
+
+
+struct qtree_mem_dqinfo {
+ struct super_block *dqi_sb;
+ int dqi_type;
+ unsigned int dqi_blocks;
+ unsigned int dqi_free_blk;
+ unsigned int dqi_free_entry;
+ unsigned int dqi_blocksize_bits;
+ unsigned int dqi_entry_size;
+ unsigned int dqi_usable_bs;
+ unsigned int dqi_qtree_depth;
+ struct qtree_fmt_operations *dqi_ops;
+};
+
+int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
+int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
+int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
+int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
+int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk);
+static inline __attribute__((always_inline)) int qtree_depth(struct qtree_mem_dqinfo *info)
+{
+ unsigned int epb = info->dqi_usable_bs >> 2;
+ unsigned long long entries = epb;
+ int i;
+
+ for (i = 1; entries < (1ULL << 32); i++)
+ entries *= epb;
+ return i;
+}
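+/*
+ * Editor's note: each on-disk qtree entry that references another block
+ * is 4 bytes, so epb = dqi_usable_bs / 4 is the fan-out per block, and the
+ * loop above picks the smallest depth whose epb^depth covers the full
+ * 32-bit quota id space.  For example, a 1024-byte usable block gives
+ * epb = 256 and a depth of 4, since 256^4 = 2^32.
+ */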
+# 9 "include/linux/dqblk_v2.h" 2
+# 179 "include/linux/quota.h" 2
+
+
+
+typedef __kernel_uid32_t qid_t;
+typedef long long qsize_t;
+
+extern spinlock_t dq_data_lock;
+# 197 "include/linux/quota.h"
+struct mem_dqblk {
+ qsize_t dqb_bhardlimit;
+ qsize_t dqb_bsoftlimit;
+ qsize_t dqb_curspace;
+ qsize_t dqb_rsvspace;
+ qsize_t dqb_ihardlimit;
+ qsize_t dqb_isoftlimit;
+ qsize_t dqb_curinodes;
+ time_t dqb_btime;
+ time_t dqb_itime;
+};
+
+
+
+
+struct quota_format_type;
+
+struct mem_dqinfo {
+ struct quota_format_type *dqi_format;
+ int dqi_fmt_id;
+
+ struct list_head dqi_dirty_list;
+ unsigned long dqi_flags;
+ unsigned int dqi_bgrace;
+ unsigned int dqi_igrace;
+ qsize_t dqi_maxblimit;
+ qsize_t dqi_maxilimit;
+ void *dqi_priv;
+};
+
+struct super_block;
+
+
+
+
+
+extern void mark_info_dirty(struct super_block *sb, int type);
+static inline __attribute__((always_inline)) int info_dirty(struct mem_dqinfo *info)
+{
+ return test_bit(16, &info->dqi_flags);
+}
+
+struct dqstats {
+ int lookups;
+ int drops;
+ int reads;
+ int writes;
+ int cache_hits;
+ int allocated_dquots;
+ int free_dquots;
+ int syncs;
+};
+
+extern struct dqstats dqstats;
+# 264 "include/linux/quota.h"
+struct dquot {
+ struct hlist_node dq_hash;
+ struct list_head dq_inuse;
+ struct list_head dq_free;
+ struct list_head dq_dirty;
+ struct mutex dq_lock;
+ atomic_t dq_count;
+ wait_queue_head_t dq_wait_unused;
+ struct super_block *dq_sb;
+ unsigned int dq_id;
+ loff_t dq_off;
+ unsigned long dq_flags;
+ short dq_type;
+ struct mem_dqblk dq_dqb;
+};
+
+
+
+
+
+struct quota_format_ops {
+ int (*check_quota_file)(struct super_block *sb, int type);
+ int (*read_file_info)(struct super_block *sb, int type);
+ int (*write_file_info)(struct super_block *sb, int type);
+ int (*free_file_info)(struct super_block *sb, int type);
+ int (*read_dqblk)(struct dquot *dquot);
+ int (*commit_dqblk)(struct dquot *dquot);
+ int (*release_dqblk)(struct dquot *dquot);
+};
+
+
+struct dquot_operations {
+ int (*initialize) (struct inode *, int);
+ int (*drop) (struct inode *);
+ int (*alloc_space) (struct inode *, qsize_t, int);
+ int (*alloc_inode) (const struct inode *, qsize_t);
+ int (*free_space) (struct inode *, qsize_t);
+ int (*free_inode) (const struct inode *, qsize_t);
+ int (*transfer) (struct inode *, struct iattr *);
+ int (*write_dquot) (struct dquot *);
+ struct dquot *(*alloc_dquot)(struct super_block *, int);
+ void (*destroy_dquot)(struct dquot *);
+ int (*acquire_dquot) (struct dquot *);
+ int (*release_dquot) (struct dquot *);
+ int (*mark_dirty) (struct dquot *);
+ int (*write_info) (struct super_block *, int);
+
+ int (*reserve_space) (struct inode *, qsize_t, int);
+
+ int (*claim_space) (struct inode *, qsize_t);
+
+ void (*release_rsv) (struct inode *, qsize_t);
+
+ qsize_t (*get_reserved_space) (struct inode *);
+};
+
+
+struct quotactl_ops {
+ int (*quota_on)(struct super_block *, int, int, char *, int);
+ int (*quota_off)(struct super_block *, int, int);
+ int (*quota_sync)(struct super_block *, int);
+ int (*get_info)(struct super_block *, int, struct if_dqinfo *);
+ int (*set_info)(struct super_block *, int, struct if_dqinfo *);
+ int (*get_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *);
+ int (*set_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *);
+ int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
+ int (*set_xstate)(struct super_block *, unsigned int, int);
+ int (*get_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *);
+ int (*set_xquota)(struct super_block *, int, qid_t, struct fs_disk_quota *);
+};
+
+struct quota_format_type {
+ int qf_fmt_id;
+ struct quota_format_ops *qf_ops;
+ struct module *qf_owner;
+ struct quota_format_type *qf_next;
+};
+
+
+enum {
+ _DQUOT_USAGE_ENABLED = 0,
+ _DQUOT_LIMITS_ENABLED,
+ _DQUOT_SUSPENDED,
+
+
+ _DQUOT_STATE_FLAGS
+};
+# 365 "include/linux/quota.h"
+static inline __attribute__((always_inline)) unsigned int dquot_state_flag(unsigned int flags, int type)
+{
+ if (type == 0)
+ return flags;
+ return flags << _DQUOT_STATE_FLAGS;
+}
+
+static inline __attribute__((always_inline)) unsigned int dquot_generic_flag(unsigned int flags, int type)
+{
+ if (type == 0)
+ return flags;
+ return flags >> _DQUOT_STATE_FLAGS;
+}
+
+struct quota_info {
+ unsigned int flags;
+ struct mutex dqio_mutex;
+ struct mutex dqonoff_mutex;
+ struct rw_semaphore dqptr_sem;
+ struct inode *files[2];
+ struct mem_dqinfo info[2];
+ struct quota_format_ops *ops[2];
+};
+
+int register_quota_format(struct quota_format_type *fmt);
+void unregister_quota_format(struct quota_format_type *fmt);
+
+struct quota_module_name {
+ int qm_fmt_id;
+ char *qm_mod_name;
+};
+# 468 "include/linux/fs.h" 2
+# 495 "include/linux/fs.h"
+enum positive_aop_returns {
+ AOP_WRITEPAGE_ACTIVATE = 0x80000,
+ AOP_TRUNCATED_PAGE = 0x80001,
+};
+# 509 "include/linux/fs.h"
+struct page;
+struct address_space;
+struct writeback_control;
+
+struct iov_iter {
+ const struct iovec *iov;
+ unsigned long nr_segs;
+ size_t iov_offset;
+ size_t count;
+};
+
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes);
+size_t iov_iter_copy_from_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes);
+void iov_iter_advance(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+size_t iov_iter_single_seg_count(struct iov_iter *i);
+
+static inline __attribute__((always_inline)) void iov_iter_init(struct iov_iter *i,
+ const struct iovec *iov, unsigned long nr_segs,
+ size_t count, size_t written)
+{
+ i->iov = iov;
+ i->nr_segs = nr_segs;
+ i->iov_offset = 0;
+ i->count = count + written;
+
+ iov_iter_advance(i, written);
+}
+
+static inline __attribute__((always_inline)) size_t iov_iter_count(struct iov_iter *i)
+{
+ return i->count;
+}
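+/*
+ * Editor's note: struct iov_iter tracks progress through a user iovec
+ * array during buffered writes.  A hedged sketch of how the generic write
+ * path drives it, with 'written' bytes already consumed before the loop:
+ *
+ *   struct iov_iter i;
+ *
+ *   iov_iter_init(&i, iov, nr_segs, count, written);
+ *   while (iov_iter_count(&i)) {
+ *           copied = iov_iter_copy_from_user_atomic(page, &i, offset, bytes);
+ *           iov_iter_advance(&i, copied);
+ *   }
+ */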
+# 554 "include/linux/fs.h"
+typedef struct {
+ size_t written;
+ size_t count;
+ union {
+ char *buf;
+ void *data;
+ } arg;
+ int error;
+} read_descriptor_t;
+
+typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
+ unsigned long, unsigned long);
+
+struct address_space_operations {
+ int (*writepage)(struct page *page, struct writeback_control *wbc);
+ int (*readpage)(struct file *, struct page *);
+ void (*sync_page)(struct page *);
+
+
+ int (*writepages)(struct address_space *, struct writeback_control *);
+
+
+ int (*set_page_dirty)(struct page *page);
+
+ int (*readpages)(struct file *filp, struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages);
+
+ int (*write_begin)(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata);
+ int (*write_end)(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata);
+
+
+ sector_t (*bmap)(struct address_space *, sector_t);
+ void (*invalidatepage) (struct page *, unsigned long);
+ int (*releasepage) (struct page *, gfp_t);
+ ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
+ loff_t offset, unsigned long nr_segs);
+ int (*get_xip_mem)(struct address_space *, unsigned long, int,
+ void **, unsigned long *);
+
+ int (*migratepage) (struct address_space *,
+ struct page *, struct page *);
+ int (*launder_page) (struct page *);
+ int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+ unsigned long);
+ int (*error_remove_page)(struct address_space *, struct page *);
+};
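+/*
+ * Editor's note: ->write_begin/->write_end are the pairing used by
+ * buffered writes in this kernel: write_begin returns a locked page (plus
+ * opaque *fsdata) ready to receive 'len' bytes at 'pos', the caller copies
+ * the user data in, and write_end is told how many bytes were actually
+ * copied so the filesystem can commit or unwind a short copy.
+ */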
+
+
+
+
+
+int pagecache_write_begin(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata);
+
+int pagecache_write_end(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata);
+
+struct backing_dev_info;
+struct address_space {
+ struct inode *host;
+ struct radix_tree_root page_tree;
+ spinlock_t tree_lock;
+ unsigned int i_mmap_writable;
+ struct prio_tree_root i_mmap;
+ struct list_head i_mmap_nonlinear;
+ spinlock_t i_mmap_lock;
+ unsigned int truncate_count;
+ unsigned long nrpages;
+ unsigned long writeback_index;
+ const struct address_space_operations *a_ops;
+ unsigned long flags;
+ struct backing_dev_info *backing_dev_info;
+ spinlock_t private_lock;
+ struct list_head private_list;
+ struct address_space *assoc_mapping;
+} __attribute__((aligned(sizeof(long))));
+
+
+
+
+
+
+struct block_device {
+ dev_t bd_dev;
+ struct inode * bd_inode;
+ struct super_block * bd_super;
+ int bd_openers;
+ struct mutex bd_mutex;
+ struct list_head bd_inodes;
+ void * bd_holder;
+ int bd_holders;
+
+ struct list_head bd_holder_list;
+
+ struct block_device * bd_contains;
+ unsigned bd_block_size;
+ struct hd_struct * bd_part;
+
+ unsigned bd_part_count;
+ int bd_invalidated;
+ struct gendisk * bd_disk;
+ struct list_head bd_list;
+
+
+
+
+
+
+ unsigned long bd_private;
+
+
+ int bd_fsfreeze_count;
+
+ struct mutex bd_fsfreeze_mutex;
+};
+# 683 "include/linux/fs.h"
+int mapping_tagged(struct address_space *mapping, int tag);
+
+
+
+
+static inline __attribute__((always_inline)) int mapping_mapped(struct address_space *mapping)
+{
+ return !prio_tree_empty(&mapping->i_mmap) ||
+ !list_empty(&mapping->i_mmap_nonlinear);
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int mapping_writably_mapped(struct address_space *mapping)
+{
+ return mapping->i_mmap_writable != 0;
+}
+# 716 "include/linux/fs.h"
+struct posix_acl;
+
+
+struct inode {
+ struct hlist_node i_hash;
+ struct list_head i_list;
+ struct list_head i_sb_list;
+ struct list_head i_dentry;
+ unsigned long i_ino;
+ atomic_t i_count;
+ unsigned int i_nlink;
+ uid_t i_uid;
+ gid_t i_gid;
+ dev_t i_rdev;
+ u64 i_version;
+ loff_t i_size;
+
+
+
+ struct timespec i_atime;
+ struct timespec i_mtime;
+ struct timespec i_ctime;
+ blkcnt_t i_blocks;
+ unsigned int i_blkbits;
+ unsigned short i_bytes;
+ umode_t i_mode;
+ spinlock_t i_lock;
+ struct mutex i_mutex;
+ struct rw_semaphore i_alloc_sem;
+ const struct inode_operations *i_op;
+ const struct file_operations *i_fop;
+ struct super_block *i_sb;
+ struct file_lock *i_flock;
+ struct address_space *i_mapping;
+ struct address_space i_data;
+
+ struct dquot *i_dquot[2];
+
+ struct list_head i_devices;
+ union {
+ struct pipe_inode_info *i_pipe;
+ struct block_device *i_bdev;
+ struct cdev *i_cdev;
+ };
+
+ __u32 i_generation;
+
+
+ __u32 i_fsnotify_mask;
+ struct hlist_head i_fsnotify_mark_entries;
+
+
+
+ struct list_head inotify_watches;
+ struct mutex inotify_mutex;
+
+
+ unsigned long i_state;
+ unsigned long dirtied_when;
+
+ unsigned int i_flags;
+
+ atomic_t i_writecount;
+
+ void *i_security;
+
+
+ struct posix_acl *i_acl;
+ struct posix_acl *i_default_acl;
+
+ void *i_private;
+};
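+/*
+ * Editor's note: i_data above is the address_space embedded in the inode
+ * itself, while i_mapping normally just points at it; the two differ for
+ * device special files, where i_mapping is redirected to the backing
+ * block device's inode mapping so all openers share one page cache.
+ */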
+# 800 "include/linux/fs.h"
+enum inode_i_mutex_lock_class
+{
+ I_MUTEX_NORMAL,
+ I_MUTEX_PARENT,
+ I_MUTEX_CHILD,
+ I_MUTEX_XATTR,
+ I_MUTEX_QUOTA
+};
+# 819 "include/linux/fs.h"
+static inline __attribute__((always_inline)) loff_t i_size_read(const struct inode *inode)
+{
+# 838 "include/linux/fs.h"
+ return inode->i_size;
+
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void i_size_write(struct inode *inode, loff_t i_size)
+{
+# 858 "include/linux/fs.h"
+ inode->i_size = i_size;
+
+}
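+/*
+ * Editor's note: the line markers jumping to 838 and 858 inside these two
+ * helpers show that the seqcount- and preemption-protected variants of
+ * i_size_read() and i_size_write() were compiled out; on this 32-bit,
+ * non-SMP, non-preemptible configuration an i_size access is just the
+ * plain load or store seen above.
+ */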
+
+static inline __attribute__((always_inline)) unsigned iminor(const struct inode *inode)
+{
+ return ((unsigned int) ((inode->i_rdev) & ((1U << 20) - 1)));
+}
+
+static inline __attribute__((always_inline)) unsigned imajor(const struct inode *inode)
+{
+ return ((unsigned int) ((inode->i_rdev) >> 20));
+}
+
+extern struct block_device *I_BDEV(struct inode *inode);
+
+struct fown_struct {
+ rwlock_t lock;
+ struct pid *pid;
+ enum pid_type pid_type;
+ uid_t uid, euid;
+ int signum;
+};
+
+
+
+
+struct file_ra_state {
+ unsigned long start;
+ unsigned int size;
+ unsigned int async_size;
+
+
+ unsigned int ra_pages;
+ unsigned int mmap_miss;
+ loff_t prev_pos;
+};
+
+
+
+
+static inline __attribute__((always_inline)) int ra_has_index(struct file_ra_state *ra, unsigned long index)
+{
+ return (index >= ra->start &&
+ index < ra->start + ra->size);
+}
+
+
+
+
+struct file {
+
+
+
+
+ union {
+ struct list_head fu_list;
+ struct rcu_head fu_rcuhead;
+ } f_u;
+ struct path f_path;
+
+
+ const struct file_operations *f_op;
+ spinlock_t f_lock;
+ atomic_long_t f_count;
+ unsigned int f_flags;
+ fmode_t f_mode;
+ loff_t f_pos;
+ struct fown_struct f_owner;
+ const struct cred *f_cred;
+ struct file_ra_state f_ra;
+
+ u64 f_version;
+
+ void *f_security;
+
+
+ void *private_data;
+
+
+
+ struct list_head f_ep_links;
+
+ struct address_space *f_mapping;
+
+ unsigned long f_mnt_write_state;
+
+};
+extern spinlock_t files_lock;
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void file_take_write(struct file *f)
+{
+ ({ int __ret_warn_on = !!(f->f_mnt_write_state != 0); if (__builtin_expect(!!(__ret_warn_on), 0)) asm volatile( "1: .hword %0\n" " .section __bug_table,\"a\",@progbits\n" "2: .long 1b\n" " .long %1\n" " .short %2\n" " .short %3\n" " .org 2b + %4\n" " .previous" : : "i"(0xefcd), "i"("include/linux/fs.h"), "i"(956), "i"((1<<0)), "i"(sizeof(struct bug_entry))); __builtin_expect(!!(__ret_warn_on), 0); });
+ f->f_mnt_write_state = 1;
+}
+static inline __attribute__((always_inline)) void file_release_write(struct file *f)
+{
+ f->f_mnt_write_state |= 2;
+}
+static inline __attribute__((always_inline)) void file_reset_write(struct file *f)
+{
+ f->f_mnt_write_state = 0;
+}
+static inline __attribute__((always_inline)) void file_check_state(struct file *f)
+{
+
+
+
+
+ ({ int __ret_warn_on = !!(f->f_mnt_write_state == 1); if (__builtin_expect(!!(__ret_warn_on), 0)) asm volatile( "1: .hword %0\n" " .section __bug_table,\"a\",@progbits\n" "2: .long 1b\n" " .long %1\n" " .short %2\n" " .short %3\n" " .org 2b + %4\n" " .previous" : : "i"(0xefcd), "i"("include/linux/fs.h"), "i"(973), "i"((1<<0)), "i"(sizeof(struct bug_entry))); __builtin_expect(!!(__ret_warn_on), 0); });
+ ({ int __ret_warn_on = !!(f->f_mnt_write_state == 2); if (__builtin_expect(!!(__ret_warn_on), 0)) asm volatile( "1: .hword %0\n" " .section __bug_table,\"a\",@progbits\n" "2: .long 1b\n" " .long %1\n" " .short %2\n" " .short %3\n" " .org 2b + %4\n" " .previous" : : "i"(0xefcd), "i"("include/linux/fs.h"), "i"(974), "i"((1<<0)), "i"(sizeof(struct bug_entry))); __builtin_expect(!!(__ret_warn_on), 0); });
+}
+static inline __attribute__((always_inline)) int file_check_writeable(struct file *f)
+{
+ if (f->f_mnt_write_state == 1)
+ return 0;
+ printk("<4>" "writeable file with no "
+ "mnt_want_write()\n");
+ ({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) asm volatile( "1: .hword %0\n" " .section __bug_table,\"a\",@progbits\n" "2: .long 1b\n" " .long %1\n" " .short %2\n" " .short %3\n" " .org 2b + %4\n" " .previous" : : "i"(0xefcd), "i"("include/linux/fs.h"), "i"(982), "i"((1<<0)), "i"(sizeof(struct bug_entry))); __builtin_expect(!!(__ret_warn_on), 0); });
+ return -22;
+}
+# 1027 "include/linux/fs.h"
+typedef struct files_struct *fl_owner_t;
+
+struct file_lock_operations {
+ void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+ void (*fl_release_private)(struct file_lock *);
+};
+
+struct lock_manager_operations {
+ int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
+ void (*fl_notify)(struct file_lock *);
+ int (*fl_grant)(struct file_lock *, struct file_lock *, int);
+ void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+ void (*fl_release_private)(struct file_lock *);
+ void (*fl_break)(struct file_lock *);
+ int (*fl_mylease)(struct file_lock *, struct file_lock *);
+ int (*fl_change)(struct file_lock **, int);
+};
+
+struct lock_manager {
+ struct list_head list;
+};
+
+void locks_start_grace(struct lock_manager *);
+void locks_end_grace(struct lock_manager *);
+int locks_in_grace(void);
+
+
+# 1 "include/linux/nfs_fs_i.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/types.h" 1
+# 5 "include/linux/nfs_fs_i.h" 2
+
+# 1 "include/linux/nfs.h" 1
+# 40 "include/linux/nfs.h"
+ enum nfs_stat {
+ NFS_OK = 0,
+ NFSERR_PERM = 1,
+ NFSERR_NOENT = 2,
+ NFSERR_IO = 5,
+ NFSERR_NXIO = 6,
+ NFSERR_EAGAIN = 11,
+ NFSERR_ACCES = 13,
+ NFSERR_EXIST = 17,
+ NFSERR_XDEV = 18,
+ NFSERR_NODEV = 19,
+ NFSERR_NOTDIR = 20,
+ NFSERR_ISDIR = 21,
+ NFSERR_INVAL = 22,
+ NFSERR_FBIG = 27,
+ NFSERR_NOSPC = 28,
+ NFSERR_ROFS = 30,
+ NFSERR_MLINK = 31,
+ NFSERR_OPNOTSUPP = 45,
+ NFSERR_NAMETOOLONG = 63,
+ NFSERR_NOTEMPTY = 66,
+ NFSERR_DQUOT = 69,
+ NFSERR_STALE = 70,
+ NFSERR_REMOTE = 71,
+ NFSERR_WFLUSH = 99,
+ NFSERR_BADHANDLE = 10001,
+ NFSERR_NOT_SYNC = 10002,
+ NFSERR_BAD_COOKIE = 10003,
+ NFSERR_NOTSUPP = 10004,
+ NFSERR_TOOSMALL = 10005,
+ NFSERR_SERVERFAULT = 10006,
+ NFSERR_BADTYPE = 10007,
+ NFSERR_JUKEBOX = 10008,
+ NFSERR_SAME = 10009,
+ NFSERR_DENIED = 10010,
+ NFSERR_EXPIRED = 10011,
+ NFSERR_LOCKED = 10012,
+ NFSERR_GRACE = 10013,
+ NFSERR_FHEXPIRED = 10014,
+ NFSERR_SHARE_DENIED = 10015,
+ NFSERR_WRONGSEC = 10016,
+ NFSERR_CLID_INUSE = 10017,
+ NFSERR_RESOURCE = 10018,
+ NFSERR_MOVED = 10019,
+ NFSERR_NOFILEHANDLE = 10020,
+ NFSERR_MINOR_VERS_MISMATCH = 10021,
+ NFSERR_STALE_CLIENTID = 10022,
+ NFSERR_STALE_STATEID = 10023,
+ NFSERR_OLD_STATEID = 10024,
+ NFSERR_BAD_STATEID = 10025,
+ NFSERR_BAD_SEQID = 10026,
+ NFSERR_NOT_SAME = 10027,
+ NFSERR_LOCK_RANGE = 10028,
+ NFSERR_SYMLINK = 10029,
+ NFSERR_RESTOREFH = 10030,
+ NFSERR_LEASE_MOVED = 10031,
+ NFSERR_ATTRNOTSUPP = 10032,
+ NFSERR_NO_GRACE = 10033,
+ NFSERR_RECLAIM_BAD = 10034,
+ NFSERR_RECLAIM_CONFLICT = 10035,
+ NFSERR_BAD_XDR = 10036,
+ NFSERR_LOCKS_HELD = 10037,
+ NFSERR_OPENMODE = 10038,
+ NFSERR_BADOWNER = 10039,
+ NFSERR_BADCHAR = 10040,
+ NFSERR_BADNAME = 10041,
+ NFSERR_BAD_RANGE = 10042,
+ NFSERR_LOCK_NOTSUPP = 10043,
+ NFSERR_OP_ILLEGAL = 10044,
+ NFSERR_DEADLOCK = 10045,
+ NFSERR_FILE_OPEN = 10046,
+ NFSERR_ADMIN_REVOKED = 10047,
+ NFSERR_CB_PATH_DOWN = 10048,
+};
+
+
+
+enum nfs_ftype {
+ NFNON = 0,
+ NFREG = 1,
+ NFDIR = 2,
+ NFBLK = 3,
+ NFCHR = 4,
+ NFLNK = 5,
+ NFSOCK = 6,
+ NFBAD = 7,
+ NFFIFO = 8
+};
+
+
+# 1 "include/linux/sunrpc/msg_prot.h" 1
+# 18 "include/linux/sunrpc/msg_prot.h"
+typedef u32 rpc_authflavor_t;
+
+enum rpc_auth_flavors {
+ RPC_AUTH_NULL = 0,
+ RPC_AUTH_UNIX = 1,
+ RPC_AUTH_SHORT = 2,
+ RPC_AUTH_DES = 3,
+ RPC_AUTH_KRB = 4,
+ RPC_AUTH_GSS = 6,
+ RPC_AUTH_MAXFLAVOR = 8,
+
+ RPC_AUTH_GSS_KRB5 = 390003,
+ RPC_AUTH_GSS_KRB5I = 390004,
+ RPC_AUTH_GSS_KRB5P = 390005,
+ RPC_AUTH_GSS_LKEY = 390006,
+ RPC_AUTH_GSS_LKEYI = 390007,
+ RPC_AUTH_GSS_LKEYP = 390008,
+ RPC_AUTH_GSS_SPKM = 390009,
+ RPC_AUTH_GSS_SPKMI = 390010,
+ RPC_AUTH_GSS_SPKMP = 390011,
+};
+
+
+
+
+enum rpc_msg_type {
+ RPC_CALL = 0,
+ RPC_REPLY = 1
+};
+
+enum rpc_reply_stat {
+ RPC_MSG_ACCEPTED = 0,
+ RPC_MSG_DENIED = 1
+};
+
+enum rpc_accept_stat {
+ RPC_SUCCESS = 0,
+ RPC_PROG_UNAVAIL = 1,
+ RPC_PROG_MISMATCH = 2,
+ RPC_PROC_UNAVAIL = 3,
+ RPC_GARBAGE_ARGS = 4,
+ RPC_SYSTEM_ERR = 5,
+
+ RPC_DROP_REPLY = 60000,
+};
+
+enum rpc_reject_stat {
+ RPC_MISMATCH = 0,
+ RPC_AUTH_ERROR = 1
+};
+
+enum rpc_auth_stat {
+ RPC_AUTH_OK = 0,
+ RPC_AUTH_BADCRED = 1,
+ RPC_AUTH_REJECTEDCRED = 2,
+ RPC_AUTH_BADVERF = 3,
+ RPC_AUTH_REJECTEDVERF = 4,
+ RPC_AUTH_TOOWEAK = 5,
+
+ RPCSEC_GSS_CREDPROBLEM = 13,
+ RPCSEC_GSS_CTXPROBLEM = 14
+};
+# 102 "include/linux/sunrpc/msg_prot.h"
+typedef __be32 rpc_fraghdr;
+# 193 "include/linux/sunrpc/msg_prot.h"
+# 1 "include/linux/inet.h" 1
+# 54 "include/linux/inet.h"
+extern __be32 in_aton(const char *str);
+extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
+extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
+# 194 "include/linux/sunrpc/msg_prot.h" 2
+# 131 "include/linux/nfs.h" 2
+
+
+
+
+
+
+struct nfs_fh {
+ unsigned short size;
+ unsigned char data[128];
+};
+
+
+
+
+
+static inline __attribute__((always_inline)) int nfs_compare_fh(const struct nfs_fh *a, const struct nfs_fh *b)
+{
+ return a->size != b->size || memcmp(a->data, b->data, a->size) != 0;
+}
+
+static inline __attribute__((always_inline)) void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source)
+{
+ target->size = source->size;
+ memcpy(target->data, source->data, source->size);
+}
+# 165 "include/linux/nfs.h"
+enum nfs3_stable_how {
+ NFS_UNSTABLE = 0,
+ NFS_DATA_SYNC = 1,
+ NFS_FILE_SYNC = 2
+};
+# 7 "include/linux/nfs_fs_i.h" 2
+
+struct nlm_lockowner;
+
+
+
+
+struct nfs_lock_info {
+ u32 state;
+ struct nlm_lockowner *owner;
+ struct list_head list;
+};
+
+struct nfs4_lock_state;
+struct nfs4_lock_info {
+ struct nfs4_lock_state *owner;
+};
+# 1055 "include/linux/fs.h" 2
+
+struct file_lock {
+ struct file_lock *fl_next;
+ struct list_head fl_link;
+ struct list_head fl_block;
+ fl_owner_t fl_owner;
+ unsigned char fl_flags;
+ unsigned char fl_type;
+ unsigned int fl_pid;
+ struct pid *fl_nspid;
+ wait_queue_head_t fl_wait;
+ struct file *fl_file;
+ loff_t fl_start;
+ loff_t fl_end;
+
+ struct fasync_struct * fl_fasync;
+ unsigned long fl_break_time;
+
+ const struct file_lock_operations *fl_ops;
+ const struct lock_manager_operations *fl_lmops;
+ union {
+ struct nfs_lock_info nfs_fl;
+ struct nfs4_lock_info nfs4_fl;
+ struct {
+ struct list_head link;
+ int state;
+ } afs;
+ } fl_u;
+};
+# 1092 "include/linux/fs.h"
+# 1 "include/linux/fcntl.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/fcntl.h" 1
+# 17 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/fcntl.h"
+# 1 "include/asm-generic/fcntl.h" 1
+# 94 "include/asm-generic/fcntl.h"
+struct f_owner_ex {
+ int type;
+ pid_t pid;
+};
+# 139 "include/asm-generic/fcntl.h"
+struct flock {
+ short l_type;
+ short l_whence;
+ __kernel_off_t l_start;
+ __kernel_off_t l_len;
+ __kernel_pid_t l_pid;
+
+};
+# 156 "include/asm-generic/fcntl.h"
+struct flock64 {
+ short l_type;
+ short l_whence;
+ __kernel_loff_t l_start;
+ __kernel_loff_t l_len;
+ __kernel_pid_t l_pid;
+
+};
+# 18 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/fcntl.h" 2
+# 5 "include/linux/fcntl.h" 2
+# 1093 "include/linux/fs.h" 2
+
+extern void send_sigio(struct fown_struct *fown, int fd, int band);
+
+
+extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
+ loff_t endbyte, unsigned int flags);
+
+
+extern int fcntl_getlk(struct file *, struct flock *);
+extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
+ struct flock *);
+
+
+extern int fcntl_getlk64(struct file *, struct flock64 *);
+extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
+ struct flock64 *);
+
+
+extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
+extern int fcntl_getlease(struct file *filp);
+
+
+extern void locks_init_lock(struct file_lock *);
+extern void locks_copy_lock(struct file_lock *, struct file_lock *);
+extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
+extern void locks_remove_posix(struct file *, fl_owner_t);
+extern void locks_remove_flock(struct file *);
+extern void locks_release_private(struct file_lock *);
+extern void posix_test_lock(struct file *, struct file_lock *);
+extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
+extern int posix_lock_file_wait(struct file *, struct file_lock *);
+extern int posix_unblock_lock(struct file *, struct file_lock *);
+extern int vfs_test_lock(struct file *, struct file_lock *);
+extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
+extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
+extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
+extern int __break_lease(struct inode *inode, unsigned int flags);
+extern void lease_get_mtime(struct inode *, struct timespec *time);
+extern int generic_setlease(struct file *, long, struct file_lock **);
+extern int vfs_setlease(struct file *, long, struct file_lock **);
+extern int lease_modify(struct file_lock **, int);
+extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
+extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
+# 1281 "include/linux/fs.h"
+struct fasync_struct {
+ int magic;
+ int fa_fd;
+ struct fasync_struct *fa_next;
+ struct file *fa_file;
+};
+
+
+
+
+extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
+
+extern void kill_fasync(struct fasync_struct **, int, int);
+
+extern void __kill_fasync(struct fasync_struct *, int, int);
+
+extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
+extern int f_setown(struct file *filp, unsigned long arg, int force);
+extern void f_delown(struct file *filp);
+extern pid_t f_getown(struct file *filp);
+extern int send_sigurg(struct fown_struct *fown);
+# 1311 "include/linux/fs.h"
+extern struct list_head super_blocks;
+extern spinlock_t sb_lock;
+
+
+
+struct super_block {
+ struct list_head s_list;
+ dev_t s_dev;
+ unsigned long s_blocksize;
+ unsigned char s_blocksize_bits;
+ unsigned char s_dirt;
+ loff_t s_maxbytes;
+ struct file_system_type *s_type;
+ const struct super_operations *s_op;
+ const struct dquot_operations *dq_op;
+ const struct quotactl_ops *s_qcop;
+ const struct export_operations *s_export_op;
+ unsigned long s_flags;
+ unsigned long s_magic;
+ struct dentry *s_root;
+ struct rw_semaphore s_umount;
+ struct mutex s_lock;
+ int s_count;
+ int s_need_sync;
+ atomic_t s_active;
+
+ void *s_security;
+
+ struct xattr_handler **s_xattr;
+
+ struct list_head s_inodes;
+ struct hlist_head s_anon;
+ struct list_head s_files;
+
+ struct list_head s_dentry_lru;
+ int s_nr_dentry_unused;
+
+ struct block_device *s_bdev;
+ struct backing_dev_info *s_bdi;
+ struct mtd_info *s_mtd;
+ struct list_head s_instances;
+ struct quota_info s_dquot;
+
+ int s_frozen;
+ wait_queue_head_t s_wait_unfrozen;
+
+ char s_id[32];
+
+ void *s_fs_info;
+ fmode_t s_mode;
+
+
+
+
+
+ struct mutex s_vfs_rename_mutex;
+
+
+
+ u32 s_time_gran;
+
+
+
+
+
+ char *s_subtype;
+
+
+
+
+
+ char *s_options;
+};
+
+extern struct timespec current_fs_time(struct super_block *sb);
+
+
+
+
+enum {
+ SB_UNFROZEN = 0,
+ SB_FREEZE_WRITE = 1,
+ SB_FREEZE_TRANS = 2,
+};
+# 1407 "include/linux/fs.h"
+extern void lock_super(struct super_block *);
+extern void unlock_super(struct super_block *);
+
+
+
+
+extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
+extern int vfs_mkdir(struct inode *, struct dentry *, int);
+extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
+extern int vfs_symlink(struct inode *, struct dentry *, const char *);
+extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
+extern int vfs_rmdir(struct inode *, struct dentry *);
+extern int vfs_unlink(struct inode *, struct dentry *);
+extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+
+
+
+
+extern void dentry_unhash(struct dentry *dentry);
+
+
+
+
+extern int file_permission(struct file *, int);
+
+
+
+
+struct fiemap_extent_info {
+ unsigned int fi_flags;
+ unsigned int fi_extents_mapped;
+ unsigned int fi_extents_max;
+ struct fiemap_extent *fi_extents_start;
+
+};
+int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
+ u64 phys, u64 len, u32 flags);
+int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
+# 1468 "include/linux/fs.h"
+typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
+struct block_device_operations;
+# 1482 "include/linux/fs.h"
+struct file_operations {
+ struct module *owner;
+ loff_t (*llseek) (struct file *, loff_t, int);
+ ssize_t (*read) (struct file *, char *, size_t, loff_t *);
+ ssize_t (*write) (struct file *, const char *, size_t, loff_t *);
+ ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ int (*readdir) (struct file *, void *, filldir_t);
+ unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
+ long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
+ long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
+ int (*mmap) (struct file *, struct vm_area_struct *);
+ int (*open) (struct inode *, struct file *);
+ int (*flush) (struct file *, fl_owner_t id);
+ int (*release) (struct inode *, struct file *);
+ int (*fsync) (struct file *, struct dentry *, int datasync);
+ int (*aio_fsync) (struct kiocb *, int datasync);
+ int (*fasync) (int, struct file *, int);
+ int (*lock) (struct file *, int, struct file_lock *);
+ ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
+ unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+ int (*check_flags)(int);
+ int (*flock) (struct file *, int, struct file_lock *);
+ ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
+ ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
+ int (*setlease)(struct file *, long, struct file_lock **);
+};
+
+struct inode_operations {
+ int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
+ struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
+ int (*link) (struct dentry *,struct inode *,struct dentry *);
+ int (*unlink) (struct inode *,struct dentry *);
+ int (*symlink) (struct inode *,struct dentry *,const char *);
+ int (*mkdir) (struct inode *,struct dentry *,int);
+ int (*rmdir) (struct inode *,struct dentry *);
+ int (*mknod) (struct inode *,struct dentry *,int,dev_t);
+ int (*rename) (struct inode *, struct dentry *,
+ struct inode *, struct dentry *);
+ int (*readlink) (struct dentry *, char *,int);
+ void * (*follow_link) (struct dentry *, struct nameidata *);
+ void (*put_link) (struct dentry *, struct nameidata *, void *);
+ void (*truncate) (struct inode *);
+ int (*permission) (struct inode *, int);
+ int (*check_acl)(struct inode *, int);
+ int (*setattr) (struct dentry *, struct iattr *);
+ int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
+ int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
+ ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
+ ssize_t (*listxattr) (struct dentry *, char *, size_t);
+ int (*removexattr) (struct dentry *, const char *);
+ void (*truncate_range)(struct inode *, loff_t, loff_t);
+ long (*fallocate)(struct inode *inode, int mode, loff_t offset,
+ loff_t len);
+ int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
+ u64 len);
+};
+
+struct seq_file;
+
+ssize_t rw_copy_check_uvector(int type, const struct iovec * uvector,
+ unsigned long nr_segs, unsigned long fast_segs,
+ struct iovec *fast_pointer,
+ struct iovec **ret_pointer);
+
+extern ssize_t vfs_read(struct file *, char *, size_t, loff_t *);
+extern ssize_t vfs_write(struct file *, const char *, size_t, loff_t *);
+extern ssize_t vfs_readv(struct file *, const struct iovec *,
+ unsigned long, loff_t *);
+extern ssize_t vfs_writev(struct file *, const struct iovec *,
+ unsigned long, loff_t *);
+
+struct super_operations {
+ struct inode *(*alloc_inode)(struct super_block *sb);
+ void (*destroy_inode)(struct inode *);
+
+ void (*dirty_inode) (struct inode *);
+ int (*write_inode) (struct inode *, int);
+ void (*drop_inode) (struct inode *);
+ void (*delete_inode) (struct inode *);
+ void (*put_super) (struct super_block *);
+ void (*write_super) (struct super_block *);
+ int (*sync_fs)(struct super_block *sb, int wait);
+ int (*freeze_fs) (struct super_block *);
+ int (*unfreeze_fs) (struct super_block *);
+ int (*statfs) (struct dentry *, struct kstatfs *);
+ int (*remount_fs) (struct super_block *, int *, char *);
+ void (*clear_inode) (struct inode *);
+ void (*umount_begin) (struct super_block *);
+
+ int (*show_options)(struct seq_file *, struct vfsmount *);
+ int (*show_stats)(struct seq_file *, struct vfsmount *);
+
+ ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
+ ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
+
+ int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
+};
+# 1649 "include/linux/fs.h"
+extern void __mark_inode_dirty(struct inode *, int);
+static inline __attribute__((always_inline)) void mark_inode_dirty(struct inode *inode)
+{
+ __mark_inode_dirty(inode, (1 | 2 | 4));
+}
+
+static inline __attribute__((always_inline)) void mark_inode_dirty_sync(struct inode *inode)
+{
+ __mark_inode_dirty(inode, 1);
+}
+# 1668 "include/linux/fs.h"
+static inline __attribute__((always_inline)) void inc_nlink(struct inode *inode)
+{
+ inode->i_nlink++;
+}
+
+static inline __attribute__((always_inline)) void inode_inc_link_count(struct inode *inode)
+{
+ inc_nlink(inode);
+ mark_inode_dirty(inode);
+}
+# 1690 "include/linux/fs.h"
+static inline __attribute__((always_inline)) void drop_nlink(struct inode *inode)
+{
+ inode->i_nlink--;
+}
+# 1703 "include/linux/fs.h"
+static inline __attribute__((always_inline)) void clear_nlink(struct inode *inode)
+{
+ inode->i_nlink = 0;
+}
+
+static inline __attribute__((always_inline)) void inode_dec_link_count(struct inode *inode)
+{
+ drop_nlink(inode);
+ mark_inode_dirty(inode);
+}
+# 1722 "include/linux/fs.h"
+static inline __attribute__((always_inline)) void inode_inc_iversion(struct inode *inode)
+{
+ _spin_lock(&inode->i_lock);
+ inode->i_version++;
+ _spin_unlock(&inode->i_lock);
+}
+
+extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
+static inline __attribute__((always_inline)) void file_accessed(struct file *file)
+{
+ if (!(file->f_flags & 01000000))
+ touch_atime(file->f_path.mnt, file->f_path.dentry);
+}
+
+int sync_inode(struct inode *inode, struct writeback_control *wbc);
+
+struct file_system_type {
+ const char *name;
+ int fs_flags;
+ int (*get_sb) (struct file_system_type *, int,
+ const char *, void *, struct vfsmount *);
+ void (*kill_sb) (struct super_block *);
+ struct module *owner;
+ struct file_system_type * next;
+ struct list_head fs_supers;
+
+ struct lock_class_key s_lock_key;
+ struct lock_class_key s_umount_key;
+
+ struct lock_class_key i_lock_key;
+ struct lock_class_key i_mutex_key;
+ struct lock_class_key i_mutex_dir_key;
+ struct lock_class_key i_alloc_sem_key;
+};
+
+extern int get_sb_ns(struct file_system_type *fs_type, int flags, void *data,
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt);
+extern int get_sb_bdev(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt);
+extern int get_sb_single(struct file_system_type *fs_type,
+ int flags, void *data,
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt);
+extern int get_sb_nodev(struct file_system_type *fs_type,
+ int flags, void *data,
+ int (*fill_super)(struct super_block *, void *, int),
+ struct vfsmount *mnt);
+void generic_shutdown_super(struct super_block *sb);
+void kill_block_super(struct super_block *sb);
+void kill_anon_super(struct super_block *sb);
+void kill_litter_super(struct super_block *sb);
+void deactivate_super(struct super_block *sb);
+void deactivate_locked_super(struct super_block *sb);
+int set_anon_super(struct super_block *s, void *data);
+struct super_block *sget(struct file_system_type *type,
+ int (*test)(struct super_block *,void *),
+ int (*set)(struct super_block *,void *),
+ void *data);
+extern int get_sb_pseudo(struct file_system_type *, char *,
+ const struct super_operations *ops, unsigned long,
+ struct vfsmount *mnt);
+extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
+int __put_super_and_need_restart(struct super_block *sb);
+void put_super(struct super_block *sb);
+
+
+
+
+
+
+
+extern int register_filesystem(struct file_system_type *);
+extern int unregister_filesystem(struct file_system_type *);
+extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
+
+extern int may_umount_tree(struct vfsmount *);
+extern int may_umount(struct vfsmount *);
+extern long do_mount(char *, char *, char *, unsigned long, void *);
+extern struct vfsmount *collect_mounts(struct path *);
+extern void drop_collected_mounts(struct vfsmount *);
+
+extern int vfs_statfs(struct dentry *, struct kstatfs *);
+
+extern int current_umask(void);
+
+
+extern struct kobject *fs_kobj;
+
+extern int rw_verify_area(int, struct file *, loff_t *, size_t);
+
+
+
+
+
+extern int locks_mandatory_locked(struct inode *);
+extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int __mandatory_lock(struct inode *ino)
+{
+ return (ino->i_mode & (0002000 | 00010)) == 0002000;
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int mandatory_lock(struct inode *ino)
+{
+ return ((ino)->i_sb->s_flags & (64)) && __mandatory_lock(ino);
+}
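+/*
+ * Editor's note: with the mode macros expanded, __mandatory_lock() reads
+ * as "setgid bit (02000) set while group-execute (0010) is clear", the
+ * traditional marker for mandatory locking on a file, and mandatory_lock()
+ * additionally requires the MS_MANDLOCK flag (64) on the superblock.
+ */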
+
+static inline __attribute__((always_inline)) int locks_verify_locked(struct inode *inode)
+{
+ if (mandatory_lock(inode))
+ return locks_mandatory_locked(inode);
+ return 0;
+}
+
+static inline __attribute__((always_inline)) int locks_verify_truncate(struct inode *inode,
+ struct file *filp,
+ loff_t size)
+{
+ if (inode->i_flock && mandatory_lock(inode))
+ return locks_mandatory_area(
+ 2, inode, filp,
+ size < inode->i_size ? size : inode->i_size,
+ (size < inode->i_size ? inode->i_size - size
+ : size - inode->i_size)
+ );
+ return 0;
+}
+
+static inline __attribute__((always_inline)) int break_lease(struct inode *inode, unsigned int mode)
+{
+ if (inode->i_flock)
+ return __break_lease(inode, mode);
+ return 0;
+}
+# 1912 "include/linux/fs.h"
+extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
+ struct file *filp);
+extern int do_fallocate(struct file *file, int mode, loff_t offset,
+ loff_t len);
+extern long do_sys_open(int dfd, const char *filename, int flags,
+ int mode);
+extern struct file *filp_open(const char *, int, int);
+extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
+ const struct cred *);
+extern int filp_close(struct file *, fl_owner_t id);
+extern char * getname(const char *);
+
+
+
+extern int ioctl_preallocate(struct file *filp, void *argp);
+
+
+extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init_early(void);
+extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init(unsigned long);
+
+extern struct kmem_cache *names_cachep;
+# 1944 "include/linux/fs.h"
+extern int register_blkdev(unsigned int, const char *);
+extern void unregister_blkdev(unsigned int, const char *);
+extern struct block_device *bdget(dev_t);
+extern struct block_device *bdgrab(struct block_device *bdev);
+extern void bd_set_size(struct block_device *, loff_t size);
+extern void bd_forget(struct inode *inode);
+extern void bdput(struct block_device *);
+extern struct block_device *open_by_devnum(dev_t, fmode_t);
+extern void invalidate_bdev(struct block_device *);
+extern int sync_blockdev(struct block_device *bdev);
+extern struct super_block *freeze_bdev(struct block_device *);
+extern void emergency_thaw_all(void);
+extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+extern int fsync_bdev(struct block_device *);
+# 1973 "include/linux/fs.h"
+extern int sync_filesystem(struct super_block *);
+extern const struct file_operations def_blk_fops;
+extern const struct file_operations def_chr_fops;
+extern const struct file_operations bad_sock_fops;
+extern const struct file_operations def_fifo_fops;
+
+extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
+extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
+extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
+extern int blkdev_get(struct block_device *, fmode_t);
+extern int blkdev_put(struct block_device *, fmode_t);
+extern int bd_claim(struct block_device *, void *);
+extern void bd_release(struct block_device *);
+
+extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *);
+extern void bd_release_from_disk(struct block_device *, struct gendisk *);
+# 1997 "include/linux/fs.h"
+extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
+extern int register_chrdev_region(dev_t, unsigned, const char *);
+extern int __register_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops);
+extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name);
+extern void unregister_chrdev_region(dev_t, unsigned);
+extern void chrdev_show(struct seq_file *,off_t);
+
+static inline __attribute__((always_inline)) int register_chrdev(unsigned int major, const char *name,
+ const struct file_operations *fops)
+{
+ return __register_chrdev(major, 0, 256, name, fops);
+}
+
+static inline __attribute__((always_inline)) void unregister_chrdev(unsigned int major, const char *name)
+{
+ __unregister_chrdev(major, 0, 256, name);
+}
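+/*
+ * Editor's note: register_chrdev() above is the legacy wrapper that claims
+ * all 256 minors of a major.  A hedged driver-side sketch (the names are
+ * illustrative, not taken from this file):
+ *
+ *   major = register_chrdev(0, "mydrv", &my_fops);   // 0 requests a
+ *   if (major < 0)                                    // dynamic major
+ *           return major;
+ *   unregister_chrdev(major, "mydrv");
+ */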
+
+
+
+
+
+
+
+extern const char *__bdevname(dev_t, char *buffer);
+extern const char *bdevname(struct block_device *bdev, char *buffer);
+extern struct block_device *lookup_bdev(const char *);
+extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *);
+extern void close_bdev_exclusive(struct block_device *, fmode_t);
+extern void blkdev_show(struct seq_file *,off_t);
+
+
+
+
+
+extern void init_special_inode(struct inode *, umode_t, dev_t);
+
+
+extern void make_bad_inode(struct inode *);
+extern int is_bad_inode(struct inode *);
+
+extern const struct file_operations read_pipefifo_fops;
+extern const struct file_operations write_pipefifo_fops;
+extern const struct file_operations rdwr_pipefifo_fops;
+
+extern int fs_may_remount_ro(struct super_block *);
+# 2058 "include/linux/fs.h"
+extern void check_disk_size_change(struct gendisk *disk,
+ struct block_device *bdev);
+extern int revalidate_disk(struct gendisk *);
+extern int check_disk_change(struct block_device *);
+extern int __invalidate_device(struct block_device *);
+extern int invalidate_partition(struct gendisk *, int);
+
+extern int invalidate_inodes(struct super_block *);
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ unsigned long start, unsigned long end);
+
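+/*
+ * Pagecache invalidation wrappers: invalidate_inode_pages() drops every
+ * clean page of a mapping, and invalidate_remote_inode() does the same
+ * for regular files, directories and symlinks; the octal tests against
+ * 00170000 are S_ISREG/S_ISDIR/S_ISLNK with the S_IFMT macros already
+ * expanded by the preprocessor.
+ */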
+static inline __attribute__((always_inline)) unsigned long
+invalidate_inode_pages(struct address_space *mapping)
+{
+ return invalidate_mapping_pages(mapping, 0, ~0UL);
+}
+
+static inline __attribute__((always_inline)) void invalidate_remote_inode(struct inode *inode)
+{
+ if ((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) ||
+ (((inode->i_mode) & 00170000) == 0120000))
+ invalidate_mapping_pages(inode->i_mapping, 0, -1);
+}
+extern int invalidate_inode_pages2(struct address_space *mapping);
+extern int invalidate_inode_pages2_range(struct address_space *mapping,
+ unsigned long start, unsigned long end);
+extern int write_inode_now(struct inode *, int);
+extern int filemap_fdatawrite(struct address_space *);
+extern int filemap_flush(struct address_space *);
+extern int filemap_fdatawait(struct address_space *);
+extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
+ loff_t lend);
+extern int filemap_write_and_wait(struct address_space *mapping);
+extern int filemap_write_and_wait_range(struct address_space *mapping,
+ loff_t lstart, loff_t lend);
+extern int wait_on_page_writeback_range(struct address_space *mapping,
+ unsigned long start, unsigned long end);
+extern int __filemap_fdatawrite_range(struct address_space *mapping,
+ loff_t start, loff_t end, int sync_mode);
+extern int filemap_fdatawrite_range(struct address_space *mapping,
+ loff_t start, loff_t end);
+
+extern int vfs_fsync_range(struct file *file, struct dentry *dentry,
+ loff_t start, loff_t end, int datasync);
+extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync);
+extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
+extern void sync_supers(void);
+extern void emergency_sync(void);
+extern void emergency_remount(void);
+
+extern sector_t bmap(struct inode *, sector_t);
+
+extern int notify_change(struct dentry *, struct iattr *);
+extern int inode_permission(struct inode *, int);
+extern int generic_permission(struct inode *, int,
+ int (*check_acl)(struct inode *, int));
+
+static inline __attribute__((always_inline)) bool execute_ok(struct inode *inode)
+{
+ return (inode->i_mode & (00100|00010|00001)) || (((inode->i_mode) & 00170000) == 0040000);
+}
+
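+/*
+ * i_writecount bookkeeping: get_write_access()/put_write_access() count
+ * writers on an inode, while deny_write_access()/allow_write_access()
+ * let the exec path exclude writers from a binary that is being run.
+ */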
+extern int get_write_access(struct inode *);
+extern int deny_write_access(struct file *);
+static inline __attribute__((always_inline)) void put_write_access(struct inode * inode)
+{
+ atomic_dec(&inode->i_writecount);
+}
+static inline __attribute__((always_inline)) void allow_write_access(struct file *file)
+{
+ if (file)
+ atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
+}
+extern int do_pipe_flags(int *, int);
+extern struct file *create_read_pipe(struct file *f, int flags);
+extern struct file *create_write_pipe(int flags);
+extern void free_write_pipe(struct file *);
+
+extern struct file *do_filp_open(int dfd, const char *pathname,
+ int open_flag, int mode, int acc_mode);
+extern int may_open(struct path *, int, int);
+
+extern int kernel_read(struct file *, loff_t, char *, unsigned long);
+extern struct file * open_exec(const char *);
+
+
+extern int is_subdir(struct dentry *, struct dentry *);
+extern ino_t find_inode_number(struct dentry *, struct qstr *);
+
+# 1 "include/linux/err.h" 1
+
+
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/errno.h" 1
+# 7 "include/linux/err.h" 2
+# 22 "include/linux/err.h"
+static inline __attribute__((always_inline)) void *ERR_PTR(long error)
+{
+ return (void *) error;
+}
+
+static inline __attribute__((always_inline)) long PTR_ERR(const void *ptr)
+{
+ return (long) ptr;
+}
+
+static inline __attribute__((always_inline)) long IS_ERR(const void *ptr)
+{
+ return __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 0);
+}
+# 44 "include/linux/err.h"
+static inline __attribute__((always_inline)) void *ERR_CAST(const void *ptr)
+{
+
+ return (void *) ptr;
+}
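+/*
+ * Hedged sketch of the ERR_PTR convention these helpers implement
+ * (demo_lookup is illustrative): pointer-returning APIs encode a
+ * -errno value in the pointer itself instead of returning NULL plus a
+ * separate error code, and IS_ERR() treats the top 4095 values of the
+ * address space as errors.
+ *
+ *	struct inode *inode = demo_lookup(sb, ino);
+ *	if (IS_ERR(inode))
+ *		return PTR_ERR(inode);	// recover the negative errno
+ */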
+# 2148 "include/linux/fs.h" 2
+
+
+extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
+
+extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
+
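+/*
+ * Inode cache API: iget_locked()/iget5_locked() find an inode in the
+ * hash or allocate a new, locked one; the ilookup*() variants only
+ * search; iput() drops a reference and may evict the inode when it is
+ * the last one.
+ */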
+extern int inode_init_always(struct super_block *, struct inode *);
+extern void inode_init_once(struct inode *);
+extern void inode_add_to_lists(struct super_block *, struct inode *);
+extern void iput(struct inode *);
+extern struct inode * igrab(struct inode *);
+extern ino_t iunique(struct super_block *, ino_t);
+extern int inode_needs_sync(struct inode *inode);
+extern void generic_delete_inode(struct inode *inode);
+extern void generic_drop_inode(struct inode *inode);
+extern int generic_detach_inode(struct inode *inode);
+
+extern struct inode *ilookup5_nowait(struct super_block *sb,
+ unsigned long hashval, int (*test)(struct inode *, void *),
+ void *data);
+extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
+ int (*test)(struct inode *, void *), void *data);
+extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
+
+extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
+extern struct inode * iget_locked(struct super_block *, unsigned long);
+extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
+extern int insert_inode_locked(struct inode *);
+extern void unlock_new_inode(struct inode *);
+
+extern void __iget(struct inode * inode);
+extern void iget_failed(struct inode *);
+extern void clear_inode(struct inode *);
+extern void destroy_inode(struct inode *);
+extern void __destroy_inode(struct inode *);
+extern struct inode *new_inode(struct super_block *);
+extern int should_remove_suid(struct dentry *);
+extern int file_remove_suid(struct file *);
+
+extern void __insert_inode_hash(struct inode *, unsigned long hashval);
+extern void remove_inode_hash(struct inode *);
+static inline __attribute__((always_inline)) void insert_inode_hash(struct inode *inode) {
+ __insert_inode_hash(inode, inode->i_ino);
+}
+
+extern struct file * get_empty_filp(void);
+extern void file_move(struct file *f, struct list_head *list);
+extern void file_kill(struct file *f);
+
+struct bio;
+extern void submit_bio(int, struct bio *);
+extern int bdev_read_only(struct block_device *);
+
+extern int set_blocksize(struct block_device *, int);
+extern int sb_set_blocksize(struct super_block *, int);
+extern int sb_min_blocksize(struct super_block *, int);
+
+extern int generic_file_mmap(struct file *, struct vm_area_struct *);
+extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
+extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
+int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
+extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long,
+ loff_t *);
+extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
+ unsigned long *, loff_t, loff_t *, size_t, size_t);
+extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
+ unsigned long, loff_t, loff_t *, size_t, ssize_t);
+extern ssize_t do_sync_read(struct file *filp, char *buf, size_t len, loff_t *ppos);
+extern ssize_t do_sync_write(struct file *filp, const char *buf, size_t len, loff_t *ppos);
+extern int generic_segment_checks(const struct iovec *iov,
+ unsigned long *nr_segs, size_t *count, int access_flags);
+
+
+extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
+
+
+extern ssize_t generic_file_splice_read(struct file *, loff_t *,
+ struct pipe_inode_info *, size_t, unsigned int);
+extern ssize_t default_file_splice_read(struct file *, loff_t *,
+ struct pipe_inode_info *, size_t, unsigned int);
+extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
+ struct file *, loff_t *, size_t, unsigned int);
+extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *, size_t len, unsigned int flags);
+extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+ size_t len, unsigned int flags);
+
+extern void
+file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
+extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
+extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
+extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset,
+ int origin);
+extern int generic_file_open(struct inode * inode, struct file * filp);
+extern int nonseekable_open(struct inode * inode, struct file * filp);
+# 2255 "include/linux/fs.h"
+static inline __attribute__((always_inline)) int xip_truncate_page(struct address_space *mapping, loff_t from)
+{
+ return 0;
+}
+
+
+
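+/*
+ * Direct I/O: __blockdev_direct_IO() is the single worker, and the
+ * three inline wrappers below differ only in the lock_type they pass
+ * (DIO_LOCKING, DIO_NO_LOCKING, DIO_OWN_LOCKING), i.e. how much i_mutex
+ * handling the helper is expected to do on the filesystem's behalf.
+ */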
+ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
+ struct block_device *bdev, const struct iovec *iov, loff_t offset,
+ unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
+ int lock_type);
+
+enum {
+ DIO_LOCKING = 1,
+ DIO_NO_LOCKING,
+ DIO_OWN_LOCKING,
+};
+
+static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
+ struct inode *inode, struct block_device *bdev, const struct iovec *iov,
+ loff_t offset, unsigned long nr_segs, get_block_t get_block,
+ dio_iodone_t end_io)
+{
+ return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
+ nr_segs, get_block, end_io, DIO_LOCKING);
+}
+
+static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
+ struct inode *inode, struct block_device *bdev, const struct iovec *iov,
+ loff_t offset, unsigned long nr_segs, get_block_t get_block,
+ dio_iodone_t end_io)
+{
+ return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
+ nr_segs, get_block, end_io, DIO_NO_LOCKING);
+}
+
+static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
+ struct inode *inode, struct block_device *bdev, const struct iovec *iov,
+ loff_t offset, unsigned long nr_segs, get_block_t get_block,
+ dio_iodone_t end_io)
+{
+ return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
+ nr_segs, get_block, end_io, DIO_OWN_LOCKING);
+}
+
+
+extern const struct file_operations generic_ro_fops;
+
+
+
+extern int vfs_readlink(struct dentry *, char *, int, const char *);
+extern int vfs_follow_link(struct nameidata *, const char *);
+extern int page_readlink(struct dentry *, char *, int);
+extern void *page_follow_link_light(struct dentry *, struct nameidata *);
+extern void page_put_link(struct dentry *, struct nameidata *, void *);
+extern int __page_symlink(struct inode *inode, const char *symname, int len,
+ int nofs);
+extern int page_symlink(struct inode *inode, const char *symname, int len);
+extern const struct inode_operations page_symlink_inode_operations;
+extern int generic_readlink(struct dentry *, char *, int);
+extern void generic_fillattr(struct inode *, struct kstat *);
+extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+void inode_add_bytes(struct inode *inode, loff_t bytes);
+void inode_sub_bytes(struct inode *inode, loff_t bytes);
+loff_t inode_get_bytes(struct inode *inode);
+void inode_set_bytes(struct inode *inode, loff_t bytes);
+
+extern int vfs_readdir(struct file *, filldir_t, void *);
+
+extern int vfs_stat(char *, struct kstat *);
+extern int vfs_lstat(char *, struct kstat *);
+extern int vfs_fstat(unsigned int, struct kstat *);
+extern int vfs_fstatat(int , char *, struct kstat *, int);
+
+extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
+ unsigned long arg);
+extern int __generic_block_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo, u64 start,
+ u64 len, get_block_t *get_block);
+extern int generic_block_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo, u64 start,
+ u64 len, get_block_t *get_block);
+
+extern void get_filesystem(struct file_system_type *fs);
+extern void put_filesystem(struct file_system_type *fs);
+extern struct file_system_type *get_fs_type(const char *name);
+extern struct super_block *get_super(struct block_device *);
+extern struct super_block *get_active_super(struct block_device *bdev);
+extern struct super_block *user_get_super(dev_t);
+extern void drop_super(struct super_block *sb);
+
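+/*
+ * libfs building blocks: the dcache_dir_* and simple_* helpers give a
+ * pseudo-filesystem that keeps everything in the dcache a complete set
+ * of directory, rename, statfs and write_begin/write_end operations.
+ */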
+extern int dcache_dir_open(struct inode *, struct file *);
+extern int dcache_dir_close(struct inode *, struct file *);
+extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
+extern int dcache_readdir(struct file *, void *, filldir_t);
+extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+extern int simple_statfs(struct dentry *, struct kstatfs *);
+extern int simple_link(struct dentry *, struct inode *, struct dentry *);
+extern int simple_unlink(struct inode *, struct dentry *);
+extern int simple_rmdir(struct inode *, struct dentry *);
+extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+extern int simple_sync_file(struct file *, struct dentry *, int);
+extern int simple_empty(struct dentry *);
+extern int simple_readpage(struct file *file, struct page *page);
+extern int simple_prepare_write(struct file *file, struct page *page,
+ unsigned offset, unsigned to);
+extern int simple_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata);
+extern int simple_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata);
+
+extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
+extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *);
+extern const struct file_operations simple_dir_operations;
+extern const struct inode_operations simple_dir_inode_operations;
+struct tree_descr { char *name; const struct file_operations *ops; int mode; };
+struct dentry *d_alloc_name(struct dentry *, const char *);
+extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
+extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
+extern void simple_release_fs(struct vfsmount **mount, int *count);
+
+extern ssize_t simple_read_from_buffer(void *to, size_t count,
+ loff_t *ppos, const void *from, size_t available);
+
+extern int simple_fsync(struct file *, struct dentry *, int);
+# 2390 "include/linux/fs.h"
+extern int inode_change_ok(const struct inode *, struct iattr *);
+extern int inode_newsize_ok(const struct inode *, loff_t offset);
+extern int __attribute__((warn_unused_result)) inode_setattr(struct inode *, struct iattr *);
+
+extern void file_update_time(struct file *file);
+
+extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt);
+extern void save_mount_options(struct super_block *sb, char *options);
+extern void replace_mount_options(struct super_block *sb, char *options);
+
+static inline __attribute__((always_inline)) ino_t parent_ino(struct dentry *dentry)
+{
+ ino_t res;
+
+ _spin_lock(&dentry->d_lock);
+ res = dentry->d_parent->d_inode->i_ino;
+ _spin_unlock(&dentry->d_lock);
+ return res;
+}
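+/*
+ * parent_ino() samples ->d_parent under d_lock so a concurrent rename
+ * cannot move the dentry while the parent's inode number is read.
+ */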
+
+
+
+
+
+
+
+struct simple_transaction_argresp {
+ ssize_t size;
+ char data[0];
+};
+
+
+
+char *simple_transaction_get(struct file *file, const char *buf,
+ size_t size);
+ssize_t simple_transaction_read(struct file *file, char *buf,
+ size_t size, loff_t *pos);
+int simple_transaction_release(struct inode *inode, struct file *file);
+
+void simple_transaction_set(struct file *file, size_t n);
+# 2461 "include/linux/fs.h"
+static inline __attribute__((always_inline)) void __attribute__((format(printf, 1, 2)))
+__simple_attr_check_format(const char *fmt, ...)
+{
+
+}
+
+int simple_attr_open(struct inode *inode, struct file *file,
+ int (*get)(void *, u64 *), int (*set)(void *, u64),
+ const char *fmt);
+int simple_attr_release(struct inode *inode, struct file *file);
+ssize_t simple_attr_read(struct file *file, char *buf,
+ size_t len, loff_t *ppos);
+ssize_t simple_attr_write(struct file *file, const char *buf,
+ size_t len, loff_t *ppos);
+
+struct ctl_table;
+int proc_nr_files(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+
+int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) get_filesystem_list(char *buf);
+# 12 "include/linux/buffer_head.h" 2
+
+# 1 "include/linux/pagemap.h" 1
+
+
+
+
+
+
+# 1 "include/linux/mm.h" 1
+# 11 "include/linux/mm.h"
+# 1 "include/linux/rbtree.h" 1
+# 100 "include/linux/rbtree.h"
+struct rb_node
+{
+ unsigned long rb_parent_color;
+
+
+ struct rb_node *rb_right;
+ struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+
+
+struct rb_root
+{
+ struct rb_node *rb_node;
+};
+# 123 "include/linux/rbtree.h"
+static inline __attribute__((always_inline)) void rb_set_parent(struct rb_node *rb, struct rb_node *p)
+{
+ rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p;
+}
+static inline __attribute__((always_inline)) void rb_set_color(struct rb_node *rb, int color)
+{
+ rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
+}
+# 139 "include/linux/rbtree.h"
+extern void rb_insert_color(struct rb_node *, struct rb_root *);
+extern void rb_erase(struct rb_node *, struct rb_root *);
+
+
+extern struct rb_node *rb_next(const struct rb_node *);
+extern struct rb_node *rb_prev(const struct rb_node *);
+extern struct rb_node *rb_first(const struct rb_root *);
+extern struct rb_node *rb_last(const struct rb_root *);
+
+
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+ struct rb_root *root);
+
+static inline __attribute__((always_inline)) void rb_link_node(struct rb_node * node, struct rb_node * parent,
+ struct rb_node ** rb_link)
+{
+ node->rb_parent_color = (unsigned long )parent;
+ node->rb_left = node->rb_right = ((void *)0);
+
+ *rb_link = node;
+}
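+/*
+ * Hedged sketch of the usual insertion idiom for this rbtree API (the
+ * struct demo container and its key field are illustrative; rb_entry()
+ * is the container_of() helper from rbtree.h): walk down to find the
+ * parent and link slot, splice the node in with rb_link_node(), then
+ * let rb_insert_color() rebalance.
+ *
+ *	struct rb_node **link = &root->rb_node, *parent = NULL;
+ *	while (*link) {
+ *		parent = *link;
+ *		if (key < rb_entry(parent, struct demo, node)->key)
+ *			link = &parent->rb_left;
+ *		else
+ *			link = &parent->rb_right;
+ *	}
+ *	rb_link_node(&new->node, parent, link);
+ *	rb_insert_color(&new->node, root);
+ */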
+# 12 "include/linux/mm.h" 2
+
+
+# 1 "include/linux/mm_types.h" 1
+
+
+
+# 1 "include/linux/auxvec.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/auxvec.h" 1
+# 1 "include/asm-generic/auxvec.h" 1
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/auxvec.h" 2
+# 5 "include/linux/auxvec.h" 2
+# 5 "include/linux/mm_types.h" 2
+# 14 "include/linux/mm_types.h"
+# 1 "include/linux/page-debug-flags.h" 1
+# 14 "include/linux/page-debug-flags.h"
+enum page_debug_flags {
+ PAGE_DEBUG_FLAG_POISON,
+};
+# 15 "include/linux/mm_types.h" 2
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mmu.h" 1
+# 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/mmu.h"
+struct sram_list_struct {
+ struct sram_list_struct *next;
+ void *addr;
+ size_t length;
+};
+
+typedef struct {
+ unsigned long end_brk;
+ unsigned long stack_start;
+
+
+
+ void *l1_stack_save;
+
+ struct sram_list_struct *sram_list;
+
+
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+
+
+
+
+} mm_context_t;
+# 17 "include/linux/mm_types.h" 2
+
+
+
+
+
+
+struct address_space;
+
+
+
+
+
+
+typedef unsigned long mm_counter_t;
+# 40 "include/linux/mm_types.h"
+struct page {
+ unsigned long flags;
+
+ atomic_t _count;
+ union {
+ atomic_t _mapcount;
+
+
+
+ struct {
+ u16 inuse;
+ u16 objects;
+ };
+ };
+ union {
+ struct {
+ unsigned long private;
+
+
+
+
+
+
+ struct address_space *mapping;
+
+
+
+
+
+
+ };
+
+
+
+ struct kmem_cache *slab;
+ struct page *first_page;
+ };
+ union {
+ unsigned long index;
+ void *freelist;
+ };
+ struct list_head lru;
+# 99 "include/linux/mm_types.h"
+ unsigned long debug_flags;
+# 109 "include/linux/mm_types.h"
+};
+
+
+
+
+
+
+struct vm_region {
+ struct rb_node vm_rb;
+ unsigned long vm_flags;
+ unsigned long vm_start;
+ unsigned long vm_end;
+ unsigned long vm_top;
+ unsigned long vm_pgoff;
+ struct file *vm_file;
+
+ atomic_t vm_usage;
+};
+
+
+
+
+
+
+
+struct vm_area_struct {
+ struct mm_struct * vm_mm;
+ unsigned long vm_start;
+ unsigned long vm_end;
+
+
+
+ struct vm_area_struct *vm_next;
+
+ pgprot_t vm_page_prot;
+ unsigned long vm_flags;
+
+ struct rb_node vm_rb;
+
+
+
+
+
+
+
+ union {
+ struct {
+ struct list_head list;
+ void *parent;
+ struct vm_area_struct *head;
+ } vm_set;
+
+ struct raw_prio_tree_node prio_tree_node;
+ } shared;
+
+
+
+
+
+
+
+ struct list_head anon_vma_node;
+ struct anon_vma *anon_vma;
+
+
+ const struct vm_operations_struct *vm_ops;
+
+
+ unsigned long vm_pgoff;
+
+ struct file * vm_file;
+ void * vm_private_data;
+ unsigned long vm_truncate_count;
+
+
+ struct vm_region *vm_region;
+
+
+
+
+};
+
+struct core_thread {
+ struct task_struct *task;
+ struct core_thread *next;
+};
+
+struct core_state {
+ atomic_t nr_threads;
+ struct core_thread dumper;
+ struct completion startup;
+};
+
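+/*
+ * struct mm_struct ties together a process's VMAs (the mmap list and
+ * mm_rb tree), its page tables (pgd), the mmap_sem/page_table_lock that
+ * protect them, and the RSS/VM accounting counters; on this no-MMU
+ * Blackfin build mm_context_t is the FDPIC/SRAM state defined above.
+ */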
+struct mm_struct {
+ struct vm_area_struct * mmap;
+ struct rb_root mm_rb;
+ struct vm_area_struct * mmap_cache;
+ unsigned long (*get_unmapped_area) (struct file *filp,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags);
+ void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
+ unsigned long mmap_base;
+ unsigned long task_size;
+ unsigned long cached_hole_size;
+ unsigned long free_area_cache;
+ pgd_t * pgd;
+ atomic_t mm_users;
+ atomic_t mm_count;
+ int map_count;
+ struct rw_semaphore mmap_sem;
+ spinlock_t page_table_lock;
+
+ struct list_head mmlist;
+
+
+
+
+
+
+
+ mm_counter_t _file_rss;
+ mm_counter_t _anon_rss;
+
+ unsigned long hiwater_rss;
+ unsigned long hiwater_vm;
+
+ unsigned long total_vm, locked_vm, shared_vm, exec_vm;
+ unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack;
+ unsigned long arg_start, arg_end, env_start, env_end;
+
+ unsigned long saved_auxv[(2*(0 + 19 + 1))];
+
+ struct linux_binfmt *binfmt;
+
+ cpumask_t cpu_vm_mask;
+
+
+ mm_context_t context;
+# 257 "include/linux/mm_types.h"
+ unsigned int faultstamp;
+ unsigned int token_priority;
+ unsigned int last_interval;
+
+ unsigned long flags;
+
+ struct core_state *core_state;
+
+ spinlock_t ioctx_lock;
+ struct hlist_head ioctx_list;
+# 284 "include/linux/mm_types.h"
+ struct file *exe_file;
+ unsigned long num_exe_file_vmas;
+
+
+
+
+};
+# 15 "include/linux/mm.h" 2
+
+struct mempolicy;
+struct anon_vma;
+struct file_ra_state;
+struct user_struct;
+struct writeback_control;
+struct rlimit;
+
+
+extern unsigned long max_mapnr;
+
+
+extern unsigned long num_physpages;
+extern unsigned long totalram_pages;
+extern void * high_memory;
+extern int page_cluster;
+
+
+extern int sysctl_legacy_va_layout;
+
+
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h" 1
+# 10 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h"
+# 1 "include/asm-generic/4level-fixup.h" 1
+# 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h" 2
+
+
+
+
+typedef pte_t *pte_addr_t;
+# 40 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h"
+extern void paging_init(void);
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int pte_file(pte_t pte)
+{
+ return 0;
+}
+# 68 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h"
+static inline __attribute__((always_inline)) pte_t pte_rdprotect(pte_t _pte) { _pte.pte &= ~(0x00000004); return _pte; };
+static inline __attribute__((always_inline)) pte_t pte_mkread(pte_t _pte) { _pte.pte |= (0x00000004); return _pte; };
+static inline __attribute__((always_inline)) pte_t pte_wrprotect(pte_t _pte) { _pte.pte &= ~(0x00000008); return _pte; };
+static inline __attribute__((always_inline)) pte_t pte_mkwrite(pte_t _pte) { _pte.pte |= (0x00000008); return _pte; };
+static inline __attribute__((always_inline)) pte_t pte_exprotect(pte_t _pte) { _pte.pte &= ~(0x00000004 | 0x00000008); return _pte; };
+static inline __attribute__((always_inline)) pte_t pte_mkexec(pte_t _pte) { _pte.pte |= (0x00000004 | 0x00000008); return _pte; };
+static inline __attribute__((always_inline)) pte_t pte_mkclean(pte_t _pte) { _pte.pte &= ~(0x00000080); return _pte; };
+static inline __attribute__((always_inline)) pte_t pte_mkdirty(pte_t _pte) { _pte.pte |= (0x00000080); return _pte; };
+static inline __attribute__((always_inline)) pte_t pte_mkold(pte_t _pte) { _pte.pte &= ~0x00000010 | 0x00000004 | 0x00000008; return _pte; };
+static inline __attribute__((always_inline)) pte_t pte_mkyoung(pte_t _pte) { _pte.pte |= 0x00000010 | 0x00000004 | 0x00000008; return _pte; };
+
+
+
+
+
+
+extern char empty_zero_page[];
+
+extern unsigned int kobjsize(const void *objp);
+# 103 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h"
+extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
+ unsigned long, unsigned long,
+ unsigned long);
+
+
+# 1 "include/asm-generic/pgtable.h" 1
+# 309 "include/asm-generic/pgtable.h"
+static inline __attribute__((always_inline)) int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long size)
+{
+ return 0;
+}
+# 322 "include/asm-generic/pgtable.h"
+static inline __attribute__((always_inline)) int track_pfn_vma_copy(struct vm_area_struct *vma)
+{
+ return 0;
+}
+# 335 "include/asm-generic/pgtable.h"
+static inline __attribute__((always_inline)) void untrack_pfn_vma(struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size)
+{
+}
+# 109 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/pgtable.h" 2
+# 40 "include/linux/mm.h" 2
+# 56 "include/linux/mm.h"
+extern struct kmem_cache *vm_area_cachep;
+
+
+extern struct rb_root nommu_region_tree;
+extern struct rw_semaphore nommu_region_sem;
+
+extern unsigned int kobjsize(const void *objp);
+# 134 "include/linux/mm.h"
+extern pgprot_t protection_map[16];
+# 148 "include/linux/mm.h"
+static inline __attribute__((always_inline)) int is_linear_pfn_mapping(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & 0x40000000);
+}
+
+static inline __attribute__((always_inline)) int is_pfn_mapping(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & 0x00000400);
+}
+# 167 "include/linux/mm.h"
+struct vm_fault {
+ unsigned int flags;
+ unsigned long pgoff;
+ void *virtual_address;
+
+ struct page *page;
+
+
+
+
+};
+
+
+
+
+
+
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+
+
+ int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+
+
+
+ int (*access)(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write);
+# 223 "include/linux/mm.h"
+};
+
+struct mmu_gather;
+struct inode;
+# 235 "include/linux/mm.h"
+# 1 "include/linux/page-flags.h" 1
+# 75 "include/linux/page-flags.h"
+enum pageflags {
+ PG_locked,
+ PG_error,
+ PG_referenced,
+ PG_uptodate,
+ PG_dirty,
+ PG_lru,
+ PG_active,
+ PG_slab,
+ PG_owner_priv_1,
+ PG_arch_1,
+ PG_reserved,
+ PG_private,
+ PG_private_2,
+ PG_writeback,
+
+ PG_head,
+ PG_tail,
+
+
+
+ PG_swapcache,
+ PG_mappedtodisk,
+ PG_reclaim,
+ PG_buddy,
+ PG_swapbacked,
+ PG_unevictable,
+# 111 "include/linux/page-flags.h"
+ __NR_PAGEFLAGS,
+
+
+ PG_checked = PG_owner_priv_1,
+
+
+
+
+
+ PG_fscache = PG_private_2,
+
+
+ PG_pinned = PG_owner_priv_1,
+ PG_savepinned = PG_dirty,
+
+
+ PG_slob_free = PG_private,
+
+
+ PG_slub_frozen = PG_active,
+ PG_slub_debug = PG_error,
+};
+# 199 "include/linux/page-flags.h"
+struct page;
+
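+/*
+ * The one-liners below are the expanded PAGEFLAG()/TESTPAGEFLAG()/
+ * __PAGEFLAG() macro family: each PG_* bit gets Page*/SetPage*/
+ * ClearPage* accessors (plus non-atomic __Set/__Clear and Test*
+ * variants) that operate on page->flags.
+ */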
+static inline __attribute__((always_inline)) int PageLocked(struct page *page) { return test_bit(PG_locked, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageLocked(struct page *page) { return test_and_set_bit(PG_locked, &page->flags); }
+static inline __attribute__((always_inline)) int PageError(struct page *page) { return test_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) void SetPageError(struct page *page) { set_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) void ClearPageError(struct page *page) { clear_bit(PG_error, &page->flags); }
+static inline __attribute__((always_inline)) int PageReferenced(struct page *page) { return test_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) void SetPageReferenced(struct page *page) { set_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReferenced(struct page *page) { clear_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReferenced(struct page *page) { return test_and_clear_bit(PG_referenced, &page->flags); }
+static inline __attribute__((always_inline)) int PageDirty(struct page *page) { return test_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void SetPageDirty(struct page *page) { set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void ClearPageDirty(struct page *page) { clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageDirty(struct page *page) { return test_and_set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageDirty(struct page *page) { return test_and_clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageDirty(struct page *page) { __clear_bit(PG_dirty, &page->flags); }
+static inline __attribute__((always_inline)) int PageLRU(struct page *page) { return test_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void SetPageLRU(struct page *page) { set_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void ClearPageLRU(struct page *page) { clear_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageLRU(struct page *page) { __clear_bit(PG_lru, &page->flags); }
+static inline __attribute__((always_inline)) int PageActive(struct page *page) { return test_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void SetPageActive(struct page *page) { set_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void ClearPageActive(struct page *page) { clear_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageActive(struct page *page) { __clear_bit(PG_active, &page->flags); }
+ static inline __attribute__((always_inline)) int TestClearPageActive(struct page *page) { return test_and_clear_bit(PG_active, &page->flags); }
+static inline __attribute__((always_inline)) int PageSlab(struct page *page) { return test_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlab(struct page *page) { __set_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlab(struct page *page) { __clear_bit(PG_slab, &page->flags); }
+static inline __attribute__((always_inline)) int PageChecked(struct page *page) { return test_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) void SetPageChecked(struct page *page) { set_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageChecked(struct page *page) { clear_bit(PG_checked, &page->flags); }
+static inline __attribute__((always_inline)) int PagePinned(struct page *page) { return test_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) void SetPagePinned(struct page *page) { set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePinned(struct page *page) { clear_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestSetPagePinned(struct page *page) { return test_and_set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestClearPagePinned(struct page *page) { return test_and_clear_bit(PG_pinned, &page->flags); }
+static inline __attribute__((always_inline)) int PageSavePinned(struct page *page) { return test_bit(PG_savepinned, &page->flags); } static inline __attribute__((always_inline)) void SetPageSavePinned(struct page *page) { set_bit(PG_savepinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSavePinned(struct page *page) { clear_bit(PG_savepinned, &page->flags); };
+static inline __attribute__((always_inline)) int PageReserved(struct page *page) { return test_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void SetPageReserved(struct page *page) { set_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReserved(struct page *page) { clear_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageReserved(struct page *page) { __clear_bit(PG_reserved, &page->flags); }
+static inline __attribute__((always_inline)) int PageSwapBacked(struct page *page) { return test_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void SetPageSwapBacked(struct page *page) { set_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSwapBacked(struct page *page) { clear_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSwapBacked(struct page *page) { __clear_bit(PG_swapbacked, &page->flags); }
+
+static inline __attribute__((always_inline)) int PageSlobFree(struct page *page) { return test_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlobFree(struct page *page) { __set_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlobFree(struct page *page) { __clear_bit(PG_slob_free, &page->flags); }
+
+static inline __attribute__((always_inline)) int PageSlubFrozen(struct page *page) { return test_bit(PG_slub_frozen, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlubFrozen(struct page *page) { __set_bit(PG_slub_frozen, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlubFrozen(struct page *page) { __clear_bit(PG_slub_frozen, &page->flags); }
+static inline __attribute__((always_inline)) int PageSlubDebug(struct page *page) { return test_bit(PG_slub_debug, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlubDebug(struct page *page) { __set_bit(PG_slub_debug, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlubDebug(struct page *page) { __clear_bit(PG_slub_debug, &page->flags); }
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int PagePrivate(struct page *page) { return test_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void SetPagePrivate(struct page *page) { set_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate(struct page *page) { clear_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void __SetPagePrivate(struct page *page) { __set_bit(PG_private, &page->flags); }
+ static inline __attribute__((always_inline)) void __ClearPagePrivate(struct page *page) { __clear_bit(PG_private, &page->flags); }
+static inline __attribute__((always_inline)) int PagePrivate2(struct page *page) { return test_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) void SetPagePrivate2(struct page *page) { set_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate2(struct page *page) { clear_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) int TestSetPagePrivate2(struct page *page) { return test_and_set_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) int TestClearPagePrivate2(struct page *page) { return test_and_clear_bit(PG_private_2, &page->flags); }
+static inline __attribute__((always_inline)) int PageOwnerPriv1(struct page *page) { return test_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) void SetPageOwnerPriv1(struct page *page) { set_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) void ClearPageOwnerPriv1(struct page *page) { clear_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageOwnerPriv1(struct page *page) { return test_and_clear_bit(PG_owner_priv_1, &page->flags); }
+
+
+
+
+
+static inline __attribute__((always_inline)) int PageWriteback(struct page *page) { return test_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageWriteback(struct page *page) { return test_and_set_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageWriteback(struct page *page) { return test_and_clear_bit(PG_writeback, &page->flags); }
+static inline __attribute__((always_inline)) int PageBuddy(struct page *page) { return test_bit(PG_buddy, &page->flags); } static inline __attribute__((always_inline)) void __SetPageBuddy(struct page *page) { __set_bit(PG_buddy, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageBuddy(struct page *page) { __clear_bit(PG_buddy, &page->flags); }
+static inline __attribute__((always_inline)) int PageMappedToDisk(struct page *page) { return test_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) void SetPageMappedToDisk(struct page *page) { set_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) void ClearPageMappedToDisk(struct page *page) { clear_bit(PG_mappedtodisk, &page->flags); }
+
+
+static inline __attribute__((always_inline)) int PageReclaim(struct page *page) { return test_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void SetPageReclaim(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReclaim(struct page *page) { clear_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReclaim(struct page *page) { return test_and_clear_bit(PG_reclaim, &page->flags); }
+static inline __attribute__((always_inline)) int PageReadahead(struct page *page) { return test_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void SetPageReadahead(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReadahead(struct page *page) { clear_bit(PG_reclaim, &page->flags); }
+# 249 "include/linux/page-flags.h"
+static inline __attribute__((always_inline)) int PageHighMem(struct page *page) { return 0; }
+
+
+
+
+
+static inline __attribute__((always_inline)) int PageSwapCache(struct page *page) { return 0; }
+ static inline __attribute__((always_inline)) void SetPageSwapCache(struct page *page) { } static inline __attribute__((always_inline)) void ClearPageSwapCache(struct page *page) { }
+
+
+static inline __attribute__((always_inline)) int PageUnevictable(struct page *page) { return test_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void SetPageUnevictable(struct page *page) { set_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void ClearPageUnevictable(struct page *page) { clear_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageUnevictable(struct page *page) { __clear_bit(PG_unevictable, &page->flags); }
+ static inline __attribute__((always_inline)) int TestClearPageUnevictable(struct page *page) { return test_and_clear_bit(PG_unevictable, &page->flags); }
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int PageMlocked(struct page *page) { return 0; } static inline __attribute__((always_inline)) void SetPageMlocked(struct page *page) { }
+ static inline __attribute__((always_inline)) int TestClearPageMlocked(struct page *page) { return 0; } static inline __attribute__((always_inline)) int __TestClearPageMlocked(struct page *page) { return 0; }
+
+
+
+
+
+static inline __attribute__((always_inline)) int PageUncached(struct page *page) { return 0; }
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int PageHWPoison(struct page *page) { return 0; }
+
+
+
+static inline __attribute__((always_inline)) int PageUptodate(struct page *page)
+{
+ int ret = test_bit(PG_uptodate, &(page)->flags);
+# 299 "include/linux/page-flags.h"
+ if (ret)
+ __asm__ __volatile__("": : :"memory");
+
+ return ret;
+}
+
+static inline __attribute__((always_inline)) void __SetPageUptodate(struct page *page)
+{
+ __asm__ __volatile__("": : :"memory");
+ __set_bit(PG_uptodate, &(page)->flags);
+}
+
+static inline __attribute__((always_inline)) void SetPageUptodate(struct page *page)
+{
+# 325 "include/linux/page-flags.h"
+ __asm__ __volatile__("": : :"memory");
+ set_bit(PG_uptodate, &(page)->flags);
+
+}
+
+static inline __attribute__((always_inline)) void ClearPageUptodate(struct page *page) { clear_bit(PG_uptodate, &page->flags); }
+
+extern void cancel_dirty_page(struct page *page, unsigned int account_size);
+
+int test_clear_page_writeback(struct page *page);
+int test_set_page_writeback(struct page *page);
+
+static inline __attribute__((always_inline)) void set_page_writeback(struct page *page)
+{
+ test_set_page_writeback(page);
+}
+# 349 "include/linux/page-flags.h"
+static inline __attribute__((always_inline)) int PageHead(struct page *page) { return test_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __SetPageHead(struct page *page) { __set_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageHead(struct page *page) { __clear_bit(PG_head, &page->flags); }
+static inline __attribute__((always_inline)) int PageTail(struct page *page) { return test_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __SetPageTail(struct page *page) { __set_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageTail(struct page *page) { __clear_bit(PG_tail, &page->flags); }
+
+static inline __attribute__((always_inline)) int PageCompound(struct page *page)
+{
+ return page->flags & ((1L << PG_head) | (1L << PG_tail));
+
+}
+# 429 "include/linux/page-flags.h"
+static inline __attribute__((always_inline)) int page_has_private(struct page *page)
+{
+ return !!(page->flags & (1 << PG_private | 1 << PG_private_2));
+}
+# 236 "include/linux/mm.h" 2
+# 253 "include/linux/mm.h"
+static inline __attribute__((always_inline)) int put_page_testzero(struct page *page)
+{
+ do { } while (0);
+ return (atomic_sub_return(1, (&page->_count)) == 0);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) int get_page_unless_zero(struct page *page)
+{
+ return ({ int c, old; c = (((&page->_count))->counter); while (c != (0) && (old = (((__typeof__(*((&((((&page->_count)))->counter)))))__cmpxchg_local_generic(((&((((&page->_count)))->counter))), (unsigned long)(((c))), (unsigned long)(((c + (1)))), sizeof(*((&((((&page->_count)))->counter)))))))) != c) c = old; c != (0); });
+}
+
+
+struct page *vmalloc_to_page(const void *addr);
+unsigned long vmalloc_to_pfn(const void *addr);
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int is_vmalloc_addr(const void *x)
+{
+
+
+
+
+
+ return 0;
+
+}
+
+
+
+static inline __attribute__((always_inline)) int is_vmalloc_or_module_addr(const void *x)
+{
+ return 0;
+}
+
+
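+/*
+ * Compound (multi-order) pages: compound_head() follows ->first_page
+ * from any tail page back to the head, and the destructor/order of the
+ * allocation are stashed in the lru fields of the first tail page
+ * (page[1]) by the set_compound_* helpers below.
+ */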
+static inline __attribute__((always_inline)) struct page *compound_head(struct page *page)
+{
+ if (__builtin_expect(!!(PageTail(page)), 0))
+ return page->first_page;
+ return page;
+}
+
+static inline __attribute__((always_inline)) int page_count(struct page *page)
+{
+ return ((&compound_head(page)->_count)->counter);
+}
+
+static inline __attribute__((always_inline)) void get_page(struct page *page)
+{
+ page = compound_head(page);
+ do { } while (0);
+ atomic_inc(&page->_count);
+}
+
+static inline __attribute__((always_inline)) struct page *virt_to_head_page(const void *x)
+{
+ struct page *page = (mem_map + (((unsigned long)(x)-(0)) >> 12));
+ return compound_head(page);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) void init_page_count(struct page *page)
+{
+ (((&page->_count)->counter) = (1));
+}
+
+void put_page(struct page *page);
+void put_pages_list(struct list_head *pages);
+
+void split_page(struct page *page, unsigned int order);
+
+
+
+
+
+
+typedef void compound_page_dtor(struct page *);
+
+static inline __attribute__((always_inline)) void set_compound_page_dtor(struct page *page,
+ compound_page_dtor *dtor)
+{
+ page[1].lru.next = (void *)dtor;
+}
+
+static inline __attribute__((always_inline)) compound_page_dtor *get_compound_page_dtor(struct page *page)
+{
+ return (compound_page_dtor *)page[1].lru.next;
+}
+
+static inline __attribute__((always_inline)) int compound_order(struct page *page)
+{
+ if (!PageHead(page))
+ return 0;
+ return (unsigned long)page[1].lru.prev;
+}
+
+static inline __attribute__((always_inline)) void set_compound_order(struct page *page, unsigned long order)
+{
+ page[1].lru.prev = (void *)order;
+}
+# 511 "include/linux/mm.h"
+static inline __attribute__((always_inline)) enum zone_type page_zonenum(struct page *page)
+{
+ return (page->flags >> (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0))) & ((1UL << 2) - 1);
+}
+# 524 "include/linux/mm.h"
+static inline __attribute__((always_inline)) int page_zone_id(struct page *page)
+{
+ return (page->flags >> ((((((sizeof(unsigned long)*8) - 0) - 0) < ((((sizeof(unsigned long)*8) - 0) - 0) - 2))? (((sizeof(unsigned long)*8) - 0) - 0) : ((((sizeof(unsigned long)*8) - 0) - 0) - 2)) * ((0 + 2) != 0))) & ((1UL << (0 + 2)) - 1);
+}
+
+static inline __attribute__((always_inline)) int zone_to_nid(struct zone *zone)
+{
+
+
+
+ return 0;
+
+}
+
+
+
+
+static inline __attribute__((always_inline)) int page_to_nid(struct page *page)
+{
+ return (page->flags >> ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0))) & ((1UL << 0) - 1);
+}
+
+
+static inline __attribute__((always_inline)) struct zone *page_zone(struct page *page)
+{
+ return &(&contig_page_data)->node_zones[page_zonenum(page)];
+}
+# 559 "include/linux/mm.h"
+static inline __attribute__((always_inline)) void set_page_zone(struct page *page, enum zone_type zone)
+{
+ page->flags &= ~(((1UL << 2) - 1) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0)));
+ page->flags |= (zone & ((1UL << 2) - 1)) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0));
+}
+
+static inline __attribute__((always_inline)) void set_page_node(struct page *page, unsigned long node)
+{
+ page->flags &= ~(((1UL << 0) - 1) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0)));
+ page->flags |= (node & ((1UL << 0) - 1)) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0));
+}
+
+static inline __attribute__((always_inline)) void set_page_section(struct page *page, unsigned long section)
+{
+ page->flags &= ~(((1UL << 0) - 1) << (((sizeof(unsigned long)*8) - 0) * (0 != 0)));
+ page->flags |= (section & ((1UL << 0) - 1)) << (((sizeof(unsigned long)*8) - 0) * (0 != 0));
+}
+
+static inline __attribute__((always_inline)) void set_page_links(struct page *page, enum zone_type zone,
+ unsigned long node, unsigned long pfn)
+{
+ set_page_zone(page, zone);
+ set_page_node(page, node);
+ set_page_section(page, ((pfn) >> 0));
+}
+
+
+
+
+# 1 "include/linux/vmstat.h" 1
+
+
+
+
+
+# 1 "include/linux/mm.h" 1
+# 7 "include/linux/vmstat.h" 2
+# 31 "include/linux/vmstat.h"
+enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+ PGALLOC_DMA, PGALLOC_NORMAL , PGALLOC_MOVABLE,
+ PGFREE, PGACTIVATE, PGDEACTIVATE,
+ PGFAULT, PGMAJFAULT,
+ PGREFILL_DMA, PGREFILL_NORMAL , PGREFILL_MOVABLE,
+ PGSTEAL_DMA, PGSTEAL_NORMAL , PGSTEAL_MOVABLE,
+ PGSCAN_KSWAPD_DMA, PGSCAN_KSWAPD_NORMAL , PGSCAN_KSWAPD_MOVABLE,
+ PGSCAN_DIRECT_DMA, PGSCAN_DIRECT_NORMAL , PGSCAN_DIRECT_MOVABLE,
+
+
+
+ PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+ PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+
+
+
+ UNEVICTABLE_PGCULLED,
+ UNEVICTABLE_PGSCANNED,
+ UNEVICTABLE_PGRESCUED,
+ UNEVICTABLE_PGMLOCKED,
+ UNEVICTABLE_PGMUNLOCKED,
+ UNEVICTABLE_PGCLEARED,
+ UNEVICTABLE_PGSTRANDED,
+ UNEVICTABLE_MLOCKFREED,
+ NR_VM_EVENT_ITEMS
+};
+
+extern int sysctl_stat_interval;
+# 71 "include/linux/vmstat.h"
+struct vm_event_state {
+ unsigned long event[NR_VM_EVENT_ITEMS];
+};
+
+extern __attribute__((section(".discard"), unused)) char __pcpu_scope_vm_event_states; extern __attribute__((section(".data" ""))) __typeof__(struct vm_event_state) per_cpu__vm_event_states;
+
+static inline __attribute__((always_inline)) void __count_vm_event(enum vm_event_item item)
+{
+ per_cpu__vm_event_states.event[item]++;
+}
+
+static inline __attribute__((always_inline)) void count_vm_event(enum vm_event_item item)
+{
+ (*({ extern int simple_identifier_vm_event_states(void); do { } while (0); &per_cpu__vm_event_states; })).event[item]++;
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) void __count_vm_events(enum vm_event_item item, long delta)
+{
+ per_cpu__vm_event_states.event[item] += delta;
+}
+
+static inline __attribute__((always_inline)) void count_vm_events(enum vm_event_item item, long delta)
+{
+ (*({ extern int simple_identifier_vm_event_states(void); do { } while (0); &per_cpu__vm_event_states; })).event[item] += delta;
+ do { } while (0);
+}
+
+extern void all_vm_events(unsigned long *);
+
+
+
+static inline __attribute__((always_inline)) void vm_events_fold_cpu(int cpu)
+{
+}
+# 139 "include/linux/vmstat.h"
+extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+static inline __attribute__((always_inline)) void zone_page_state_add(long x, struct zone *zone,
+ enum zone_stat_item item)
+{
+ atomic_long_add(x, &zone->vm_stat[item]);
+ atomic_long_add(x, &vm_stat[item]);
+}
+
+static inline __attribute__((always_inline)) unsigned long global_page_state(enum zone_stat_item item)
+{
+ long x = atomic_long_read(&vm_stat[item]);
+
+
+
+
+ return x;
+}
+
+static inline __attribute__((always_inline)) unsigned long zone_page_state(struct zone *zone,
+ enum zone_stat_item item)
+{
+ long x = atomic_long_read(&zone->vm_stat[item]);
+
+
+
+
+ return x;
+}
+
+extern unsigned long global_reclaimable_pages(void);
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
+# 209 "include/linux/vmstat.h"
+static inline __attribute__((always_inline)) void zap_zone_vm_stats(struct zone *zone)
+{
+ memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
+}
+
+extern void inc_zone_state(struct zone *, enum zone_stat_item);
+# 237 "include/linux/vmstat.h"
+static inline __attribute__((always_inline)) void __mod_zone_page_state(struct zone *zone,
+ enum zone_stat_item item, int delta)
+{
+ zone_page_state_add(delta, zone, item);
+}
+
+static inline __attribute__((always_inline)) void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+ atomic_long_inc(&zone->vm_stat[item]);
+ atomic_long_inc(&vm_stat[item]);
+}
+
+static inline __attribute__((always_inline)) void __inc_zone_page_state(struct page *page,
+ enum zone_stat_item item)
+{
+ __inc_zone_state(page_zone(page), item);
+}
+
+static inline __attribute__((always_inline)) void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+ atomic_long_dec(&zone->vm_stat[item]);
+ atomic_long_dec(&vm_stat[item]);
+}
+
+static inline __attribute__((always_inline)) void __dec_zone_page_state(struct page *page,
+ enum zone_stat_item item)
+{
+ __dec_zone_state(page_zone(page), item);
+}
+# 275 "include/linux/vmstat.h"
+static inline __attribute__((always_inline)) void refresh_cpu_vm_stats(int cpu) { }
+# 589 "include/linux/mm.h" 2
+
+static inline __attribute__((always_inline)) __attribute__((always_inline)) void *lowmem_page_address(struct page *page)
+{
+ return ((void *)((unsigned long)(((unsigned long)((page) - mem_map) + (0UL)) << 12) + (0)));
+}
+# 631 "include/linux/mm.h"
+extern struct address_space swapper_space;
+static inline __attribute__((always_inline)) struct address_space *page_mapping(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+
+ do { } while (0);
+
+
+
+
+
+ if (__builtin_expect(!!((unsigned long)mapping & 1), 0))
+ mapping = ((void *)0);
+ return mapping;
+}
+
+static inline __attribute__((always_inline)) int PageAnon(struct page *page)
+{
+ return ((unsigned long)page->mapping & 1) != 0;
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) unsigned long page_index(struct page *page)
+{
+ if (__builtin_expect(!!(PageSwapCache(page)), 0))
+ return ((page)->private);
+ return page->index;
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void reset_page_mapcount(struct page *page)
+{
+ (((&(page)->_mapcount)->counter) = (-1));
+}
+
+static inline __attribute__((always_inline)) int page_mapcount(struct page *page)
+{
+ return ((&(page)->_mapcount)->counter) + 1;
+}
+
+
+
+
+static inline __attribute__((always_inline)) int page_mapped(struct page *page)
+{
+ return ((&(page)->_mapcount)->counter) >= 0;
+}
+# 708 "include/linux/mm.h"
+extern void pagefault_out_of_memory(void);
+
+
+
+extern void show_free_areas(void);
+
+int shmem_lock(struct file *file, int lock, struct user_struct *user);
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
+int shmem_zero_setup(struct vm_area_struct *);
+
+
+extern unsigned long shmem_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+
+
+extern int can_do_mlock(void);
+extern int user_shm_lock(size_t, struct user_struct *);
+extern void user_shm_unlock(size_t, struct user_struct *);
+
+
+
+
+struct zap_details {
+ struct vm_area_struct *nonlinear_vma;
+ struct address_space *check_mapping;
+ unsigned long first_index;
+ unsigned long last_index;
+ spinlock_t *i_mmap_lock;
+ unsigned long truncate_count;
+};
+
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
+
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size);
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size, struct zap_details *);
+unsigned long unmap_vmas(struct mmu_gather **tlb,
+ struct vm_area_struct *start_vma, unsigned long start_addr,
+ unsigned long end_addr, unsigned long *nr_accounted,
+ struct zap_details *);
+# 764 "include/linux/mm.h"
+struct mm_walk {
+ int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
+ int (*pud_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
+ int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
+ int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
+ int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+ struct mm_struct *mm;
+ void *private;
+};
+
+int walk_page_range(unsigned long addr, unsigned long end,
+ struct mm_walk *walk);
+void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+ unsigned long end, unsigned long floor, unsigned long ceiling);
+int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
+ struct vm_area_struct *vma);
+void unmap_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+ unsigned long *pfn);
+int follow_phys(struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags, unsigned long *prot, resource_size_t *phys);
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write);
+
+static inline __attribute__((always_inline)) void unmap_shared_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen)
+{
+ unmap_mapping_range(mapping, holebegin, holelen, 0);
+}
+
+extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
+extern int vmtruncate(struct inode *inode, loff_t offset);
+extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
+
+int truncate_inode_page(struct address_space *mapping, struct page *page);
+int generic_error_remove_page(struct address_space *mapping, struct page *page);
+
+int invalidate_inode_page(struct page *page);
+
+
+
+
+
+static inline __attribute__((always_inline)) int handle_mm_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags)
+{
+
+ do { asm volatile( "1: .hword %0\n" " .section __bug_table,\"a\",@progbits\n" "2: .long 1b\n" " .long %1\n" " .short %2\n" " .short %3\n" " .org 2b + %4\n" " .previous" : : "i"(0xefcd), "i"("include/linux/mm.h"), "i"(813), "i"(0), "i"(sizeof(struct bug_entry))); for (;;); } while (0);
+ return 0x0002;
+}
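+
+/*
+ * !CONFIG_MMU stub: there is no fault handler on nommu, so reaching this
+ * triggers the inline BUG() above and the return value (0x0002 appears
+ * to be VM_FAULT_SIGBUS) is never actually used.
+ */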
+
+
+extern int make_pages_present(unsigned long addr, unsigned long end);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int nr_pages, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas);
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages);
+struct page *get_dump_page(unsigned long addr);
+
+extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
+extern void do_invalidatepage(struct page *page, unsigned long offset);
+
+int __set_page_dirty_nobuffers(struct page *page);
+int __set_page_dirty_no_writeback(struct page *page);
+int redirty_page_for_writepage(struct writeback_control *wbc,
+ struct page *page);
+void account_page_dirtied(struct page *page, struct address_space *mapping);
+int set_page_dirty(struct page *page);
+int set_page_dirty_lock(struct page *page);
+int clear_page_dirty_for_io(struct page *page);
+
+extern unsigned long move_page_tables(struct vm_area_struct *vma,
+ unsigned long old_addr, struct vm_area_struct *new_vma,
+ unsigned long new_addr, unsigned long len);
+extern unsigned long do_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr);
+extern int mprotect_fixup(struct vm_area_struct *vma,
+ struct vm_area_struct **pprev, unsigned long start,
+ unsigned long end, unsigned long newflags);
+
+
+
+
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages);
+# 871 "include/linux/mm.h"
+struct shrinker {
+ int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+ int seeks;
+
+
+ struct list_head list;
+ long nr;
+};
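+
+/*
+ * A shrinker lets a cache give memory back under pressure: shrink() is
+ * presumably called with nr_to_scan == 0 to query the object count and
+ * with a non-zero count to actually free objects; seeks expresses how
+ * expensive the objects are to recreate.  Illustrative registration
+ * sketch (hypothetical cache, not from this file):
+ *
+ *	static struct shrinker my_shrinker = {
+ *		.shrink = my_cache_shrink,
+ *		.seeks  = 2,
+ *	};
+ *	register_shrinker(&my_shrinker);
+ */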
+
+extern void register_shrinker(struct shrinker *);
+extern void unregister_shrinker(struct shrinker *);
+
+int vma_wants_writenotify(struct vm_area_struct *vma);
+
+extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
+
+
+static inline __attribute__((always_inline)) int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long address)
+{
+ return 0;
+}
+# 904 "include/linux/mm.h"
+int __pmd_alloc(struct mm_struct *mm, pgd_t *pud, unsigned long address);
+
+
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
+int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+# 950 "include/linux/mm.h"
+static inline __attribute__((always_inline)) void pgtable_page_ctor(struct page *page)
+{
+ do {} while (0);
+ __inc_zone_page_state(page, NR_PAGETABLE);
+}
+
+static inline __attribute__((always_inline)) void pgtable_page_dtor(struct page *page)
+{
+ do {} while (0);
+ __dec_zone_page_state(page, NR_PAGETABLE);
+}
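+
+/*
+ * Page-table page constructor/destructor: with split page-table locks
+ * compiled out, the lock init/teardown collapses to the empty
+ * do {} while (0) and only the NR_PAGETABLE zone counter is adjusted.
+ */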
+# 988 "include/linux/mm.h"
+extern void free_area_init(unsigned long * zones_size);
+extern void free_area_init_node(int nid, unsigned long * zones_size,
+ unsigned long zone_start_pfn, unsigned long *zholes_size);
+# 1039 "include/linux/mm.h"
+static inline __attribute__((always_inline)) int __early_pfn_to_nid(unsigned long pfn)
+{
+ return 0;
+}
+# 1052 "include/linux/mm.h"
+extern void set_dma_reserve(unsigned long new_dma_reserve);
+extern void memmap_init_zone(unsigned long, int, unsigned long,
+ unsigned long, enum memmap_context);
+extern void setup_per_zone_wmarks(void);
+extern void calculate_zone_inactive_ratio(struct zone *zone);
+extern void mem_init(void);
+extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) mmap_init(void);
+extern void show_mem(void);
+extern void si_meminfo(struct sysinfo * val);
+extern void si_meminfo_node(struct sysinfo *val, int nid);
+extern int after_bootmem;
+
+
+
+
+static inline __attribute__((always_inline)) void setup_per_cpu_pageset(void) {}
+
+
+extern void zone_pcp_update(struct zone *zone);
+
+
+extern atomic_long_t mmap_pages_allocated;
+
+
+void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
+void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
+void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
+struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
+ struct prio_tree_iter *iter);
+
+
+
+
+
+static inline __attribute__((always_inline)) void vma_nonlinear_insert(struct vm_area_struct *vma,
+ struct list_head *list)
+{
+ vma->shared.vm_set.parent = ((void *)0);
+ list_add_tail(&vma->shared.vm_set.list, list);
+}
+
+
+extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
+extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long pgoff, struct vm_area_struct *insert);
+extern struct vm_area_struct *vma_merge(struct mm_struct *,
+ struct vm_area_struct *prev, unsigned long addr, unsigned long end,
+ unsigned long vm_flags, struct anon_vma *, struct file *, unsigned long,
+ struct mempolicy *);
+extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
+extern int split_vma(struct mm_struct *,
+ struct vm_area_struct *, unsigned long addr, int new_below);
+extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
+ struct rb_node **, struct rb_node *);
+extern void unlink_file_vma(struct vm_area_struct *);
+extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
+ unsigned long addr, unsigned long len, unsigned long pgoff);
+extern void exit_mmap(struct mm_struct *);
+
+extern int mm_take_all_locks(struct mm_struct *mm);
+extern void mm_drop_all_locks(struct mm_struct *mm);
+
+
+
+extern void added_exe_file_vma(struct mm_struct *mm);
+extern void removed_exe_file_vma(struct mm_struct *mm);
+# 1127 "include/linux/mm.h"
+extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
+extern int install_special_mapping(struct mm_struct *mm,
+ unsigned long addr, unsigned long len,
+ unsigned long flags, struct page **pages);
+
+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+
+extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long pgoff);
+extern unsigned long mmap_region(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long flags,
+ unsigned int vm_flags, unsigned long pgoff);
+
+static inline __attribute__((always_inline)) unsigned long do_mmap(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long offset)
+{
+ unsigned long ret = -22;
+ if ((offset + (((len)+((typeof(len))((1UL << 12))-1))&~((typeof(len))((1UL << 12))-1))) < offset)
+ goto out;
+ if (!(offset & ~(~((1UL << 12)-1))))
+ ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> 12);
+out:
+ return ret;
+}
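+
+/*
+ * do_mmap(): the literals are the expanded page-size constants
+ * (1UL << 12 == 4096-byte pages) and -22 is -EINVAL.  The first test
+ * rejects an offset that would wrap once the length is rounded up to a
+ * page boundary, the second requires the offset to be page aligned, and
+ * the byte offset is then converted to a page index for do_mmap_pgoff().
+ */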
+
+extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+
+extern unsigned long do_brk(unsigned long, unsigned long);
+
+
+extern unsigned long page_unuse(struct page *);
+extern void truncate_inode_pages(struct address_space *, loff_t);
+extern void truncate_inode_pages_range(struct address_space *,
+ loff_t lstart, loff_t lend);
+
+
+extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
+
+
+int write_one_page(struct page *page, int wait);
+void task_dirty_inc(struct task_struct *tsk);
+
+
+
+
+
+int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
+ unsigned long offset, unsigned long nr_to_read);
+
+void page_cache_sync_readahead(struct address_space *mapping,
+ struct file_ra_state *ra,
+ struct file *filp,
+ unsigned long offset,
+ unsigned long size);
+
+void page_cache_async_readahead(struct address_space *mapping,
+ struct file_ra_state *ra,
+ struct file *filp,
+ struct page *pg,
+ unsigned long offset,
+ unsigned long size);
+
+unsigned long max_sane_readahead(unsigned long nr);
+unsigned long ra_submit(struct file_ra_state *ra,
+ struct address_space *mapping,
+ struct file *filp);
+
+
+extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+
+
+
+extern int expand_stack_downwards(struct vm_area_struct *vma,
+ unsigned long address);
+
+
+extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
+extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+ struct vm_area_struct **pprev);
+
+
+
+static inline __attribute__((always_inline)) struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+{
+ struct vm_area_struct * vma = find_vma(mm,start_addr);
+
+ if (vma && end_addr <= vma->vm_start)
+ vma = ((void *)0);
+ return vma;
+}
+
+static inline __attribute__((always_inline)) unsigned long vma_pages(struct vm_area_struct *vma)
+{
+ return (vma->vm_end - vma->vm_start) >> 12;
+}
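+
+/*
+ * find_vma_intersection() returns a VMA overlapping [start_addr,
+ * end_addr) or NULL, and vma_pages() converts the VMA extent to a page
+ * count (the 12 is the expanded PAGE_SHIFT).  Illustrative sketch
+ * (hypothetical caller, not from this file):
+ *
+ *	if (find_vma_intersection(mm, addr, addr + len))
+ *		return -EEXIST;
+ */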
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags);
+struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t);
+int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
+
+struct page *follow_page(struct vm_area_struct *, unsigned long address,
+ unsigned int foll_flags);
+
+
+
+
+
+
+typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data);
+extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
+ unsigned long size, pte_fn_t fn, void *data);
+
+
+void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+# 1258 "include/linux/mm.h"
+extern int debug_pagealloc_enabled;
+
+extern void kernel_map_pages(struct page *page, int numpages, int enable);
+
+static inline __attribute__((always_inline)) void enable_debug_pagealloc(void)
+{
+ debug_pagealloc_enabled = 1;
+}
+# 1280 "include/linux/mm.h"
+extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
+
+
+
+
+int in_gate_area_no_task(unsigned long addr);
+
+
+
+int drop_caches_sysctl_handler(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+ unsigned long lru_pages);
+void drop_pagecache(void);
+
+
+
+
+
+
+
+const char * arch_vma_name(struct vm_area_struct *vma);
+void print_vma_addr(char *prefix, unsigned long rip);
+
+struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
+pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
+pgd_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
+pmd_t *vmemmap_pmd_populate(pgd_t *pud, unsigned long addr, int node);
+pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
+void *vmemmap_alloc_block(unsigned long size, int node);
+void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
+int vmemmap_populate_basepages(struct page *start_page,
+ unsigned long pages, int node);
+int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+void vmemmap_populate_print_last(void);
+
+extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+ size_t size);
+extern void refund_locked_memory(struct mm_struct *mm, size_t size);
+
+extern void memory_failure(unsigned long pfn, int trapno);
+extern int __memory_failure(unsigned long pfn, int trapno, int ref);
+extern int sysctl_memory_failure_early_kill;
+extern int sysctl_memory_failure_recovery;
+extern atomic_long_t mce_bad_pages;
+# 8 "include/linux/pagemap.h" 2
+
+
+# 1 "include/linux/highmem.h" 1
+
+
+
+
+
+# 1 "include/linux/uaccess.h" 1
+
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" 1
+# 15 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
+# 1 "include/linux/sched.h" 1
+# 46 "include/linux/sched.h"
+struct sched_param {
+ int sched_priority;
+};
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/param.h" 1
+# 51 "include/linux/sched.h" 2
+# 68 "include/linux/sched.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cputime.h" 1
+# 1 "include/asm-generic/cputime.h" 1
+
+
+
+
+
+
+typedef unsigned long cputime_t;
+# 25 "include/asm-generic/cputime.h"
+typedef u64 cputime64_t;
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cputime.h" 2
+# 69 "include/linux/sched.h" 2
+
+
+# 1 "include/linux/sem.h" 1
+
+
+
+# 1 "include/linux/ipc.h" 1
+# 9 "include/linux/ipc.h"
+struct ipc_perm
+{
+ __kernel_key_t key;
+ __kernel_uid_t uid;
+ __kernel_gid_t gid;
+ __kernel_uid_t cuid;
+ __kernel_gid_t cgid;
+ __kernel_mode_t mode;
+ unsigned short seq;
+};
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ipcbuf.h" 1
+# 1 "include/asm-generic/ipcbuf.h" 1
+# 19 "include/asm-generic/ipcbuf.h"
+struct ipc64_perm {
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
+ __kernel_mode_t mode;
+
+ unsigned char __pad1[4 - sizeof(__kernel_mode_t)];
+ unsigned short seq;
+ unsigned short __pad2;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/ipcbuf.h" 2
+# 22 "include/linux/ipc.h" 2
+# 57 "include/linux/ipc.h"
+struct ipc_kludge {
+ struct msgbuf *msgp;
+ long msgtyp;
+};
+# 86 "include/linux/ipc.h"
+struct kern_ipc_perm
+{
+ spinlock_t lock;
+ int deleted;
+ int id;
+ key_t key;
+ uid_t uid;
+ gid_t gid;
+ uid_t cuid;
+ gid_t cgid;
+ mode_t mode;
+ unsigned long seq;
+ void *security;
+};
+# 5 "include/linux/sem.h" 2
+# 23 "include/linux/sem.h"
+struct semid_ds {
+ struct ipc_perm sem_perm;
+ __kernel_time_t sem_otime;
+ __kernel_time_t sem_ctime;
+ struct sem *sem_base;
+ struct sem_queue *sem_pending;
+ struct sem_queue **sem_pending_last;
+ struct sem_undo *undo;
+ unsigned short sem_nsems;
+};
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sembuf.h" 1
+# 1 "include/asm-generic/sembuf.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/bitsperlong.h" 1
+# 5 "include/asm-generic/sembuf.h" 2
+# 23 "include/asm-generic/sembuf.h"
+struct semid64_ds {
+ struct ipc64_perm sem_perm;
+ __kernel_time_t sem_otime;
+
+ unsigned long __unused1;
+
+ __kernel_time_t sem_ctime;
+
+ unsigned long __unused2;
+
+ unsigned long sem_nsems;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sembuf.h" 2
+# 36 "include/linux/sem.h" 2
+
+
+struct sembuf {
+ unsigned short sem_num;
+ short sem_op;
+ short sem_flg;
+};
+
+
+union semun {
+ int val;
+ struct semid_ds *buf;
+ unsigned short *array;
+ struct seminfo *__buf;
+ void *__pad;
+};
+
+struct seminfo {
+ int semmap;
+ int semmni;
+ int semmns;
+ int semmnu;
+ int semmsl;
+ int semopm;
+ int semume;
+ int semusz;
+ int semvmx;
+ int semaem;
+};
+# 83 "include/linux/sem.h"
+struct task_struct;
+
+
+struct sem {
+ int semval;
+ int sempid;
+};
+
+
+struct sem_array {
+ struct kern_ipc_perm sem_perm;
+ time_t sem_otime;
+ time_t sem_ctime;
+ struct sem *sem_base;
+ struct list_head sem_pending;
+ struct list_head list_id;
+ unsigned long sem_nsems;
+};
+
+
+struct sem_queue {
+ struct list_head list;
+ struct task_struct *sleeper;
+ struct sem_undo *undo;
+ int pid;
+ int status;
+ struct sembuf *sops;
+ int nsops;
+ int alter;
+};
+
+
+
+
+struct sem_undo {
+ struct list_head list_proc;
+
+ struct rcu_head rcu;
+ struct sem_undo_list *ulp;
+ struct list_head list_id;
+ int semid;
+ short * semadj;
+};
+
+
+
+
+struct sem_undo_list {
+ atomic_t refcnt;
+ spinlock_t lock;
+ struct list_head list_proc;
+};
+
+struct sysv_sem {
+ struct sem_undo_list *undo_list;
+};
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
+{
+ return 0;
+}
+
+static inline __attribute__((always_inline)) void exit_sem(struct task_struct *tsk)
+{
+ return;
+}
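+
+/*
+ * copy_semundo()/exit_sem() collapse to no-ops here, which suggests the
+ * configuration being preprocessed has System V IPC support disabled;
+ * the semaphore structures above are still declared unconditionally.
+ */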
+# 72 "include/linux/sched.h" 2
+# 1 "include/linux/signal.h" 1
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h" 1
+
+
+
+
+# 1 "include/asm-generic/signal.h" 1
+# 93 "include/asm-generic/signal.h"
+typedef struct {
+ unsigned long sig[(64 / 32)];
+} sigset_t;
+
+
+typedef unsigned long old_sigset_t;
+
+# 1 "include/asm-generic/signal-defs.h" 1
+# 17 "include/asm-generic/signal-defs.h"
+typedef void __signalfn_t(int);
+typedef __signalfn_t *__sighandler_t;
+
+typedef void __restorefn_t(void);
+typedef __restorefn_t *__sigrestore_t;
+# 101 "include/asm-generic/signal.h" 2
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+
+ __sigrestore_t sa_restorer;
+
+ sigset_t sa_mask;
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+};
+
+typedef struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sigcontext.h" 1
+# 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sigcontext.h"
+struct sigcontext {
+ unsigned long sc_r0;
+ unsigned long sc_r1;
+ unsigned long sc_r2;
+ unsigned long sc_r3;
+ unsigned long sc_r4;
+ unsigned long sc_r5;
+ unsigned long sc_r6;
+ unsigned long sc_r7;
+ unsigned long sc_p0;
+ unsigned long sc_p1;
+ unsigned long sc_p2;
+ unsigned long sc_p3;
+ unsigned long sc_p4;
+ unsigned long sc_p5;
+ unsigned long sc_usp;
+ unsigned long sc_a0w;
+ unsigned long sc_a1w;
+ unsigned long sc_a0x;
+ unsigned long sc_a1x;
+ unsigned long sc_astat;
+ unsigned long sc_rets;
+ unsigned long sc_pc;
+ unsigned long sc_retx;
+ unsigned long sc_fp;
+ unsigned long sc_i0;
+ unsigned long sc_i1;
+ unsigned long sc_i2;
+ unsigned long sc_i3;
+ unsigned long sc_m0;
+ unsigned long sc_m1;
+ unsigned long sc_m2;
+ unsigned long sc_m3;
+ unsigned long sc_l0;
+ unsigned long sc_l1;
+ unsigned long sc_l2;
+ unsigned long sc_l3;
+ unsigned long sc_b0;
+ unsigned long sc_b1;
+ unsigned long sc_b2;
+ unsigned long sc_b3;
+ unsigned long sc_lc0;
+ unsigned long sc_lc1;
+ unsigned long sc_lt0;
+ unsigned long sc_lt1;
+ unsigned long sc_lb0;
+ unsigned long sc_lb1;
+ unsigned long sc_seqstat;
+};
+# 124 "include/asm-generic/signal.h" 2
+# 6 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/signal.h" 2
+# 5 "include/linux/signal.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/siginfo.h" 1
+# 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/siginfo.h"
+# 1 "include/asm-generic/siginfo.h" 1
+
+
+
+
+
+
+typedef union sigval {
+ int sival_int;
+ void *sival_ptr;
+} sigval_t;
+# 40 "include/asm-generic/siginfo.h"
+typedef struct siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[((128 - (3 * sizeof(int))) / sizeof(int))];
+
+
+ struct {
+ __kernel_pid_t _pid;
+ __kernel_uid32_t _uid;
+ } _kill;
+
+
+ struct {
+ __kernel_timer_t _tid;
+ int _overrun;
+ char _pad[sizeof( __kernel_uid32_t) - sizeof(int)];
+ sigval_t _sigval;
+ int _sys_private;
+ } _timer;
+
+
+ struct {
+ __kernel_pid_t _pid;
+ __kernel_uid32_t _uid;
+ sigval_t _sigval;
+ } _rt;
+
+
+ struct {
+ __kernel_pid_t _pid;
+ __kernel_uid32_t _uid;
+ int _status;
+ __kernel_clock_t _utime;
+ __kernel_clock_t _stime;
+ } _sigchld;
+
+
+ struct {
+ void *_addr;
+
+
+
+ short _addr_lsb;
+ } _sigfault;
+
+
+ struct {
+ long _band;
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} siginfo_t;
+# 259 "include/asm-generic/siginfo.h"
+typedef struct sigevent {
+ sigval_t sigev_value;
+ int sigev_signo;
+ int sigev_notify;
+ union {
+ int _pad[((64 - (sizeof(int) * 2 + sizeof(sigval_t))) / sizeof(int))];
+ int _tid;
+
+ struct {
+ void (*_function)(sigval_t);
+ void *_attribute;
+ } _sigev_thread;
+ } _sigev_un;
+} sigevent_t;
+
+
+
+
+
+
+
+struct siginfo;
+void do_schedule_next_timer(struct siginfo *info);
+
+
+
+
+
+static inline __attribute__((always_inline)) void copy_siginfo(struct siginfo *to, struct siginfo *from)
+{
+ if (from->si_code < 0)
+ memcpy(to, from, sizeof(*to));
+ else
+
+ memcpy(to, from, (3 * sizeof(int)) + sizeof(from->_sifields._sigchld));
+}
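+
+/*
+ * copy_siginfo(): a negative si_code (the sigqueue/timer family of
+ * codes) carries caller-supplied payload, so the whole structure is
+ * copied; for the other codes only the three header ints plus the
+ * largest union member in use (_sigchld) need to be copied.
+ */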
+
+
+
+extern int copy_siginfo_to_user(struct siginfo *to, struct siginfo *from);
+# 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/siginfo.h" 2
+# 6 "include/linux/signal.h" 2
+# 14 "include/linux/signal.h"
+struct sigqueue {
+ struct list_head list;
+ int flags;
+ siginfo_t info;
+ struct user_struct *user;
+};
+
+
+
+
+struct sigpending {
+ struct list_head list;
+ sigset_t signal;
+};
+# 38 "include/linux/signal.h"
+static inline __attribute__((always_inline)) void sigaddset(sigset_t *set, int _sig)
+{
+ unsigned long sig = _sig - 1;
+ if ((64 / 32) == 1)
+ set->sig[0] |= 1UL << sig;
+ else
+ set->sig[sig / 32] |= 1UL << (sig % 32);
+}
+
+static inline __attribute__((always_inline)) void sigdelset(sigset_t *set, int _sig)
+{
+ unsigned long sig = _sig - 1;
+ if ((64 / 32) == 1)
+ set->sig[0] &= ~(1UL << sig);
+ else
+ set->sig[sig / 32] &= ~(1UL << (sig % 32));
+}
+
+static inline __attribute__((always_inline)) int sigismember(sigset_t *set, int _sig)
+{
+ unsigned long sig = _sig - 1;
+ if ((64 / 32) == 1)
+ return 1 & (set->sig[0] >> sig);
+ else
+ return 1 & (set->sig[sig / 32] >> (sig % 32));
+}
+
+static inline __attribute__((always_inline)) int sigfindinword(unsigned long word)
+{
+ return __ffs(~(~word));
+}
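+
+/*
+ * Signal numbers are 1-based, so each helper first maps _sig to a
+ * zero-based bit index.  With the constants expanded, 64 / 32 == 2,
+ * i.e. the 64 signals of this 32-bit target live in two unsigned longs.
+ * Illustrative sketch (hypothetical caller, not from this file):
+ *
+ *	sigset_t set;
+ *	sigemptyset(&set);
+ *	sigaddset(&set, SIGTERM);
+ *	if (sigismember(&set, SIGTERM))
+ *		...;
+ */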
+
+
+
+static inline __attribute__((always_inline)) int sigisemptyset(sigset_t *set)
+{
+ extern void _NSIG_WORDS_is_unsupported_size(void);
+ switch ((64 / 32)) {
+ case 4:
+ return (set->sig[3] | set->sig[2] |
+ set->sig[1] | set->sig[0]) == 0;
+ case 2:
+ return (set->sig[1] | set->sig[0]) == 0;
+ case 1:
+ return set->sig[0] == 0;
+ default:
+ _NSIG_WORDS_is_unsupported_size();
+ return 0;
+ }
+}
+# 119 "include/linux/signal.h"
+static inline __attribute__((always_inline)) void sigorsets(sigset_t *r, const sigset_t *a, const sigset_t *b)
+{
+ extern void _NSIG_WORDS_is_unsupported_size(void);
+ unsigned long a0, a1, a2, a3, b0, b1, b2, b3;
+ switch ((64 / 32)) {
+ case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2];
+ r->sig[3] = ((a3) | (b3)); r->sig[2] = ((a2) | (b2));
+ case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) | (b1));
+ case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) | (b0));
+ break;
+ default: _NSIG_WORDS_is_unsupported_size();
+ }
+}
+
+
+static inline __attribute__((always_inline)) void sigandsets(sigset_t *r, const sigset_t *a, const sigset_t *b)
+{
+ extern void _NSIG_WORDS_is_unsupported_size(void);
+ unsigned long a0, a1, a2, a3, b0, b1, b2, b3;
+ switch ((64 / 32)) {
+ case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2];
+ r->sig[3] = ((a3) & (b3)); r->sig[2] = ((a2) & (b2));
+ case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & (b1));
+ case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & (b0));
+ break;
+ default: _NSIG_WORDS_is_unsupported_size();
+ }
+}
+
+
+static inline __attribute__((always_inline)) void signandsets(sigset_t *r, const sigset_t *a, const sigset_t *b)
+{
+ extern void _NSIG_WORDS_is_unsupported_size(void);
+ unsigned long a0, a1, a2, a3, b0, b1, b2, b3;
+ switch ((64 / 32)) {
+ case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2];
+ r->sig[3] = ((a3) & ~(b3)); r->sig[2] = ((a2) & ~(b2));
+ case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & ~(b1));
+ case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & ~(b0));
+ break;
+ default: _NSIG_WORDS_is_unsupported_size();
+ }
+}
+# 149 "include/linux/signal.h"
+static inline __attribute__((always_inline)) void signotset(sigset_t *set)
+{
+ extern void _NSIG_WORDS_is_unsupported_size(void);
+ switch ((64 / 32)) {
+ case 4: set->sig[3] = (~(set->sig[3])); set->sig[2] = (~(set->sig[2]));
+ case 2: set->sig[1] = (~(set->sig[1]));
+ case 1: set->sig[0] = (~(set->sig[0]));
+ break;
+ default: _NSIG_WORDS_is_unsupported_size();
+ }
+}
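+
+/*
+ * sigorsets/sigandsets/signandsets/signotset are generated from a
+ * common macro, which is why each case in the expanded switch
+ * deliberately falls through to the next: case 4 handles words 3..2,
+ * then drops into the 2-word and 1-word cases below it.
+ */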
+
+
+
+
+static inline __attribute__((always_inline)) void sigemptyset(sigset_t *set)
+{
+ switch ((64 / 32)) {
+ default:
+ memset(set, 0, sizeof(sigset_t));
+ break;
+ case 2: set->sig[1] = 0;
+ case 1: set->sig[0] = 0;
+ break;
+ }
+}
+
+static inline __attribute__((always_inline)) void sigfillset(sigset_t *set)
+{
+ switch ((64 / 32)) {
+ default:
+ memset(set, -1, sizeof(sigset_t));
+ break;
+ case 2: set->sig[1] = -1;
+ case 1: set->sig[0] = -1;
+ break;
+ }
+}
+
+
+
+static inline __attribute__((always_inline)) void sigaddsetmask(sigset_t *set, unsigned long mask)
+{
+ set->sig[0] |= mask;
+}
+
+static inline __attribute__((always_inline)) void sigdelsetmask(sigset_t *set, unsigned long mask)
+{
+ set->sig[0] &= ~mask;
+}
+
+static inline __attribute__((always_inline)) int sigtestsetmask(sigset_t *set, unsigned long mask)
+{
+ return (set->sig[0] & mask) != 0;
+}
+
+static inline __attribute__((always_inline)) void siginitset(sigset_t *set, unsigned long mask)
+{
+ set->sig[0] = mask;
+ switch ((64 / 32)) {
+ default:
+ memset(&set->sig[1], 0, sizeof(long)*((64 / 32)-1));
+ break;
+ case 2: set->sig[1] = 0;
+ case 1: ;
+ }
+}
+
+static inline __attribute__((always_inline)) void siginitsetinv(sigset_t *set, unsigned long mask)
+{
+ set->sig[0] = ~mask;
+ switch ((64 / 32)) {
+ default:
+ memset(&set->sig[1], -1, sizeof(long)*((64 / 32)-1));
+ break;
+ case 2: set->sig[1] = -1;
+ case 1: ;
+ }
+}
+
+
+
+static inline __attribute__((always_inline)) void init_sigpending(struct sigpending *sig)
+{
+ sigemptyset(&sig->signal);
+ INIT_LIST_HEAD(&sig->list);
+}
+
+extern void flush_sigqueue(struct sigpending *queue);
+
+
+static inline __attribute__((always_inline)) int valid_signal(unsigned long sig)
+{
+ return sig <= 64 ? 1 : 0;
+}
+
+extern int next_signal(struct sigpending *pending, sigset_t *mask);
+extern int do_send_sig_info(int sig, struct siginfo *info,
+ struct task_struct *p, bool group);
+extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
+extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
+extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
+ siginfo_t *info);
+extern long do_sigpending(void *, unsigned long);
+extern int sigprocmask(int, sigset_t *, sigset_t *);
+extern int show_unhandled_signals;
+
+struct pt_regs;
+extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
+extern void exit_signals(struct task_struct *tsk);
+
+extern struct kmem_cache *sighand_cachep;
+
+int unhandled_signal(struct task_struct *tsk, int sig);
+# 377 "include/linux/signal.h"
+void signals_init(void);
+# 73 "include/linux/sched.h" 2
+
+
+
+
+
+
+# 1 "include/linux/proportions.h" 1
+# 12 "include/linux/proportions.h"
+# 1 "include/linux/percpu_counter.h" 1
+# 82 "include/linux/percpu_counter.h"
+struct percpu_counter {
+ s64 count;
+};
+
+static inline __attribute__((always_inline)) int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+{
+ fbc->count = amount;
+ return 0;
+}
+
+static inline __attribute__((always_inline)) void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+}
+
+static inline __attribute__((always_inline)) void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
+{
+ fbc->count = amount;
+}
+
+
+
+
+static inline __attribute__((always_inline)) void
+percpu_counter_add(struct percpu_counter *fbc, s64 amount)
+{
+ do { } while (0);
+ fbc->count += amount;
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) s64 percpu_counter_read(struct percpu_counter *fbc)
+{
+ return fbc->count;
+}
+
+static inline __attribute__((always_inline)) s64 percpu_counter_read_positive(struct percpu_counter *fbc)
+{
+ return fbc->count;
+}
+
+static inline __attribute__((always_inline)) s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
+{
+ return percpu_counter_read_positive(fbc);
+}
+
+static inline __attribute__((always_inline)) s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+ return percpu_counter_read(fbc);
+}
+
+
+
+static inline __attribute__((always_inline)) void percpu_counter_inc(struct percpu_counter *fbc)
+{
+ percpu_counter_add(fbc, 1);
+}
+
+static inline __attribute__((always_inline)) void percpu_counter_dec(struct percpu_counter *fbc)
+{
+ percpu_counter_add(fbc, -1);
+}
+
+static inline __attribute__((always_inline)) void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add(fbc, -amount);
+}
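+
+/*
+ * This is the uniprocessor (!CONFIG_SMP) flavour of percpu_counter: the
+ * count is a plain s64, the per-cpu batching and locking of the SMP
+ * version are compiled away, and the _sum()/_read() variants are
+ * therefore exact and identical.
+ */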
+# 13 "include/linux/proportions.h" 2
+
+
+
+struct prop_global {
+
+
+
+
+
+ int shift;
+
+
+
+
+
+
+ struct percpu_counter events;
+};
+
+
+
+
+
+
+struct prop_descriptor {
+ int index;
+ struct prop_global pg[2];
+ struct mutex mutex;
+};
+
+int prop_descriptor_init(struct prop_descriptor *pd, int shift);
+void prop_change_shift(struct prop_descriptor *pd, int new_shift);
+
+
+
+
+
+struct prop_local_percpu {
+
+
+
+ struct percpu_counter events;
+
+
+
+
+ int shift;
+ unsigned long period;
+ spinlock_t lock;
+};
+
+int prop_local_init_percpu(struct prop_local_percpu *pl);
+void prop_local_destroy_percpu(struct prop_local_percpu *pl);
+void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
+void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
+ long *numerator, long *denominator);
+
+static inline __attribute__((always_inline))
+void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
+{
+ unsigned long flags;
+
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0);
+ __prop_inc_percpu(pd, pl);
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
+}
+# 89 "include/linux/proportions.h"
+void __prop_inc_percpu_max(struct prop_descriptor *pd,
+ struct prop_local_percpu *pl, long frac);
+
+
+
+
+
+
+struct prop_local_single {
+
+
+
+ unsigned long events;
+
+
+
+
+
+ unsigned long period;
+ int shift;
+ spinlock_t lock;
+};
+
+
+
+
+
+int prop_local_init_single(struct prop_local_single *pl);
+void prop_local_destroy_single(struct prop_local_single *pl);
+void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
+void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
+ long *numerator, long *denominator);
+
+static inline __attribute__((always_inline))
+void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
+{
+ unsigned long flags;
+
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); do { (flags) = __raw_local_irq_save(); } while (0); do { } while (0); } while (0);
+ __prop_inc_single(pd, pl);
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if ((((flags) & ~0x3f) == 0)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
+}
+# 80 "include/linux/sched.h" 2
+# 1 "include/linux/seccomp.h" 1
+# 26 "include/linux/seccomp.h"
+typedef struct { } seccomp_t;
+
+
+
+static inline __attribute__((always_inline)) long prctl_get_seccomp(void)
+{
+ return -22;
+}
+
+static inline __attribute__((always_inline)) long prctl_set_seccomp(unsigned long arg2)
+{
+ return -22;
+}
+# 81 "include/linux/sched.h" 2
+
+
+# 1 "include/linux/rtmutex.h" 1
+# 16 "include/linux/rtmutex.h"
+# 1 "include/linux/plist.h" 1
+# 80 "include/linux/plist.h"
+struct plist_head {
+ struct list_head prio_list;
+ struct list_head node_list;
+
+
+
+};
+
+struct plist_node {
+ int prio;
+ struct plist_head plist;
+};
+# 130 "include/linux/plist.h"
+static inline __attribute__((always_inline)) void
+plist_head_init(struct plist_head *head, spinlock_t *lock)
+{
+ INIT_LIST_HEAD(&head->prio_list);
+ INIT_LIST_HEAD(&head->node_list);
+
+
+
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void plist_node_init(struct plist_node *node, int prio)
+{
+ node->prio = prio;
+ plist_head_init(&node->plist, ((void *)0));
+}
+
+extern void plist_add(struct plist_node *node, struct plist_head *head);
+extern void plist_del(struct plist_node *node, struct plist_head *head);
+# 198 "include/linux/plist.h"
+static inline __attribute__((always_inline)) int plist_head_empty(const struct plist_head *head)
+{
+ return list_empty(&head->node_list);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) int plist_node_empty(const struct plist_node *node)
+{
+ return plist_head_empty(&node->plist);
+}
+# 237 "include/linux/plist.h"
+static inline __attribute__((always_inline)) struct plist_node* plist_first(const struct plist_head *head)
+{
+ return ({ const typeof( ((struct plist_node *)0)->plist.node_list ) *__mptr = (head->node_list.next); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct plist_node,plist.node_list) );});
+
+}
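+
+/*
+ * plist is a priority-sorted list (lowest prio value = highest
+ * priority); plist_first() hands back the highest-priority node, and
+ * the ({ ... }) body is simply the expanded container_of() turning the
+ * embedded list_head back into a struct plist_node.
+ */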
+# 17 "include/linux/rtmutex.h" 2
+# 26 "include/linux/rtmutex.h"
+struct rt_mutex {
+ spinlock_t wait_lock;
+ struct plist_head wait_list;
+ struct task_struct *owner;
+
+
+
+
+
+
+};
+
+struct rt_mutex_waiter;
+struct hrtimer_sleeper;
+
+
+
+
+
+
+ static inline __attribute__((always_inline)) int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len)
+ {
+ return 0;
+ }
+# 80 "include/linux/rtmutex.h"
+static inline __attribute__((always_inline)) int rt_mutex_is_locked(struct rt_mutex *lock)
+{
+ return lock->owner != ((void *)0);
+}
+
+extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void rt_mutex_destroy(struct rt_mutex *lock);
+
+extern void rt_mutex_lock(struct rt_mutex *lock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
+ int detect_deadlock);
+extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *timeout,
+ int detect_deadlock);
+
+extern int rt_mutex_trylock(struct rt_mutex *lock);
+
+extern void rt_mutex_unlock(struct rt_mutex *lock);
+# 84 "include/linux/sched.h" 2
+
+
+
+# 1 "include/linux/resource.h" 1
+
+
+
+
+
+struct task_struct;
+# 24 "include/linux/resource.h"
+struct rusage {
+ struct timeval ru_utime;
+ struct timeval ru_stime;
+ long ru_maxrss;
+ long ru_ixrss;
+ long ru_idrss;
+ long ru_isrss;
+ long ru_minflt;
+ long ru_majflt;
+ long ru_nswap;
+ long ru_inblock;
+ long ru_oublock;
+ long ru_msgsnd;
+ long ru_msgrcv;
+ long ru_nsignals;
+ long ru_nvcsw;
+ long ru_nivcsw;
+};
+
+struct rlimit {
+ unsigned long rlim_cur;
+ unsigned long rlim_max;
+};
+# 71 "include/linux/resource.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/resource.h" 1
+# 1 "include/asm-generic/resource.h" 1
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/resource.h" 2
+# 72 "include/linux/resource.h" 2
+
+int getrusage(struct task_struct *p, int who, struct rusage *ru);
+# 88 "include/linux/sched.h" 2
+
+# 1 "include/linux/hrtimer.h" 1
+# 27 "include/linux/hrtimer.h"
+struct hrtimer_clock_base;
+struct hrtimer_cpu_base;
+
+
+
+
+enum hrtimer_mode {
+ HRTIMER_MODE_ABS = 0x0,
+ HRTIMER_MODE_REL = 0x1,
+ HRTIMER_MODE_PINNED = 0x02,
+ HRTIMER_MODE_ABS_PINNED = 0x02,
+ HRTIMER_MODE_REL_PINNED = 0x03,
+};
+
+
+
+
+enum hrtimer_restart {
+ HRTIMER_NORESTART,
+ HRTIMER_RESTART,
+};
+# 103 "include/linux/hrtimer.h"
+struct hrtimer {
+ struct rb_node node;
+ ktime_t _expires;
+ ktime_t _softexpires;
+ enum hrtimer_restart (*function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ unsigned long state;
+
+ int start_pid;
+ void *start_site;
+ char start_comm[16];
+
+};
+# 124 "include/linux/hrtimer.h"
+struct hrtimer_sleeper {
+ struct hrtimer timer;
+ struct task_struct *task;
+};
+# 141 "include/linux/hrtimer.h"
+struct hrtimer_clock_base {
+ struct hrtimer_cpu_base *cpu_base;
+ clockid_t index;
+ struct rb_root active;
+ struct rb_node *first;
+ ktime_t resolution;
+ ktime_t (*get_time)(void);
+ ktime_t softirq_time;
+
+ ktime_t offset;
+
+};
+# 170 "include/linux/hrtimer.h"
+struct hrtimer_cpu_base {
+ spinlock_t lock;
+ struct hrtimer_clock_base clock_base[2];
+
+ ktime_t expires_next;
+ int hres_active;
+ unsigned long nr_events;
+
+};
+
+static inline __attribute__((always_inline)) void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
+{
+ timer->_expires = time;
+ timer->_softexpires = time;
+}
+
+static inline __attribute__((always_inline)) void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
+{
+ timer->_softexpires = time;
+ timer->_expires = ktime_add_safe(time, delta);
+}
+
+static inline __attribute__((always_inline)) void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
+{
+ timer->_softexpires = time;
+ timer->_expires = ktime_add_safe(time, ns_to_ktime(delta));
+}
+
+static inline __attribute__((always_inline)) void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
+{
+ timer->_expires.tv64 = tv64;
+ timer->_softexpires.tv64 = tv64;
+}
+
+static inline __attribute__((always_inline)) void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
+{
+ timer->_expires = ktime_add_safe(timer->_expires, time);
+ timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
+}
+
+static inline __attribute__((always_inline)) void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
+{
+ timer->_expires = ktime_add_ns(timer->_expires, ns);
+ timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
+}
+
+static inline __attribute__((always_inline)) ktime_t hrtimer_get_expires(const struct hrtimer *timer)
+{
+ return timer->_expires;
+}
+
+static inline __attribute__((always_inline)) ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
+{
+ return timer->_softexpires;
+}
+
+static inline __attribute__((always_inline)) s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
+{
+ return timer->_expires.tv64;
+}
+static inline __attribute__((always_inline)) s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
+{
+ return timer->_softexpires.tv64;
+}
+
+static inline __attribute__((always_inline)) s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
+{
+ return ktime_to_ns(timer->_expires);
+}
+
+static inline __attribute__((always_inline)) ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
+{
+ return ktime_sub(timer->_expires, timer->base->get_time());
+}
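+
+/*
+ * Every hrtimer carries a hard expiry (_expires) and a soft one
+ * (_softexpires <= _expires); the gap is the slack the timer code may
+ * use to batch expirations.  The helpers above keep the two fields in
+ * sync and convert between ktime_t and raw nanoseconds.
+ */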
+
+
+struct clock_event_device;
+
+extern void clock_was_set(void);
+extern void hres_timers_resume(void);
+extern void hrtimer_interrupt(struct clock_event_device *dev);
+
+
+
+
+static inline __attribute__((always_inline)) ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
+{
+ return timer->base->get_time();
+}
+
+static inline __attribute__((always_inline)) int hrtimer_is_hres_active(struct hrtimer *timer)
+{
+ return timer->base->cpu_base->hres_active;
+}
+
+extern void hrtimer_peek_ahead_timers(void);
+# 308 "include/linux/hrtimer.h"
+extern ktime_t ktime_get(void);
+extern ktime_t ktime_get_real(void);
+
+
+extern __attribute__((section(".discard"), unused)) char __pcpu_scope_tick_cpu_device; extern __attribute__((section(".data" ""))) __typeof__(struct tick_device) per_cpu__tick_cpu_device;
+
+
+
+
+
+extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
+ enum hrtimer_mode mode);
+
+
+extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
+ enum hrtimer_mode mode);
+
+extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
+# 337 "include/linux/hrtimer.h"
+extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode);
+extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ unsigned long range_ns, const enum hrtimer_mode mode);
+extern int
+__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ unsigned long delta_ns,
+ const enum hrtimer_mode mode, int wakeup);
+
+extern int hrtimer_cancel(struct hrtimer *timer);
+extern int hrtimer_try_to_cancel(struct hrtimer *timer);
+
+static inline __attribute__((always_inline)) int hrtimer_start_expires(struct hrtimer *timer,
+ enum hrtimer_mode mode)
+{
+ unsigned long delta;
+ ktime_t soft, hard;
+ soft = hrtimer_get_softexpires(timer);
+ hard = hrtimer_get_expires(timer);
+ delta = ktime_to_ns(ktime_sub(hard, soft));
+ return hrtimer_start_range_ns(timer, soft, delta, mode);
+}
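+
+/*
+ * hrtimer_start_expires() restarts a timer on its previously programmed
+ * range: the slack is recovered as hard - soft and handed back to
+ * hrtimer_start_range_ns(), which is also what hrtimer_restart() relies
+ * on for HRTIMER_MODE_ABS.
+ */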
+
+static inline __attribute__((always_inline)) int hrtimer_restart(struct hrtimer *timer)
+{
+ return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+}
+
+
+extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
+extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
+
+extern ktime_t hrtimer_get_next_event(void);
+
+
+
+
+
+static inline __attribute__((always_inline)) int hrtimer_active(const struct hrtimer *timer)
+{
+ return timer->state != 0x00;
+}
+
+
+
+
+static inline __attribute__((always_inline)) int hrtimer_is_queued(struct hrtimer *timer)
+{
+ return timer->state & 0x01;
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) int hrtimer_callback_running(struct hrtimer *timer)
+{
+ return timer->state & 0x02;
+}
+
+
+extern u64
+hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
+
+
+static inline __attribute__((always_inline)) u64 hrtimer_forward_now(struct hrtimer *timer,
+ ktime_t interval)
+{
+ return hrtimer_forward(timer, timer->base->get_time(), interval);
+}
+
+
+extern long hrtimer_nanosleep(struct timespec *rqtp,
+ struct timespec *rmtp,
+ const enum hrtimer_mode mode,
+ const clockid_t clockid);
+extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
+
+extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
+ struct task_struct *tsk);
+
+extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+ const enum hrtimer_mode mode);
+extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
+
+
+extern void hrtimer_run_queues(void);
+extern void hrtimer_run_pending(void);
+
+
+extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) hrtimers_init(void);
+
+
+extern u64 ktime_divns(const ktime_t kt, s64 div);
+
+
+
+
+
+extern void sysrq_timer_list_show(void);
+
+
+
+
+
+
+extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
+ void *timerf, char *comm,
+ unsigned int timer_flag);
+
+static inline __attribute__((always_inline)) void timer_stats_account_hrtimer(struct hrtimer *timer)
+{
+ if (__builtin_expect(!!(!timer->start_site), 1))
+ return;
+ timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+ timer->function, timer->start_comm, 0);
+}
+
+extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
+ void *addr);
+
+static inline __attribute__((always_inline)) void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
+{
+ if (__builtin_expect(!!(!timer_stats_active), 1))
+ return;
+ __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
+}
+
+static inline __attribute__((always_inline)) void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
+{
+ timer->start_site = ((void *)0);
+}
+# 90 "include/linux/sched.h" 2
+# 1 "include/linux/task_io_accounting.h" 1
+# 11 "include/linux/task_io_accounting.h"
+struct task_io_accounting {
+# 45 "include/linux/task_io_accounting.h"
+};
+# 91 "include/linux/sched.h" 2
+
+# 1 "include/linux/latencytop.h" 1
+# 41 "include/linux/latencytop.h"
+static inline __attribute__((always_inline)) void
+account_scheduler_latency(struct task_struct *task, int usecs, int inter)
+{
+}
+
+static inline __attribute__((always_inline)) void clear_all_latency_tracing(struct task_struct *p)
+{
+}
+# 93 "include/linux/sched.h" 2
+# 1 "include/linux/cred.h" 1
+# 17 "include/linux/cred.h"
+# 1 "include/linux/key.h" 1
+# 22 "include/linux/key.h"
+# 1 "include/linux/sysctl.h" 1
+# 32 "include/linux/sysctl.h"
+struct completion;
+
+
+
+
+
+
+struct __sysctl_args {
+ int *name;
+ int nlen;
+ void *oldval;
+ size_t *oldlenp;
+ void *newval;
+ size_t newlen;
+ unsigned long __unused[4];
+};
+# 59 "include/linux/sysctl.h"
+enum
+{
+ CTL_KERN=1,
+ CTL_VM=2,
+ CTL_NET=3,
+ CTL_PROC=4,
+ CTL_FS=5,
+ CTL_DEBUG=6,
+ CTL_DEV=7,
+ CTL_BUS=8,
+ CTL_ABI=9,
+ CTL_CPU=10,
+ CTL_ARLAN=254,
+ CTL_S390DBF=5677,
+ CTL_SUNRPC=7249,
+ CTL_PM=9899,
+ CTL_FRV=9898,
+};
+
+
+enum
+{
+ CTL_BUS_ISA=1
+};
+
+
+enum
+{
+ INOTIFY_MAX_USER_INSTANCES=1,
+ INOTIFY_MAX_USER_WATCHES=2,
+ INOTIFY_MAX_QUEUED_EVENTS=3
+};
+
+
+enum
+{
+ KERN_OSTYPE=1,
+ KERN_OSRELEASE=2,
+ KERN_OSREV=3,
+ KERN_VERSION=4,
+ KERN_SECUREMASK=5,
+ KERN_PROF=6,
+ KERN_NODENAME=7,
+ KERN_DOMAINNAME=8,
+
+ KERN_PANIC=15,
+ KERN_REALROOTDEV=16,
+
+ KERN_SPARC_REBOOT=21,
+ KERN_CTLALTDEL=22,
+ KERN_PRINTK=23,
+ KERN_NAMETRANS=24,
+ KERN_PPC_HTABRECLAIM=25,
+ KERN_PPC_ZEROPAGED=26,
+ KERN_PPC_POWERSAVE_NAP=27,
+ KERN_MODPROBE=28,
+ KERN_SG_BIG_BUFF=29,
+ KERN_ACCT=30,
+ KERN_PPC_L2CR=31,
+
+ KERN_RTSIGNR=32,
+ KERN_RTSIGMAX=33,
+
+ KERN_SHMMAX=34,
+ KERN_MSGMAX=35,
+ KERN_MSGMNB=36,
+ KERN_MSGPOOL=37,
+ KERN_SYSRQ=38,
+ KERN_MAX_THREADS=39,
+ KERN_RANDOM=40,
+ KERN_SHMALL=41,
+ KERN_MSGMNI=42,
+ KERN_SEM=43,
+ KERN_SPARC_STOP_A=44,
+ KERN_SHMMNI=45,
+ KERN_OVERFLOWUID=46,
+ KERN_OVERFLOWGID=47,
+ KERN_SHMPATH=48,
+ KERN_HOTPLUG=49,
+ KERN_IEEE_EMULATION_WARNINGS=50,
+ KERN_S390_USER_DEBUG_LOGGING=51,
+ KERN_CORE_USES_PID=52,
+ KERN_TAINTED=53,
+ KERN_CADPID=54,
+ KERN_PIDMAX=55,
+ KERN_CORE_PATTERN=56,
+ KERN_PANIC_ON_OOPS=57,
+ KERN_HPPA_PWRSW=58,
+ KERN_HPPA_UNALIGNED=59,
+ KERN_PRINTK_RATELIMIT=60,
+ KERN_PRINTK_RATELIMIT_BURST=61,
+ KERN_PTY=62,
+ KERN_NGROUPS_MAX=63,
+ KERN_SPARC_SCONS_PWROFF=64,
+ KERN_HZ_TIMER=65,
+ KERN_UNKNOWN_NMI_PANIC=66,
+ KERN_BOOTLOADER_TYPE=67,
+ KERN_RANDOMIZE=68,
+ KERN_SETUID_DUMPABLE=69,
+ KERN_SPIN_RETRY=70,
+ KERN_ACPI_VIDEO_FLAGS=71,
+ KERN_IA64_UNALIGNED=72,
+ KERN_COMPAT_LOG=73,
+ KERN_MAX_LOCK_DEPTH=74,
+ KERN_NMI_WATCHDOG=75,
+ KERN_PANIC_ON_NMI=76,
+};
+
+
+
+
+enum
+{
+ VM_UNUSED1=1,
+ VM_UNUSED2=2,
+ VM_UNUSED3=3,
+ VM_UNUSED4=4,
+ VM_OVERCOMMIT_MEMORY=5,
+ VM_UNUSED5=6,
+ VM_UNUSED7=7,
+ VM_UNUSED8=8,
+ VM_UNUSED9=9,
+ VM_PAGE_CLUSTER=10,
+ VM_DIRTY_BACKGROUND=11,
+ VM_DIRTY_RATIO=12,
+ VM_DIRTY_WB_CS=13,
+ VM_DIRTY_EXPIRE_CS=14,
+ VM_NR_PDFLUSH_THREADS=15,
+ VM_OVERCOMMIT_RATIO=16,
+ VM_PAGEBUF=17,
+ VM_HUGETLB_PAGES=18,
+ VM_SWAPPINESS=19,
+ VM_LOWMEM_RESERVE_RATIO=20,
+ VM_MIN_FREE_KBYTES=21,
+ VM_MAX_MAP_COUNT=22,
+ VM_LAPTOP_MODE=23,
+ VM_BLOCK_DUMP=24,
+ VM_HUGETLB_GROUP=25,
+ VM_VFS_CACHE_PRESSURE=26,
+ VM_LEGACY_VA_LAYOUT=27,
+ VM_SWAP_TOKEN_TIMEOUT=28,
+ VM_DROP_PAGECACHE=29,
+ VM_PERCPU_PAGELIST_FRACTION=30,
+ VM_ZONE_RECLAIM_MODE=31,
+ VM_MIN_UNMAPPED=32,
+ VM_PANIC_ON_OOM=33,
+ VM_VDSO_ENABLED=34,
+ VM_MIN_SLAB=35,
+};
+
+
+
+enum
+{
+ NET_CORE=1,
+ NET_ETHER=2,
+ NET_802=3,
+ NET_UNIX=4,
+ NET_IPV4=5,
+ NET_IPX=6,
+ NET_ATALK=7,
+ NET_NETROM=8,
+ NET_AX25=9,
+ NET_BRIDGE=10,
+ NET_ROSE=11,
+ NET_IPV6=12,
+ NET_X25=13,
+ NET_TR=14,
+ NET_DECNET=15,
+ NET_ECONET=16,
+ NET_SCTP=17,
+ NET_LLC=18,
+ NET_NETFILTER=19,
+ NET_DCCP=20,
+ NET_IRDA=412,
+};
+
+
+enum
+{
+ RANDOM_POOLSIZE=1,
+ RANDOM_ENTROPY_COUNT=2,
+ RANDOM_READ_THRESH=3,
+ RANDOM_WRITE_THRESH=4,
+ RANDOM_BOOT_ID=5,
+ RANDOM_UUID=6
+};
+
+
+enum
+{
+ PTY_MAX=1,
+ PTY_NR=2
+};
+
+
+enum
+{
+ BUS_ISA_MEM_BASE=1,
+ BUS_ISA_PORT_BASE=2,
+ BUS_ISA_PORT_SHIFT=3
+};
+
+
+enum
+{
+ NET_CORE_WMEM_MAX=1,
+ NET_CORE_RMEM_MAX=2,
+ NET_CORE_WMEM_DEFAULT=3,
+ NET_CORE_RMEM_DEFAULT=4,
+
+ NET_CORE_MAX_BACKLOG=6,
+ NET_CORE_FASTROUTE=7,
+ NET_CORE_MSG_COST=8,
+ NET_CORE_MSG_BURST=9,
+ NET_CORE_OPTMEM_MAX=10,
+ NET_CORE_HOT_LIST_LENGTH=11,
+ NET_CORE_DIVERT_VERSION=12,
+ NET_CORE_NO_CONG_THRESH=13,
+ NET_CORE_NO_CONG=14,
+ NET_CORE_LO_CONG=15,
+ NET_CORE_MOD_CONG=16,
+ NET_CORE_DEV_WEIGHT=17,
+ NET_CORE_SOMAXCONN=18,
+ NET_CORE_BUDGET=19,
+ NET_CORE_AEVENT_ETIME=20,
+ NET_CORE_AEVENT_RSEQTH=21,
+ NET_CORE_WARNINGS=22,
+};
+
+
+
+
+
+
+
+enum
+{
+ NET_UNIX_DESTROY_DELAY=1,
+ NET_UNIX_DELETE_DELAY=2,
+ NET_UNIX_MAX_DGRAM_QLEN=3,
+};
+
+
+enum
+{
+ NET_NF_CONNTRACK_MAX=1,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
+ NET_NF_CONNTRACK_UDP_TIMEOUT=10,
+ NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
+ NET_NF_CONNTRACK_ICMP_TIMEOUT=12,
+ NET_NF_CONNTRACK_GENERIC_TIMEOUT=13,
+ NET_NF_CONNTRACK_BUCKETS=14,
+ NET_NF_CONNTRACK_LOG_INVALID=15,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
+ NET_NF_CONNTRACK_TCP_LOOSE=17,
+ NET_NF_CONNTRACK_TCP_BE_LIBERAL=18,
+ NET_NF_CONNTRACK_TCP_MAX_RETRANS=19,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
+ NET_NF_CONNTRACK_COUNT=27,
+ NET_NF_CONNTRACK_ICMPV6_TIMEOUT=28,
+ NET_NF_CONNTRACK_FRAG6_TIMEOUT=29,
+ NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30,
+ NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31,
+ NET_NF_CONNTRACK_CHECKSUM=32,
+};
+
+
+enum
+{
+
+ NET_IPV4_FORWARD=8,
+ NET_IPV4_DYNADDR=9,
+
+ NET_IPV4_CONF=16,
+ NET_IPV4_NEIGH=17,
+ NET_IPV4_ROUTE=18,
+ NET_IPV4_FIB_HASH=19,
+ NET_IPV4_NETFILTER=20,
+
+ NET_IPV4_TCP_TIMESTAMPS=33,
+ NET_IPV4_TCP_WINDOW_SCALING=34,
+ NET_IPV4_TCP_SACK=35,
+ NET_IPV4_TCP_RETRANS_COLLAPSE=36,
+ NET_IPV4_DEFAULT_TTL=37,
+ NET_IPV4_AUTOCONFIG=38,
+ NET_IPV4_NO_PMTU_DISC=39,
+ NET_IPV4_TCP_SYN_RETRIES=40,
+ NET_IPV4_IPFRAG_HIGH_THRESH=41,
+ NET_IPV4_IPFRAG_LOW_THRESH=42,
+ NET_IPV4_IPFRAG_TIME=43,
+ NET_IPV4_TCP_MAX_KA_PROBES=44,
+ NET_IPV4_TCP_KEEPALIVE_TIME=45,
+ NET_IPV4_TCP_KEEPALIVE_PROBES=46,
+ NET_IPV4_TCP_RETRIES1=47,
+ NET_IPV4_TCP_RETRIES2=48,
+ NET_IPV4_TCP_FIN_TIMEOUT=49,
+ NET_IPV4_IP_MASQ_DEBUG=50,
+ NET_TCP_SYNCOOKIES=51,
+ NET_TCP_STDURG=52,
+ NET_TCP_RFC1337=53,
+ NET_TCP_SYN_TAILDROP=54,
+ NET_TCP_MAX_SYN_BACKLOG=55,
+ NET_IPV4_LOCAL_PORT_RANGE=56,
+ NET_IPV4_ICMP_ECHO_IGNORE_ALL=57,
+ NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS=58,
+ NET_IPV4_ICMP_SOURCEQUENCH_RATE=59,
+ NET_IPV4_ICMP_DESTUNREACH_RATE=60,
+ NET_IPV4_ICMP_TIMEEXCEED_RATE=61,
+ NET_IPV4_ICMP_PARAMPROB_RATE=62,
+ NET_IPV4_ICMP_ECHOREPLY_RATE=63,
+ NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES=64,
+ NET_IPV4_IGMP_MAX_MEMBERSHIPS=65,
+ NET_TCP_TW_RECYCLE=66,
+ NET_IPV4_ALWAYS_DEFRAG=67,
+ NET_IPV4_TCP_KEEPALIVE_INTVL=68,
+ NET_IPV4_INET_PEER_THRESHOLD=69,
+ NET_IPV4_INET_PEER_MINTTL=70,
+ NET_IPV4_INET_PEER_MAXTTL=71,
+ NET_IPV4_INET_PEER_GC_MINTIME=72,
+ NET_IPV4_INET_PEER_GC_MAXTIME=73,
+ NET_TCP_ORPHAN_RETRIES=74,
+ NET_TCP_ABORT_ON_OVERFLOW=75,
+ NET_TCP_SYNACK_RETRIES=76,
+ NET_TCP_MAX_ORPHANS=77,
+ NET_TCP_MAX_TW_BUCKETS=78,
+ NET_TCP_FACK=79,
+ NET_TCP_REORDERING=80,
+ NET_TCP_ECN=81,
+ NET_TCP_DSACK=82,
+ NET_TCP_MEM=83,
+ NET_TCP_WMEM=84,
+ NET_TCP_RMEM=85,
+ NET_TCP_APP_WIN=86,
+ NET_TCP_ADV_WIN_SCALE=87,
+ NET_IPV4_NONLOCAL_BIND=88,
+ NET_IPV4_ICMP_RATELIMIT=89,
+ NET_IPV4_ICMP_RATEMASK=90,
+ NET_TCP_TW_REUSE=91,
+ NET_TCP_FRTO=92,
+ NET_TCP_LOW_LATENCY=93,
+ NET_IPV4_IPFRAG_SECRET_INTERVAL=94,
+ NET_IPV4_IGMP_MAX_MSF=96,
+ NET_TCP_NO_METRICS_SAVE=97,
+ NET_TCP_DEFAULT_WIN_SCALE=105,
+ NET_TCP_MODERATE_RCVBUF=106,
+ NET_TCP_TSO_WIN_DIVISOR=107,
+ NET_TCP_BIC_BETA=108,
+ NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109,
+ NET_TCP_CONG_CONTROL=110,
+ NET_TCP_ABC=111,
+ NET_IPV4_IPFRAG_MAX_DIST=112,
+ NET_TCP_MTU_PROBING=113,
+ NET_TCP_BASE_MSS=114,
+ NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115,
+ NET_TCP_DMA_COPYBREAK=116,
+ NET_TCP_SLOW_START_AFTER_IDLE=117,
+ NET_CIPSOV4_CACHE_ENABLE=118,
+ NET_CIPSOV4_CACHE_BUCKET_SIZE=119,
+ NET_CIPSOV4_RBM_OPTFMT=120,
+ NET_CIPSOV4_RBM_STRICTVALID=121,
+ NET_TCP_AVAIL_CONG_CONTROL=122,
+ NET_TCP_ALLOWED_CONG_CONTROL=123,
+ NET_TCP_MAX_SSTHRESH=124,
+ NET_TCP_FRTO_RESPONSE=125,
+};
+
+enum {
+ NET_IPV4_ROUTE_FLUSH=1,
+ NET_IPV4_ROUTE_MIN_DELAY=2,
+ NET_IPV4_ROUTE_MAX_DELAY=3,
+ NET_IPV4_ROUTE_GC_THRESH=4,
+ NET_IPV4_ROUTE_MAX_SIZE=5,
+ NET_IPV4_ROUTE_GC_MIN_INTERVAL=6,
+ NET_IPV4_ROUTE_GC_TIMEOUT=7,
+ NET_IPV4_ROUTE_GC_INTERVAL=8,
+ NET_IPV4_ROUTE_REDIRECT_LOAD=9,
+ NET_IPV4_ROUTE_REDIRECT_NUMBER=10,
+ NET_IPV4_ROUTE_REDIRECT_SILENCE=11,
+ NET_IPV4_ROUTE_ERROR_COST=12,
+ NET_IPV4_ROUTE_ERROR_BURST=13,
+ NET_IPV4_ROUTE_GC_ELASTICITY=14,
+ NET_IPV4_ROUTE_MTU_EXPIRES=15,
+ NET_IPV4_ROUTE_MIN_PMTU=16,
+ NET_IPV4_ROUTE_MIN_ADVMSS=17,
+ NET_IPV4_ROUTE_SECRET_INTERVAL=18,
+ NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS=19,
+};
+
+enum
+{
+ NET_PROTO_CONF_ALL=-2,
+ NET_PROTO_CONF_DEFAULT=-3
+
+
+};
+
+enum
+{
+ NET_IPV4_CONF_FORWARDING=1,
+ NET_IPV4_CONF_MC_FORWARDING=2,
+ NET_IPV4_CONF_PROXY_ARP=3,
+ NET_IPV4_CONF_ACCEPT_REDIRECTS=4,
+ NET_IPV4_CONF_SECURE_REDIRECTS=5,
+ NET_IPV4_CONF_SEND_REDIRECTS=6,
+ NET_IPV4_CONF_SHARED_MEDIA=7,
+ NET_IPV4_CONF_RP_FILTER=8,
+ NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE=9,
+ NET_IPV4_CONF_BOOTP_RELAY=10,
+ NET_IPV4_CONF_LOG_MARTIANS=11,
+ NET_IPV4_CONF_TAG=12,
+ NET_IPV4_CONF_ARPFILTER=13,
+ NET_IPV4_CONF_MEDIUM_ID=14,
+ NET_IPV4_CONF_NOXFRM=15,
+ NET_IPV4_CONF_NOPOLICY=16,
+ NET_IPV4_CONF_FORCE_IGMP_VERSION=17,
+ NET_IPV4_CONF_ARP_ANNOUNCE=18,
+ NET_IPV4_CONF_ARP_IGNORE=19,
+ NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
+ NET_IPV4_CONF_ARP_ACCEPT=21,
+ NET_IPV4_CONF_ARP_NOTIFY=22,
+ __NET_IPV4_CONF_MAX
+};
+
+
+enum
+{
+ NET_IPV4_NF_CONNTRACK_MAX=1,
+ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
+ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
+ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
+ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
+ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
+ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
+ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
+ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
+ NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT=10,
+ NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
+ NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT=12,
+ NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT=13,
+ NET_IPV4_NF_CONNTRACK_BUCKETS=14,
+ NET_IPV4_NF_CONNTRACK_LOG_INVALID=15,
+ NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
+ NET_IPV4_NF_CONNTRACK_TCP_LOOSE=17,
+ NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL=18,
+ NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS=19,
+ NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
+ NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
+ NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
+ NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
+ NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
+ NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
+ NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
+ NET_IPV4_NF_CONNTRACK_COUNT=27,
+ NET_IPV4_NF_CONNTRACK_CHECKSUM=28,
+};
+
+
+enum {
+ NET_IPV6_CONF=16,
+ NET_IPV6_NEIGH=17,
+ NET_IPV6_ROUTE=18,
+ NET_IPV6_ICMP=19,
+ NET_IPV6_BINDV6ONLY=20,
+ NET_IPV6_IP6FRAG_HIGH_THRESH=21,
+ NET_IPV6_IP6FRAG_LOW_THRESH=22,
+ NET_IPV6_IP6FRAG_TIME=23,
+ NET_IPV6_IP6FRAG_SECRET_INTERVAL=24,
+ NET_IPV6_MLD_MAX_MSF=25,
+};
+
+enum {
+ NET_IPV6_ROUTE_FLUSH=1,
+ NET_IPV6_ROUTE_GC_THRESH=2,
+ NET_IPV6_ROUTE_MAX_SIZE=3,
+ NET_IPV6_ROUTE_GC_MIN_INTERVAL=4,
+ NET_IPV6_ROUTE_GC_TIMEOUT=5,
+ NET_IPV6_ROUTE_GC_INTERVAL=6,
+ NET_IPV6_ROUTE_GC_ELASTICITY=7,
+ NET_IPV6_ROUTE_MTU_EXPIRES=8,
+ NET_IPV6_ROUTE_MIN_ADVMSS=9,
+ NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS=10
+};
+
+enum {
+ NET_IPV6_FORWARDING=1,
+ NET_IPV6_HOP_LIMIT=2,
+ NET_IPV6_MTU=3,
+ NET_IPV6_ACCEPT_RA=4,
+ NET_IPV6_ACCEPT_REDIRECTS=5,
+ NET_IPV6_AUTOCONF=6,
+ NET_IPV6_DAD_TRANSMITS=7,
+ NET_IPV6_RTR_SOLICITS=8,
+ NET_IPV6_RTR_SOLICIT_INTERVAL=9,
+ NET_IPV6_RTR_SOLICIT_DELAY=10,
+ NET_IPV6_USE_TEMPADDR=11,
+ NET_IPV6_TEMP_VALID_LFT=12,
+ NET_IPV6_TEMP_PREFERED_LFT=13,
+ NET_IPV6_REGEN_MAX_RETRY=14,
+ NET_IPV6_MAX_DESYNC_FACTOR=15,
+ NET_IPV6_MAX_ADDRESSES=16,
+ NET_IPV6_FORCE_MLD_VERSION=17,
+ NET_IPV6_ACCEPT_RA_DEFRTR=18,
+ NET_IPV6_ACCEPT_RA_PINFO=19,
+ NET_IPV6_ACCEPT_RA_RTR_PREF=20,
+ NET_IPV6_RTR_PROBE_INTERVAL=21,
+ NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
+ NET_IPV6_PROXY_NDP=23,
+ NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
+ __NET_IPV6_MAX
+};
+
+
+enum {
+ NET_IPV6_ICMP_RATELIMIT=1
+};
+
+
+enum {
+ NET_NEIGH_MCAST_SOLICIT=1,
+ NET_NEIGH_UCAST_SOLICIT=2,
+ NET_NEIGH_APP_SOLICIT=3,
+ NET_NEIGH_RETRANS_TIME=4,
+ NET_NEIGH_REACHABLE_TIME=5,
+ NET_NEIGH_DELAY_PROBE_TIME=6,
+ NET_NEIGH_GC_STALE_TIME=7,
+ NET_NEIGH_UNRES_QLEN=8,
+ NET_NEIGH_PROXY_QLEN=9,
+ NET_NEIGH_ANYCAST_DELAY=10,
+ NET_NEIGH_PROXY_DELAY=11,
+ NET_NEIGH_LOCKTIME=12,
+ NET_NEIGH_GC_INTERVAL=13,
+ NET_NEIGH_GC_THRESH1=14,
+ NET_NEIGH_GC_THRESH2=15,
+ NET_NEIGH_GC_THRESH3=16,
+ NET_NEIGH_RETRANS_TIME_MS=17,
+ NET_NEIGH_REACHABLE_TIME_MS=18,
+ __NET_NEIGH_MAX
+};
+
+
+enum {
+ NET_DCCP_DEFAULT=1,
+};
+
+
+enum {
+ NET_IPX_PPROP_BROADCASTING=1,
+ NET_IPX_FORWARDING=2
+};
+
+
+enum {
+ NET_LLC2=1,
+ NET_LLC_STATION=2,
+};
+
+
+enum {
+ NET_LLC2_TIMEOUT=1,
+};
+
+
+enum {
+ NET_LLC_STATION_ACK_TIMEOUT=1,
+};
+
+
+enum {
+ NET_LLC2_ACK_TIMEOUT=1,
+ NET_LLC2_P_TIMEOUT=2,
+ NET_LLC2_REJ_TIMEOUT=3,
+ NET_LLC2_BUSY_TIMEOUT=4,
+};
+
+
+enum {
+ NET_ATALK_AARP_EXPIRY_TIME=1,
+ NET_ATALK_AARP_TICK_TIME=2,
+ NET_ATALK_AARP_RETRANSMIT_LIMIT=3,
+ NET_ATALK_AARP_RESOLVE_TIME=4
+};
+
+
+
+enum {
+ NET_NETROM_DEFAULT_PATH_QUALITY=1,
+ NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER=2,
+ NET_NETROM_NETWORK_TTL_INITIALISER=3,
+ NET_NETROM_TRANSPORT_TIMEOUT=4,
+ NET_NETROM_TRANSPORT_MAXIMUM_TRIES=5,
+ NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY=6,
+ NET_NETROM_TRANSPORT_BUSY_DELAY=7,
+ NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE=8,
+ NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT=9,
+ NET_NETROM_ROUTING_CONTROL=10,
+ NET_NETROM_LINK_FAILS_COUNT=11,
+ NET_NETROM_RESET=12
+};
+
+
+enum {
+ NET_AX25_IP_DEFAULT_MODE=1,
+ NET_AX25_DEFAULT_MODE=2,
+ NET_AX25_BACKOFF_TYPE=3,
+ NET_AX25_CONNECT_MODE=4,
+ NET_AX25_STANDARD_WINDOW=5,
+ NET_AX25_EXTENDED_WINDOW=6,
+ NET_AX25_T1_TIMEOUT=7,
+ NET_AX25_T2_TIMEOUT=8,
+ NET_AX25_T3_TIMEOUT=9,
+ NET_AX25_IDLE_TIMEOUT=10,
+ NET_AX25_N2=11,
+ NET_AX25_PACLEN=12,
+ NET_AX25_PROTOCOL=13,
+ NET_AX25_DAMA_SLAVE_TIMEOUT=14
+};
+
+
+enum {
+ NET_ROSE_RESTART_REQUEST_TIMEOUT=1,
+ NET_ROSE_CALL_REQUEST_TIMEOUT=2,
+ NET_ROSE_RESET_REQUEST_TIMEOUT=3,
+ NET_ROSE_CLEAR_REQUEST_TIMEOUT=4,
+ NET_ROSE_ACK_HOLD_BACK_TIMEOUT=5,
+ NET_ROSE_ROUTING_CONTROL=6,
+ NET_ROSE_LINK_FAIL_TIMEOUT=7,
+ NET_ROSE_MAX_VCS=8,
+ NET_ROSE_WINDOW_SIZE=9,
+ NET_ROSE_NO_ACTIVITY_TIMEOUT=10
+};
+
+
+enum {
+ NET_X25_RESTART_REQUEST_TIMEOUT=1,
+ NET_X25_CALL_REQUEST_TIMEOUT=2,
+ NET_X25_RESET_REQUEST_TIMEOUT=3,
+ NET_X25_CLEAR_REQUEST_TIMEOUT=4,
+ NET_X25_ACK_HOLD_BACK_TIMEOUT=5,
+ NET_X25_FORWARD=6
+};
+
+
+enum
+{
+ NET_TR_RIF_TIMEOUT=1
+};
+
+
+enum {
+ NET_DECNET_NODE_TYPE = 1,
+ NET_DECNET_NODE_ADDRESS = 2,
+ NET_DECNET_NODE_NAME = 3,
+ NET_DECNET_DEFAULT_DEVICE = 4,
+ NET_DECNET_TIME_WAIT = 5,
+ NET_DECNET_DN_COUNT = 6,
+ NET_DECNET_DI_COUNT = 7,
+ NET_DECNET_DR_COUNT = 8,
+ NET_DECNET_DST_GC_INTERVAL = 9,
+ NET_DECNET_CONF = 10,
+ NET_DECNET_NO_FC_MAX_CWND = 11,
+ NET_DECNET_MEM = 12,
+ NET_DECNET_RMEM = 13,
+ NET_DECNET_WMEM = 14,
+ NET_DECNET_DEBUG_LEVEL = 255
+};
+
+
+enum {
+ NET_DECNET_CONF_LOOPBACK = -2,
+ NET_DECNET_CONF_DDCMP = -3,
+ NET_DECNET_CONF_PPP = -4,
+ NET_DECNET_CONF_X25 = -5,
+ NET_DECNET_CONF_GRE = -6,
+ NET_DECNET_CONF_ETHER = -7
+
+
+};
+
+
+enum {
+ NET_DECNET_CONF_DEV_PRIORITY = 1,
+ NET_DECNET_CONF_DEV_T1 = 2,
+ NET_DECNET_CONF_DEV_T2 = 3,
+ NET_DECNET_CONF_DEV_T3 = 4,
+ NET_DECNET_CONF_DEV_FORWARDING = 5,
+ NET_DECNET_CONF_DEV_BLKSIZE = 6,
+ NET_DECNET_CONF_DEV_STATE = 7
+};
+
+
+enum {
+ NET_SCTP_RTO_INITIAL = 1,
+ NET_SCTP_RTO_MIN = 2,
+ NET_SCTP_RTO_MAX = 3,
+ NET_SCTP_RTO_ALPHA = 4,
+ NET_SCTP_RTO_BETA = 5,
+ NET_SCTP_VALID_COOKIE_LIFE = 6,
+ NET_SCTP_ASSOCIATION_MAX_RETRANS = 7,
+ NET_SCTP_PATH_MAX_RETRANS = 8,
+ NET_SCTP_MAX_INIT_RETRANSMITS = 9,
+ NET_SCTP_HB_INTERVAL = 10,
+ NET_SCTP_PRESERVE_ENABLE = 11,
+ NET_SCTP_MAX_BURST = 12,
+ NET_SCTP_ADDIP_ENABLE = 13,
+ NET_SCTP_PRSCTP_ENABLE = 14,
+ NET_SCTP_SNDBUF_POLICY = 15,
+ NET_SCTP_SACK_TIMEOUT = 16,
+ NET_SCTP_RCVBUF_POLICY = 17,
+};
+
+
+enum {
+ NET_BRIDGE_NF_CALL_ARPTABLES = 1,
+ NET_BRIDGE_NF_CALL_IPTABLES = 2,
+ NET_BRIDGE_NF_CALL_IP6TABLES = 3,
+ NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
+ NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
+};
+
+
+enum {
+ NET_IRDA_DISCOVERY=1,
+ NET_IRDA_DEVNAME=2,
+ NET_IRDA_DEBUG=3,
+ NET_IRDA_FAST_POLL=4,
+ NET_IRDA_DISCOVERY_SLOTS=5,
+ NET_IRDA_DISCOVERY_TIMEOUT=6,
+ NET_IRDA_SLOT_TIMEOUT=7,
+ NET_IRDA_MAX_BAUD_RATE=8,
+ NET_IRDA_MIN_TX_TURN_TIME=9,
+ NET_IRDA_MAX_TX_DATA_SIZE=10,
+ NET_IRDA_MAX_TX_WINDOW=11,
+ NET_IRDA_MAX_NOREPLY_TIME=12,
+ NET_IRDA_WARN_NOREPLY_TIME=13,
+ NET_IRDA_LAP_KEEPALIVE_TIME=14,
+};
+
+
+
+enum
+{
+ FS_NRINODE=1,
+ FS_STATINODE=2,
+ FS_MAXINODE=3,
+ FS_NRDQUOT=4,
+ FS_MAXDQUOT=5,
+ FS_NRFILE=6,
+ FS_MAXFILE=7,
+ FS_DENTRY=8,
+ FS_NRSUPER=9,
+ FS_MAXSUPER=10,
+ FS_OVERFLOWUID=11,
+ FS_OVERFLOWGID=12,
+ FS_LEASES=13,
+ FS_DIR_NOTIFY=14,
+ FS_LEASE_TIME=15,
+ FS_DQSTATS=16,
+ FS_XFS=17,
+ FS_AIO_NR=18,
+ FS_AIO_MAX_NR=19,
+ FS_INOTIFY=20,
+ FS_OCFS2=988,
+};
+
+
+enum {
+ FS_DQ_LOOKUPS = 1,
+ FS_DQ_DROPS = 2,
+ FS_DQ_READS = 3,
+ FS_DQ_WRITES = 4,
+ FS_DQ_CACHE_HITS = 5,
+ FS_DQ_ALLOCATED = 6,
+ FS_DQ_FREE = 7,
+ FS_DQ_SYNCS = 8,
+ FS_DQ_WARNINGS = 9,
+};
+
+
+
+
+enum {
+ DEV_CDROM=1,
+ DEV_HWMON=2,
+ DEV_PARPORT=3,
+ DEV_RAID=4,
+ DEV_MAC_HID=5,
+ DEV_SCSI=6,
+ DEV_IPMI=7,
+};
+
+
+enum {
+ DEV_CDROM_INFO=1,
+ DEV_CDROM_AUTOCLOSE=2,
+ DEV_CDROM_AUTOEJECT=3,
+ DEV_CDROM_DEBUG=4,
+ DEV_CDROM_LOCK=5,
+ DEV_CDROM_CHECK_MEDIA=6
+};
+
+
+enum {
+ DEV_PARPORT_DEFAULT=-3
+};
+
+
+enum {
+ DEV_RAID_SPEED_LIMIT_MIN=1,
+ DEV_RAID_SPEED_LIMIT_MAX=2
+};
+
+
+enum {
+ DEV_PARPORT_DEFAULT_TIMESLICE=1,
+ DEV_PARPORT_DEFAULT_SPINTIME=2
+};
+
+
+enum {
+ DEV_PARPORT_SPINTIME=1,
+ DEV_PARPORT_BASE_ADDR=2,
+ DEV_PARPORT_IRQ=3,
+ DEV_PARPORT_DMA=4,
+ DEV_PARPORT_MODES=5,
+ DEV_PARPORT_DEVICES=6,
+ DEV_PARPORT_AUTOPROBE=16
+};
+
+
+enum {
+ DEV_PARPORT_DEVICES_ACTIVE=-3,
+};
+
+
+enum {
+ DEV_PARPORT_DEVICE_TIMESLICE=1,
+};
+
+
+enum {
+ DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES=1,
+ DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES=2,
+ DEV_MAC_HID_MOUSE_BUTTON_EMULATION=3,
+ DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE=4,
+ DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE=5,
+ DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES=6
+};
+
+
+enum {
+ DEV_SCSI_LOGGING_LEVEL=1,
+};
+
+
+enum {
+ DEV_IPMI_POWEROFF_POWERCYCLE=1,
+};
+
+
+enum
+{
+ ABI_DEFHANDLER_COFF=1,
+ ABI_DEFHANDLER_ELF=2,
+ ABI_DEFHANDLER_LCALL7=3,
+ ABI_DEFHANDLER_LIBCSO=4,
+ ABI_TRACE=5,
+ ABI_FAKE_UTSNAME=6,
+};
+
+
+
+
+
+struct ctl_table;
+struct nsproxy;
+struct ctl_table_root;
+
+struct ctl_table_set {
+ struct list_head list;
+ struct ctl_table_set *parent;
+ int (*is_seen)(struct ctl_table_set *);
+};
+
+extern void setup_sysctl_set(struct ctl_table_set *p,
+ struct ctl_table_set *parent,
+ int (*is_seen)(struct ctl_table_set *));
+
+struct ctl_table_header;
+
+extern void sysctl_head_get(struct ctl_table_header *);
+extern void sysctl_head_put(struct ctl_table_header *);
+extern int sysctl_is_seen(struct ctl_table_header *);
+extern struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *);
+extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev);
+extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
+ struct ctl_table_header *prev);
+extern void sysctl_head_finish(struct ctl_table_header *prev);
+extern int sysctl_perm(struct ctl_table_root *root,
+ struct ctl_table *table, int op);
+
+typedef struct ctl_table ctl_table;
+
+typedef int ctl_handler (struct ctl_table *table,
+ void *oldval, size_t *oldlenp,
+ void *newval, size_t newlen);
+
+typedef int proc_handler (struct ctl_table *ctl, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+
+extern int proc_dostring(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+extern int proc_dointvec(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+extern int proc_dointvec_minmax(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+extern int proc_dointvec_jiffies(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+extern int proc_dointvec_ms_jiffies(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+extern int proc_doulongvec_minmax(struct ctl_table *, int,
+ void *, size_t *, loff_t *);
+extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
+ void *, size_t *, loff_t *);
+
+extern int do_sysctl (int *name, int nlen,
+ void *oldval, size_t *oldlenp,
+ void *newval, size_t newlen);
+
+extern ctl_handler sysctl_data;
+extern ctl_handler sysctl_string;
+extern ctl_handler sysctl_intvec;
+extern ctl_handler sysctl_jiffies;
+extern ctl_handler sysctl_ms_jiffies;
+# 1051 "include/linux/sysctl.h"
+struct ctl_table
+{
+ int ctl_name;
+ const char *procname;
+ void *data;
+ int maxlen;
+ mode_t mode;
+ struct ctl_table *child;
+ struct ctl_table *parent;
+ proc_handler *proc_handler;
+ ctl_handler *strategy;
+ void *extra1;
+ void *extra2;
+};
+
+struct ctl_table_root {
+ struct list_head root_list;
+ struct ctl_table_set default_set;
+ struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
+ struct nsproxy *namespaces);
+ int (*permissions)(struct ctl_table_root *root,
+ struct nsproxy *namespaces, struct ctl_table *table);
+};
+
+
+
+struct ctl_table_header
+{
+ struct ctl_table *ctl_table;
+ struct list_head ctl_entry;
+ int used;
+ int count;
+ struct completion *unregistering;
+ struct ctl_table *ctl_table_arg;
+ struct ctl_table_root *root;
+ struct ctl_table_set *set;
+ struct ctl_table *attached_by;
+ struct ctl_table *attached_to;
+ struct ctl_table_header *parent;
+};
+
+
+struct ctl_path {
+ const char *procname;
+ int ctl_name;
+};
+
+void register_sysctl_root(struct ctl_table_root *root);
+struct ctl_table_header *__register_sysctl_paths(
+ struct ctl_table_root *root, struct nsproxy *namespaces,
+ const struct ctl_path *path, struct ctl_table *table);
+struct ctl_table_header *register_sysctl_table(struct ctl_table * table);
+struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
+ struct ctl_table *table);
+
+void unregister_sysctl_table(struct ctl_table_header * table);
+int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table);
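+
+/*
+ * Editor's note (illustrative sketch, not part of the preprocessed unit):
+ * the ctl_table / ctl_path / register_sysctl_paths() interfaces declared
+ * above are typically wired together as below.  example_value,
+ * example_table, example_path and example_sysctl_init are hypothetical
+ * names; the ctl_name values -2 (CTL_UNNUMBERED) and 1 (CTL_KERN) are
+ * assumed from the same header and are not shown in this excerpt.
+ */
+static int example_value;
+
+static struct ctl_table example_table[] = {
+ {
+  .ctl_name = -2,   /* unnumbered: /proc/sys only */
+  .procname = "example_value",
+  .data  = &example_value,
+  .maxlen  = sizeof(int),
+  .mode  = 0644,
+  .proc_handler = proc_dointvec, /* generic integer handler declared above */
+ },
+ { .ctl_name = 0 }   /* terminating entry */
+};
+
+static const struct ctl_path example_path[] = {
+ { .procname = "kernel", .ctl_name = 1 },
+ { }
+};
+
+static struct ctl_table_header *example_header;
+
+static int example_sysctl_init(void)
+{
+ /* creates /proc/sys/kernel/example_value */
+ example_header = register_sysctl_paths(example_path, example_table);
+ return example_header ? 0 : -12; /* -ENOMEM */
+}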
+# 23 "include/linux/key.h" 2
+
+
+
+
+
+
+typedef int32_t key_serial_t;
+
+
+typedef uint32_t key_perm_t;
+
+struct key;
+# 74 "include/linux/key.h"
+struct seq_file;
+struct user_struct;
+struct signal_struct;
+struct cred;
+
+struct key_type;
+struct key_owner;
+struct keyring_list;
+struct keyring_name;
+# 98 "include/linux/key.h"
+typedef struct __key_reference_with_attributes *key_ref_t;
+
+static inline __attribute__((always_inline)) key_ref_t make_key_ref(const struct key *key,
+ unsigned long possession)
+{
+ return (key_ref_t) ((unsigned long) key | possession);
+}
+
+static inline __attribute__((always_inline)) struct key *key_ref_to_ptr(const key_ref_t key_ref)
+{
+ return (struct key *) ((unsigned long) key_ref & ~1UL);
+}
+
+static inline __attribute__((always_inline)) unsigned long is_key_possessed(const key_ref_t key_ref)
+{
+ return (unsigned long) key_ref & 1UL;
+}
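+
+/*
+ * Editor's note (illustrative, not part of the preprocessed unit):
+ * key_ref_t above packs a "possessed" flag into bit 0 of a struct key
+ * pointer, which is otherwise always zero for an aligned pointer.  With
+ * a hypothetical key pointer k, the round trip is:
+ *
+ *  key_ref_t ref  = make_key_ref(k, 1);           // tag as possessed
+ *  struct key *kp = key_ref_to_ptr(ref);          // kp == k, tag masked off
+ *  unsigned long owned = is_key_possessed(ref);   // owned == 1
+ */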
+# 124 "include/linux/key.h"
+struct key {
+ atomic_t usage;
+ key_serial_t serial;
+ struct rb_node serial_node;
+ struct key_type *type;
+ struct rw_semaphore sem;
+ struct key_user *user;
+ void *security;
+ union {
+ time_t expiry;
+ time_t revoked_at;
+ };
+ uid_t uid;
+ gid_t gid;
+ key_perm_t perm;
+ unsigned short quotalen;
+ unsigned short datalen;
+# 151 "include/linux/key.h"
+ unsigned long flags;
+# 164 "include/linux/key.h"
+ char *description;
+
+
+
+
+ union {
+ struct list_head link;
+ unsigned long x[2];
+ void *p[2];
+ } type_data;
+
+
+
+
+
+ union {
+ unsigned long value;
+ void *data;
+ struct keyring_list *subscriptions;
+ } payload;
+};
+
+extern struct key *key_alloc(struct key_type *type,
+ const char *desc,
+ uid_t uid, gid_t gid,
+ const struct cred *cred,
+ key_perm_t perm,
+ unsigned long flags);
+
+
+
+
+
+
+extern void key_revoke(struct key *key);
+extern void key_put(struct key *key);
+
+static inline __attribute__((always_inline)) struct key *key_get(struct key *key)
+{
+ if (key)
+ atomic_inc(&key->usage);
+ return key;
+}
+
+static inline __attribute__((always_inline)) void key_ref_put(key_ref_t key_ref)
+{
+ key_put(key_ref_to_ptr(key_ref));
+}
+
+extern struct key *request_key(struct key_type *type,
+ const char *description,
+ const char *callout_info);
+
+extern struct key *request_key_with_auxdata(struct key_type *type,
+ const char *description,
+ const void *callout_info,
+ size_t callout_len,
+ void *aux);
+
+extern struct key *request_key_async(struct key_type *type,
+ const char *description,
+ const void *callout_info,
+ size_t callout_len);
+
+extern struct key *request_key_async_with_auxdata(struct key_type *type,
+ const char *description,
+ const void *callout_info,
+ size_t callout_len,
+ void *aux);
+
+extern int wait_for_key_construction(struct key *key, bool intr);
+
+extern int key_validate(struct key *key);
+
+extern key_ref_t key_create_or_update(key_ref_t keyring,
+ const char *type,
+ const char *description,
+ const void *payload,
+ size_t plen,
+ key_perm_t perm,
+ unsigned long flags);
+
+extern int key_update(key_ref_t key,
+ const void *payload,
+ size_t plen);
+
+extern int key_link(struct key *keyring,
+ struct key *key);
+
+extern int key_unlink(struct key *keyring,
+ struct key *key);
+
+extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
+ const struct cred *cred,
+ unsigned long flags,
+ struct key *dest);
+
+extern int keyring_clear(struct key *keyring);
+
+extern key_ref_t keyring_search(key_ref_t keyring,
+ struct key_type *type,
+ const char *description);
+
+extern int keyring_add_key(struct key *keyring,
+ struct key *key);
+
+extern struct key *key_lookup(key_serial_t id);
+
+static inline __attribute__((always_inline)) key_serial_t key_serial(struct key *key)
+{
+ return key ? key->serial : 0;
+}
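+
+/*
+ * Editor's note (illustrative usage sketch, not part of the preprocessed
+ * unit): the request_key()/key_validate()/key_put() API above is normally
+ * used along these lines.  key_type_user and IS_ERR() come from headers
+ * outside this excerpt, and the description string is hypothetical.
+ *
+ *  struct key *k = request_key(&key_type_user, "example:token", NULL);
+ *  if (!IS_ERR(k)) {
+ *          if (key_validate(k) == 0)
+ *                  ... use k->payload ... ;
+ *          key_put(k);                 // drop the reference taken for us
+ *  }
+ */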
+
+
+extern ctl_table key_sysctls[];
+
+
+extern void key_replace_session_keyring(void);
+
+
+
+
+extern int install_thread_keyring_to_cred(struct cred *cred);
+extern void key_fsuid_changed(struct task_struct *tsk);
+extern void key_fsgid_changed(struct task_struct *tsk);
+extern void key_init(void);
+# 18 "include/linux/cred.h" 2
+# 1 "include/linux/selinux.h" 1
+# 17 "include/linux/selinux.h"
+struct selinux_audit_rule;
+struct audit_context;
+struct kern_ipc_perm;
+# 71 "include/linux/selinux.h"
+static inline __attribute__((always_inline)) int selinux_string_to_sid(const char *str, u32 *sid)
+{
+ *sid = 0;
+ return 0;
+}
+
+static inline __attribute__((always_inline)) int selinux_secmark_relabel_packet_permission(u32 sid)
+{
+ return 0;
+}
+
+static inline __attribute__((always_inline)) void selinux_secmark_refcount_inc(void)
+{
+ return;
+}
+
+static inline __attribute__((always_inline)) void selinux_secmark_refcount_dec(void)
+{
+ return;
+}
+
+static inline __attribute__((always_inline)) bool selinux_is_enabled(void)
+{
+ return false;
+}
+# 19 "include/linux/cred.h" 2
+
+
+struct user_struct;
+struct cred;
+struct inode;
+
+
+
+
+
+
+
+struct group_info {
+ atomic_t usage;
+ int ngroups;
+ int nblocks;
+ gid_t small_block[32];
+ gid_t *blocks[0];
+};
+# 48 "include/linux/cred.h"
+static inline __attribute__((always_inline)) struct group_info *get_group_info(struct group_info *gi)
+{
+ atomic_inc(&gi->usage);
+ return gi;
+}
+# 64 "include/linux/cred.h"
+extern struct group_info *groups_alloc(int);
+extern struct group_info init_groups;
+extern void groups_free(struct group_info *);
+extern int set_current_groups(struct group_info *);
+extern int set_groups(struct cred *, struct group_info *);
+extern int groups_search(const struct group_info *, gid_t);
+
+
+
+
+
+extern int in_group_p(gid_t);
+extern int in_egroup_p(gid_t);
+
+
+
+
+
+
+struct thread_group_cred {
+ atomic_t usage;
+ pid_t tgid;
+ spinlock_t lock;
+ struct key *session_keyring;
+ struct key *process_keyring;
+ struct rcu_head rcu;
+};
+# 116 "include/linux/cred.h"
+struct cred {
+ atomic_t usage;
+
+ atomic_t subscribers;
+ void *put_addr;
+ unsigned magic;
+
+
+
+ uid_t uid;
+ gid_t gid;
+ uid_t suid;
+ gid_t sgid;
+ uid_t euid;
+ gid_t egid;
+ uid_t fsuid;
+ gid_t fsgid;
+ unsigned securebits;
+ kernel_cap_t cap_inheritable;
+ kernel_cap_t cap_permitted;
+ kernel_cap_t cap_effective;
+ kernel_cap_t cap_bset;
+
+ unsigned char jit_keyring;
+
+ struct key *thread_keyring;
+ struct key *request_key_auth;
+ struct thread_group_cred *tgcred;
+
+
+ void *security;
+
+ struct user_struct *user;
+ struct group_info *group_info;
+ struct rcu_head rcu;
+};
+
+extern void __put_cred(struct cred *);
+extern void exit_creds(struct task_struct *);
+extern int copy_creds(struct task_struct *, unsigned long);
+extern struct cred *cred_alloc_blank(void);
+extern struct cred *prepare_creds(void);
+extern struct cred *prepare_exec_creds(void);
+extern struct cred *prepare_usermodehelper_creds(void);
+extern int commit_creds(struct cred *);
+extern void abort_creds(struct cred *);
+extern const struct cred *override_creds(const struct cred *);
+extern void revert_creds(const struct cred *);
+extern struct cred *prepare_kernel_cred(struct task_struct *);
+extern int change_create_files_as(struct cred *, struct inode *);
+extern int set_security_override(struct cred *, u32);
+extern int set_security_override_from_ctx(struct cred *, const char *);
+extern int set_create_files_as(struct cred *, struct inode *);
+extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) cred_init(void);
+
+
+
+
+
+extern void __invalid_creds(const struct cred *, const char *, unsigned);
+extern void __validate_process_creds(struct task_struct *,
+ const char *, unsigned);
+
+extern bool creds_are_invalid(const struct cred *cred);
+
+static inline __attribute__((always_inline)) void __validate_creds(const struct cred *cred,
+ const char *file, unsigned line)
+{
+ if (__builtin_expect(!!(creds_are_invalid(cred)), 0))
+ __invalid_creds(cred, file, line);
+}
+# 198 "include/linux/cred.h"
+extern void validate_creds_for_do_exit(struct task_struct *);
+# 218 "include/linux/cred.h"
+static inline __attribute__((always_inline)) struct cred *get_new_cred(struct cred *cred)
+{
+ atomic_inc(&cred->usage);
+ return cred;
+}
+# 237 "include/linux/cred.h"
+static inline __attribute__((always_inline)) const struct cred *get_cred(const struct cred *cred)
+{
+ struct cred *nonconst_cred = (struct cred *) cred;
+ do { __validate_creds((cred), "include/linux/cred.h", 240); } while(0);
+ return get_new_cred(nonconst_cred);
+}
+# 255 "include/linux/cred.h"
+static inline __attribute__((always_inline)) void put_cred(const struct cred *_cred)
+{
+ struct cred *cred = (struct cred *) _cred;
+
+ do { __validate_creds((cred), "include/linux/cred.h", 259); } while(0);
+ if ((atomic_sub_return(1, (&(cred)->usage)) == 0))
+ __put_cred(cred);
+}
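+
+/*
+ * Editor's note (illustrative, not part of the preprocessed unit):
+ * get_cred()/put_cred() above implement the usual reference-counting
+ * pattern for holding a credential beyond the current context, and the
+ * do { __validate_creds(...); } while(0) lines are the expanded
+ * validate_creds() debug checks.  A typical pairing:
+ *
+ *  const struct cred *cred = get_cred(source_cred);  // usage count +1
+ *  ... stash or use cred ...
+ *  put_cred(cred);                                   // -1, freed at zero
+ *
+ * source_cred stands in for a real source such as current_cred(), which
+ * is defined elsewhere.
+ */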
+# 94 "include/linux/sched.h" 2
+
+
+
+struct exec_domain;
+struct futex_pi_state;
+struct robust_list_head;
+struct bio;
+struct fs_struct;
+struct bts_context;
+struct perf_event_context;
+# 121 "include/linux/sched.h"
+extern unsigned long avenrun[];
+extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
+# 136 "include/linux/sched.h"
+extern unsigned long total_forks;
+extern int nr_threads;
+extern __attribute__((section(".discard"), unused)) char __pcpu_scope_process_counts; extern __attribute__((section(".data" ""))) __typeof__(unsigned long) per_cpu__process_counts;
+extern int nr_processes(void);
+extern unsigned long nr_running(void);
+extern unsigned long nr_uninterruptible(void);
+extern unsigned long nr_iowait(void);
+extern unsigned long nr_iowait_cpu(void);
+extern unsigned long this_cpu_load(void);
+
+
+extern void calc_global_load(void);
+extern u64 cpu_nr_migrations(int cpu);
+
+extern unsigned long get_parent_ip(unsigned long addr);
+
+struct seq_file;
+struct cfs_rq;
+struct task_group;
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void
+proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+{
+}
+static inline __attribute__((always_inline)) void proc_sched_set_task(struct task_struct *p)
+{
+}
+static inline __attribute__((always_inline)) void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+{
+}
+
+
+extern unsigned long long time_sync_thresh;
+# 253 "include/linux/sched.h"
+extern rwlock_t tasklist_lock;
+extern spinlock_t mmlist_lock;
+
+struct task_struct;
+
+extern void sched_init(void);
+extern void sched_init_smp(void);
+extern void schedule_tail(struct task_struct *prev);
+extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle_bootup_task(struct task_struct *idle);
+
+extern int runqueue_is_locked(int cpu);
+extern void task_rq_unlock_wait(struct task_struct *p);
+
+extern cpumask_var_t nohz_cpu_mask;
+
+
+
+
+static inline __attribute__((always_inline)) int select_nohz_load_balancer(int cpu)
+{
+ return 0;
+}
+
+
+
+
+
+extern void show_state_filter(unsigned long state_filter);
+
+static inline __attribute__((always_inline)) void show_state(void)
+{
+ show_state_filter(0);
+}
+
+extern void show_regs(struct pt_regs *);
+
+
+
+
+
+
+extern void show_stack(struct task_struct *task, unsigned long *sp);
+
+void io_schedule(void);
+long io_schedule_timeout(long timeout);
+
+extern void cpu_init (void);
+extern void trap_init(void);
+extern void update_process_times(int user);
+extern void scheduler_tick(void);
+
+extern void sched_show_task(struct task_struct *p);
+# 317 "include/linux/sched.h"
+static inline __attribute__((always_inline)) void softlockup_tick(void)
+{
+}
+static inline __attribute__((always_inline)) void touch_softlockup_watchdog(void)
+{
+}
+static inline __attribute__((always_inline)) void touch_all_softlockup_watchdogs(void)
+{
+}
+
+
+
+extern unsigned int sysctl_hung_task_panic;
+extern unsigned long sysctl_hung_task_check_count;
+extern unsigned long sysctl_hung_task_timeout_secs;
+extern unsigned long sysctl_hung_task_warnings;
+extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
+ void *buffer,
+ size_t *lenp, loff_t *ppos);
+
+
+
+
+
+
+extern char __sched_text_start[], __sched_text_end[];
+
+
+extern int in_sched_functions(unsigned long addr);
+
+
+extern signed long schedule_timeout(signed long timeout);
+extern signed long schedule_timeout_interruptible(signed long timeout);
+extern signed long schedule_timeout_killable(signed long timeout);
+extern signed long schedule_timeout_uninterruptible(signed long timeout);
+ void __schedule(void);
+ void schedule(void);
+extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
+
+struct nsproxy;
+struct user_namespace;
+# 374 "include/linux/sched.h"
+extern int sysctl_max_map_count;
+
+# 1 "include/linux/aio.h" 1
+
+
+
+
+
+# 1 "include/linux/aio_abi.h" 1
+# 31 "include/linux/aio_abi.h"
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/byteorder.h" 1
+# 32 "include/linux/aio_abi.h" 2
+
+typedef unsigned long aio_context_t;
+
+enum {
+ IOCB_CMD_PREAD = 0,
+ IOCB_CMD_PWRITE = 1,
+ IOCB_CMD_FSYNC = 2,
+ IOCB_CMD_FDSYNC = 3,
+
+
+
+
+ IOCB_CMD_NOOP = 6,
+ IOCB_CMD_PREADV = 7,
+ IOCB_CMD_PWRITEV = 8,
+};
+# 58 "include/linux/aio_abi.h"
+struct io_event {
+ __u64 data;
+ __u64 obj;
+ __s64 res;
+ __s64 res2;
+};
+# 79 "include/linux/aio_abi.h"
+struct iocb {
+
+ __u64 aio_data;
+ __u32 aio_key, aio_reserved1;
+
+
+
+ __u16 aio_lio_opcode;
+ __s16 aio_reqprio;
+ __u32 aio_fildes;
+
+ __u64 aio_buf;
+ __u64 aio_nbytes;
+ __s64 aio_offset;
+
+
+ __u64 aio_reserved2;
+
+
+ __u32 aio_flags;
+
+
+
+
+
+ __u32 aio_resfd;
+};
+# 7 "include/linux/aio.h" 2
+# 1 "include/linux/uio.h" 1
+# 16 "include/linux/uio.h"
+struct iovec
+{
+ void *iov_base;
+ __kernel_size_t iov_len;
+};
+# 31 "include/linux/uio.h"
+struct kvec {
+ void *iov_base;
+ size_t iov_len;
+};
+# 43 "include/linux/uio.h"
+static inline __attribute__((always_inline)) size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
+{
+ unsigned long seg;
+ size_t ret = 0;
+
+ for (seg = 0; seg < nr_segs; seg++)
+ ret += iov[seg].iov_len;
+ return ret;
+}
+
+unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
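+
+/*
+ * Editor's note (illustrative example, not part of the preprocessed unit):
+ * iov_length() above simply sums iov_len over an iovec array:
+ *
+ *  char a[16], b[64];
+ *  struct iovec v[2] = {
+ *          { .iov_base = a, .iov_len = sizeof(a) },
+ *          { .iov_base = b, .iov_len = sizeof(b) },
+ *  };
+ *  size_t total = iov_length(v, 2);   // total == 80
+ */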
+# 8 "include/linux/aio.h" 2
+
+
+
+
+
+
+
+struct kioctx;
+# 87 "include/linux/aio.h"
+struct kiocb {
+ struct list_head ki_run_list;
+ unsigned long ki_flags;
+ int ki_users;
+ unsigned ki_key;
+
+ struct file *ki_filp;
+ struct kioctx *ki_ctx;
+ int (*ki_cancel)(struct kiocb *, struct io_event *);
+ ssize_t (*ki_retry)(struct kiocb *);
+ void (*ki_dtor)(struct kiocb *);
+
+ union {
+ void *user;
+ struct task_struct *tsk;
+ } ki_obj;
+
+ __u64 ki_user_data;
+ wait_queue_t ki_wait;
+ loff_t ki_pos;
+
+ void *private;
+
+ unsigned short ki_opcode;
+ size_t ki_nbytes;
+ char *ki_buf;
+ size_t ki_left;
+ struct iovec ki_inline_vec;
+ struct iovec *ki_iovec;
+ unsigned long ki_nr_segs;
+ unsigned long ki_cur_seg;
+
+ struct list_head ki_list;
+
+
+
+
+
+
+ struct eventfd_ctx *ki_eventfd;
+};
+# 149 "include/linux/aio.h"
+struct aio_ring {
+ unsigned id;
+ unsigned nr;
+ unsigned head;
+ unsigned tail;
+
+ unsigned magic;
+ unsigned compat_features;
+ unsigned incompat_features;
+ unsigned header_length;
+
+
+ struct io_event io_events[0];
+};
+
+
+
+
+struct aio_ring_info {
+ unsigned long mmap_base;
+ unsigned long mmap_size;
+
+ struct page **ring_pages;
+ spinlock_t ring_lock;
+ long nr_pages;
+
+ unsigned nr, tail;
+
+ struct page *internal_pages[8];
+};
+
+struct kioctx {
+ atomic_t users;
+ int dead;
+ struct mm_struct *mm;
+
+
+ unsigned long user_id;
+ struct hlist_node list;
+
+ wait_queue_head_t wait;
+
+ spinlock_t ctx_lock;
+
+ int reqs_active;
+ struct list_head active_reqs;
+ struct list_head run_list;
+
+
+ unsigned max_reqs;
+
+ struct aio_ring_info ring_info;
+
+ struct delayed_work wq;
+
+ struct rcu_head rcu_head;
+};
+
+
+extern unsigned aio_max_size;
+
+
+extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
+extern int aio_put_req(struct kiocb *iocb);
+extern void kick_iocb(struct kiocb *iocb);
+extern int aio_complete(struct kiocb *iocb, long res, long res2);
+struct mm_struct;
+extern void exit_aio(struct mm_struct *mm);
+# 228 "include/linux/aio.h"
+static inline __attribute__((always_inline)) struct kiocb *list_kiocb(struct list_head *h)
+{
+ return ({ const typeof( ((struct kiocb *)0)->ki_list ) *__mptr = (h); (struct kiocb *)( (char *)__mptr - __builtin_offsetof(struct kiocb,ki_list) );});
+}
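+
+/*
+ * Editor's note: the expression above is the expanded container_of()
+ * macro, i.e. list_kiocb() returns the struct kiocb that embeds the
+ * list_head h as its ki_list member.
+ */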
+
+
+extern unsigned long aio_nr;
+extern unsigned long aio_max_nr;
+# 377 "include/linux/sched.h" 2
+
+extern unsigned long
+arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
+extern unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+extern void arch_unmap_area(struct mm_struct *, unsigned long);
+extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
+# 424 "include/linux/sched.h"
+static inline __attribute__((always_inline)) unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+ return ({ typeof(mm->hiwater_rss) _max1 = (mm->hiwater_rss); typeof((((mm)->_file_rss) + ((mm)->_anon_rss))) _max2 = ((((mm)->_file_rss) + ((mm)->_anon_rss))); (void) (&_max1 == &_max2); _max1 > _max2 ? _max1 : _max2; });
+}
+
+static inline __attribute__((always_inline)) void setmax_mm_hiwater_rss(unsigned long *maxrss,
+ struct mm_struct *mm)
+{
+ unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
+
+ if (*maxrss < hiwater_rss)
+ *maxrss = hiwater_rss;
+}
+
+static inline __attribute__((always_inline)) unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+ return ({ typeof(mm->hiwater_vm) _max1 = (mm->hiwater_vm); typeof(mm->total_vm) _max2 = (mm->total_vm); (void) (&_max1 == &_max2); _max1 > _max2 ? _max1 : _max2; });
+}
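+
+/*
+ * Editor's note: the two return statements above are the expanded max()
+ * macro; they reduce to max(mm->hiwater_rss, file_rss + anon_rss) and
+ * max(mm->hiwater_vm, mm->total_vm), i.e. the recorded high-water mark
+ * or the current value, whichever is larger.
+ */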
+
+extern void set_dumpable(struct mm_struct *mm, int value);
+extern int get_dumpable(struct mm_struct *mm);
+# 481 "include/linux/sched.h"
+struct sighand_struct {
+ atomic_t count;
+ struct k_sigaction action[64];
+ spinlock_t siglock;
+ wait_queue_head_t signalfd_wqh;
+};
+
+struct pacct_struct {
+ int ac_flag;
+ long ac_exitcode;
+ unsigned long ac_mem;
+ cputime_t ac_utime, ac_stime;
+ unsigned long ac_minflt, ac_majflt;
+};
+
+struct cpu_itimer {
+ cputime_t expires;
+ cputime_t incr;
+ u32 error;
+ u32 incr_error;
+};
+# 514 "include/linux/sched.h"
+struct task_cputime {
+ cputime_t utime;
+ cputime_t stime;
+ unsigned long long sum_exec_runtime;
+};
+# 550 "include/linux/sched.h"
+struct thread_group_cputimer {
+ struct task_cputime cputime;
+ int running;
+ spinlock_t lock;
+};
+# 563 "include/linux/sched.h"
+struct signal_struct {
+ atomic_t count;
+ atomic_t live;
+
+ wait_queue_head_t wait_chldexit;
+
+
+ struct task_struct *curr_target;
+
+
+ struct sigpending shared_pending;
+
+
+ int group_exit_code;
+
+
+
+
+
+ int notify_count;
+ struct task_struct *group_exit_task;
+
+
+ int group_stop_count;
+ unsigned int flags;
+
+
+ struct list_head posix_timers;
+
+
+ struct hrtimer real_timer;
+ struct pid *leader_pid;
+ ktime_t it_real_incr;
+
+
+
+
+
+
+ struct cpu_itimer it[2];
+
+
+
+
+
+ struct thread_group_cputimer cputimer;
+
+
+ struct task_cputime cputime_expires;
+
+ struct list_head cpu_timers[3];
+
+ struct pid *tty_old_pgrp;
+
+
+ int leader;
+
+ struct tty_struct *tty;
+
+
+
+
+
+
+
+ cputime_t utime, stime, cutime, cstime;
+ cputime_t gtime;
+ cputime_t cgtime;
+ unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
+ unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
+ unsigned long inblock, oublock, cinblock, coublock;
+ unsigned long maxrss, cmaxrss;
+ struct task_io_accounting ioac;
+
+
+
+
+
+
+
+ unsigned long long sum_sched_runtime;
+# 654 "include/linux/sched.h"
+ struct rlimit rlim[16];
+# 663 "include/linux/sched.h"
+ unsigned audit_tty;
+ struct tty_audit_buf *tty_audit_buf;
+
+
+ int oom_adj;
+};
+# 692 "include/linux/sched.h"
+static inline __attribute__((always_inline)) int signal_group_exit(const struct signal_struct *sig)
+{
+ return (sig->flags & 0x00000008) ||
+ (sig->group_exit_task != ((void *)0));
+}
+
+
+
+
+struct user_struct {
+ atomic_t __count;
+ atomic_t processes;
+ atomic_t files;
+ atomic_t sigpending;
+
+ atomic_t inotify_watches;
+ atomic_t inotify_devs;
+
+
+ atomic_t epoll_watches;
+
+
+
+ unsigned long mq_bytes;
+
+ unsigned long locked_shm;
+
+
+ struct key *uid_keyring;
+ struct key *session_keyring;
+
+
+
+ struct hlist_node uidhash_node;
+ uid_t uid;
+ struct user_namespace *user_ns;
+
+
+ struct task_group *tg;
+
+ struct kobject kobj;
+ struct delayed_work work;
+
+
+
+
+
+
+};
+
+extern int uids_sysfs_init(void);
+
+extern struct user_struct *find_user(uid_t);
+
+extern struct user_struct root_user;
+
+
+
+struct backing_dev_info;
+struct reclaim_state;
+
+
+struct sched_info {
+
+ unsigned long pcount;
+ unsigned long long run_delay;
+
+
+ unsigned long long last_arrival,
+ last_queued;
+
+
+ unsigned int bkl_count;
+
+};
+# 803 "include/linux/sched.h"
+static inline __attribute__((always_inline)) int sched_info_on(void)
+{
+
+ return 1;
+
+
+
+
+
+
+}
+
+enum cpu_idle_type {
+ CPU_IDLE,
+ CPU_NOT_IDLE,
+ CPU_NEWLY_IDLE,
+ CPU_MAX_IDLE_TYPES
+};
+# 1033 "include/linux/sched.h"
+struct sched_domain_attr;
+
+static inline __attribute__((always_inline)) void
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+ struct sched_domain_attr *dattr_new)
+{
+}
+
+
+
+struct io_context;
+
+
+
+
+
+static inline __attribute__((always_inline)) void prefetch_stack(struct task_struct *t) { }
+
+
+struct audit_context;
+struct mempolicy;
+struct pipe_inode_info;
+struct uts_namespace;
+
+struct rq;
+struct sched_domain;
+
+
+
+
+
+
+
+struct sched_class {
+ const struct sched_class *next;
+
+ void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+ void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+ void (*yield_task) (struct rq *rq);
+
+ void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
+
+ struct task_struct * (*pick_next_task) (struct rq *rq);
+ void (*put_prev_task) (struct rq *rq, struct task_struct *p);
+# 1100 "include/linux/sched.h"
+ void (*set_curr_task) (struct rq *rq);
+ void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
+ void (*task_new) (struct rq *rq, struct task_struct *p);
+
+ void (*switched_from) (struct rq *this_rq, struct task_struct *task,
+ int running);
+ void (*switched_to) (struct rq *this_rq, struct task_struct *task,
+ int running);
+ void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
+ int oldprio, int running);
+
+ unsigned int (*get_rr_interval) (struct task_struct *task);
+
+
+ void (*moved_group) (struct task_struct *p);
+
+};
+
+struct load_weight {
+ unsigned long weight, inv_weight;
+};
+# 1132 "include/linux/sched.h"
+struct sched_entity {
+ struct load_weight load;
+ struct rb_node run_node;
+ struct list_head group_node;
+ unsigned int on_rq;
+
+ u64 exec_start;
+ u64 sum_exec_runtime;
+ u64 vruntime;
+ u64 prev_sum_exec_runtime;
+
+ u64 last_wakeup;
+ u64 avg_overlap;
+
+ u64 nr_migrations;
+
+ u64 start_runtime;
+ u64 avg_wakeup;
+
+ u64 avg_running;
+
+
+ u64 wait_start;
+ u64 wait_max;
+ u64 wait_count;
+ u64 wait_sum;
+ u64 iowait_count;
+ u64 iowait_sum;
+
+ u64 sleep_start;
+ u64 sleep_max;
+ s64 sum_sleep_runtime;
+
+ u64 block_start;
+ u64 block_max;
+ u64 exec_max;
+ u64 slice_max;
+
+ u64 nr_migrations_cold;
+ u64 nr_failed_migrations_affine;
+ u64 nr_failed_migrations_running;
+ u64 nr_failed_migrations_hot;
+ u64 nr_forced_migrations;
+ u64 nr_forced2_migrations;
+
+ u64 nr_wakeups;
+ u64 nr_wakeups_sync;
+ u64 nr_wakeups_migrate;
+ u64 nr_wakeups_local;
+ u64 nr_wakeups_remote;
+ u64 nr_wakeups_affine;
+ u64 nr_wakeups_affine_attempts;
+ u64 nr_wakeups_passive;
+ u64 nr_wakeups_idle;
+
+
+
+ struct sched_entity *parent;
+
+ struct cfs_rq *cfs_rq;
+
+ struct cfs_rq *my_q;
+
+};
+
+struct sched_rt_entity {
+ struct list_head run_list;
+ unsigned long timeout;
+ unsigned int time_slice;
+ int nr_cpus_allowed;
+
+ struct sched_rt_entity *back;
+
+
+
+
+
+
+
+};
+
+struct rcu_node;
+
+struct task_struct {
+ volatile long state;
+ void *stack;
+ atomic_t usage;
+ unsigned int flags;
+ unsigned int ptrace;
+
+ int lock_depth;
+
+
+
+
+
+
+
+ int prio, static_prio, normal_prio;
+ unsigned int rt_priority;
+ const struct sched_class *sched_class;
+ struct sched_entity se;
+ struct sched_rt_entity rt;
+# 1249 "include/linux/sched.h"
+ unsigned char fpu_counter;
+
+
+
+
+ unsigned int policy;
+ cpumask_t cpus_allowed;
+# 1265 "include/linux/sched.h"
+ struct sched_info sched_info;
+
+
+ struct list_head tasks;
+ struct plist_node pushable_tasks;
+
+ struct mm_struct *mm, *active_mm;
+
+
+ int exit_state;
+ int exit_code, exit_signal;
+ int pdeath_signal;
+
+ unsigned int personality;
+ unsigned did_exec:1;
+ unsigned in_execve:1;
+
+ unsigned in_iowait:1;
+
+
+
+ unsigned sched_reset_on_fork:1;
+
+ pid_t pid;
+ pid_t tgid;
+# 1301 "include/linux/sched.h"
+ struct task_struct *real_parent;
+ struct task_struct *parent;
+
+
+
+ struct list_head children;
+ struct list_head sibling;
+ struct task_struct *group_leader;
+
+
+
+
+
+
+ struct list_head ptraced;
+ struct list_head ptrace_entry;
+
+
+
+
+
+ struct bts_context *bts;
+
+
+ struct pid_link pids[PIDTYPE_MAX];
+ struct list_head thread_group;
+
+ struct completion *vfork_done;
+ int *set_child_tid;
+ int *clear_child_tid;
+
+ cputime_t utime, stime, utimescaled, stimescaled;
+ cputime_t gtime;
+ cputime_t prev_utime, prev_stime;
+ unsigned long nvcsw, nivcsw;
+ struct timespec start_time;
+ struct timespec real_start_time;
+
+ unsigned long min_flt, maj_flt;
+
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
+
+
+ const struct cred *real_cred;
+
+ const struct cred *cred;
+
+ struct mutex cred_guard_mutex;
+
+
+ struct cred *replacement_session_keyring;
+
+ char comm[16];
+
+
+
+
+ int link_count, total_link_count;
+
+
+
+
+
+
+ unsigned long last_switch_count;
+
+
+ struct thread_struct thread;
+
+ struct fs_struct *fs;
+
+ struct files_struct *files;
+
+ struct nsproxy *nsproxy;
+
+ struct signal_struct *signal;
+ struct sighand_struct *sighand;
+
+ sigset_t blocked, real_blocked;
+ sigset_t saved_sigmask;
+ struct sigpending pending;
+
+ unsigned long sas_ss_sp;
+ size_t sas_ss_size;
+ int (*notifier)(void *priv);
+ void *notifier_data;
+ sigset_t *notifier_mask;
+ struct audit_context *audit_context;
+
+
+
+
+ seccomp_t seccomp;
+
+
+ u32 parent_exec_id;
+ u32 self_exec_id;
+
+
+ spinlock_t alloc_lock;
+
+
+
+ struct irqaction *irqaction;
+
+
+
+ spinlock_t pi_lock;
+
+
+
+ struct plist_head pi_waiters;
+
+ struct rt_mutex_waiter *pi_blocked_on;
+
+
+
+
+ struct mutex_waiter *blocked_on;
+# 1439 "include/linux/sched.h"
+ u64 curr_chain_key;
+ int lockdep_depth;
+ unsigned int lockdep_recursion;
+ struct held_lock held_locks[48UL];
+ gfp_t lockdep_reclaim_gfp;
+
+
+
+ void *journal_info;
+
+
+ struct bio *bio_list, **bio_tail;
+
+
+ struct reclaim_state *reclaim_state;
+
+ struct backing_dev_info *backing_dev_info;
+
+ struct io_context *io_context;
+
+ unsigned long ptrace_message;
+ siginfo_t *last_siginfo;
+ struct task_io_accounting ioac;
+# 1478 "include/linux/sched.h"
+ struct robust_list_head *robust_list;
+
+
+
+ struct list_head pi_state_list;
+ struct futex_pi_state *pi_state_cache;
+# 1494 "include/linux/sched.h"
+ atomic_t fs_excl;
+ struct rcu_head rcu;
+
+
+
+
+ struct pipe_inode_info *splice_pipe;
+
+
+
+
+ int make_it_fail;
+
+ struct prop_local_single dirties;
+# 1516 "include/linux/sched.h"
+ unsigned long timer_slack_ns;
+ unsigned long default_timer_slack_ns;
+
+ struct list_head *scm_work_list;
+# 1537 "include/linux/sched.h"
+ unsigned long trace;
+
+ unsigned long trace_recursion;
+
+ unsigned long stack_start;
+};
+# 1566 "include/linux/sched.h"
+static inline __attribute__((always_inline)) int rt_prio(int prio)
+{
+ if (__builtin_expect(!!(prio < 100), 0))
+ return 1;
+ return 0;
+}
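+
+/*
+ * Editor's note: the constant 100 above is the expanded MAX_RT_PRIO, so
+ * rt_prio()/rt_task() report whether a priority falls in the real-time
+ * range 0..99.
+ */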
+
+static inline __attribute__((always_inline)) int rt_task(struct task_struct *p)
+{
+ return rt_prio(p->prio);
+}
+
+static inline __attribute__((always_inline)) struct pid *task_pid(struct task_struct *task)
+{
+ return task->pids[PIDTYPE_PID].pid;
+}
+
+static inline __attribute__((always_inline)) struct pid *task_tgid(struct task_struct *task)
+{
+ return task->group_leader->pids[PIDTYPE_PID].pid;
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) struct pid *task_pgrp(struct task_struct *task)
+{
+ return task->group_leader->pids[PIDTYPE_PGID].pid;
+}
+
+static inline __attribute__((always_inline)) struct pid *task_session(struct task_struct *task)
+{
+ return task->group_leader->pids[PIDTYPE_SID].pid;
+}
+
+struct pid_namespace;
+# 1618 "include/linux/sched.h"
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ struct pid_namespace *ns);
+
+static inline __attribute__((always_inline)) pid_t task_pid_nr(struct task_struct *tsk)
+{
+ return tsk->pid;
+}
+
+static inline __attribute__((always_inline)) pid_t task_pid_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
+}
+
+static inline __attribute__((always_inline)) pid_t task_pid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, ((void *)0));
+}
+
+
+static inline __attribute__((always_inline)) pid_t task_tgid_nr(struct task_struct *tsk)
+{
+ return tsk->tgid;
+}
+
+pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+
+static inline __attribute__((always_inline)) pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+ return pid_vnr(task_tgid(tsk));
+}
+
+
+static inline __attribute__((always_inline)) pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
+}
+
+static inline __attribute__((always_inline)) pid_t task_pgrp_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ((void *)0));
+}
+
+
+static inline __attribute__((always_inline)) pid_t task_session_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
+}
+
+static inline __attribute__((always_inline)) pid_t task_session_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, ((void *)0));
+}
+
+
+static inline __attribute__((always_inline)) pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+ return task_pgrp_nr_ns(tsk, &init_pid_ns);
+}
+# 1688 "include/linux/sched.h"
+static inline __attribute__((always_inline)) int pid_alive(struct task_struct *p)
+{
+ return p->pids[PIDTYPE_PID].pid != ((void *)0);
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int is_global_init(struct task_struct *tsk)
+{
+ return tsk->pid == 1;
+}
+
+
+
+
+
+extern int is_container_init(struct task_struct *tsk);
+
+extern struct pid *cad_pid;
+
+extern void free_task(struct task_struct *tsk);
+
+
+extern void __put_task_struct(struct task_struct *t);
+
+static inline __attribute__((always_inline)) void put_task_struct(struct task_struct *t)
+{
+ if ((atomic_sub_return(1, (&t->usage)) == 0))
+ __put_task_struct(t);
+}
+
+extern cputime_t task_utime(struct task_struct *p);
+extern cputime_t task_stime(struct task_struct *p);
+extern cputime_t task_gtime(struct task_struct *p);
+# 1803 "include/linux/sched.h"
+static inline __attribute__((always_inline)) void rcu_copy_process(struct task_struct *p)
+{
+}
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask)
+{
+ if (!test_bit(cpumask_check(0), (((new_mask))->bits)))
+ return -22;
+ return 0;
+}
+
+
+
+static inline __attribute__((always_inline)) int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+ return set_cpus_allowed_ptr(p, &new_mask);
+}
+# 1839 "include/linux/sched.h"
+extern unsigned long long sched_clock(void);
+
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+
+
+static inline __attribute__((always_inline)) void sched_clock_tick(void)
+{
+}
+
+static inline __attribute__((always_inline)) void sched_clock_idle_sleep_event(void)
+{
+}
+
+static inline __attribute__((always_inline)) void sched_clock_idle_wakeup_event(u64 delta_ns)
+{
+}
+# 1866 "include/linux/sched.h"
+extern unsigned long long cpu_clock(int cpu);
+
+extern unsigned long long
+task_sched_runtime(struct task_struct *task);
+extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
+# 1879 "include/linux/sched.h"
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+
+
+
+
+static inline __attribute__((always_inline)) void idle_task_exit(void) {}
+
+
+extern void sched_idle_next(void);
+
+
+
+
+static inline __attribute__((always_inline)) void wake_up_idle_cpu(int cpu) { }
+
+
+extern unsigned int sysctl_sched_latency;
+extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
+extern unsigned int sysctl_sched_child_runs_first;
+# 1919 "include/linux/sched.h"
+static inline __attribute__((always_inline)) unsigned int get_sysctl_timer_migration(void)
+{
+ return 1;
+}
+
+extern unsigned int sysctl_sched_rt_period;
+extern int sysctl_sched_rt_runtime;
+
+int sched_rt_handler(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp,
+ loff_t *ppos);
+
+extern unsigned int sysctl_sched_compat_yield;
+
+
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
+# 1945 "include/linux/sched.h"
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+extern int task_nice(const struct task_struct *p);
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
+extern int idle_cpu(int cpu);
+extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern int sched_setscheduler_nocheck(struct task_struct *, int,
+ struct sched_param *);
+extern struct task_struct *idle_task(int cpu);
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
+
+void yield(void);
+
+
+
+
+extern struct exec_domain default_exec_domain;
+
+union thread_union {
+ struct thread_info thread_info;
+ unsigned long stack[8192/sizeof(long)];
+};
+
+
+static inline __attribute__((always_inline)) int kstack_end(void *addr)
+{
+
+
+
+ return !(((unsigned long)addr+sizeof(void*)-1) & (8192 -sizeof(void*)));
+}
+
+
+extern union thread_union init_thread_union;
+extern struct task_struct init_task;
+
+extern struct mm_struct init_mm;
+
+extern struct pid_namespace init_pid_ns;
+# 1998 "include/linux/sched.h"
+extern struct task_struct *find_task_by_vpid(pid_t nr);
+extern struct task_struct *find_task_by_pid_ns(pid_t nr,
+ struct pid_namespace *ns);
+
+extern void __set_special_pids(struct pid *pid);
+
+
+extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
+static inline __attribute__((always_inline)) struct user_struct *get_uid(struct user_struct *u)
+{
+ atomic_inc(&u->__count);
+ return u;
+}
+extern void free_uid(struct user_struct *);
+extern void release_uids(struct user_namespace *ns);
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/current.h" 1
+# 2015 "include/linux/sched.h" 2
+
+extern void do_timer(unsigned long ticks);
+
+extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+extern int wake_up_process(struct task_struct *tsk);
+extern void wake_up_new_task(struct task_struct *tsk,
+ unsigned long clone_flags);
+
+
+
+ static inline __attribute__((always_inline)) void kick_process(struct task_struct *tsk) { }
+
+extern void sched_fork(struct task_struct *p, int clone_flags);
+extern void sched_dead(struct task_struct *p);
+
+extern void proc_caches_init(void);
+extern void flush_signals(struct task_struct *);
+extern void __flush_signals(struct task_struct *);
+extern void ignore_signals(struct task_struct *);
+extern void flush_signal_handlers(struct task_struct *, int force_default);
+extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
+
+static inline __attribute__((always_inline)) int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+{
+ unsigned long flags;
+ int ret;
+
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _spin_lock_irqsave(&tsk->sighand->siglock); } while (0);
+ ret = dequeue_signal(tsk, mask, info);
+ do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _spin_unlock_irqrestore(&tsk->sighand->siglock, flags); } while (0);
+
+ return ret;
+}
+
+extern void block_all_signals(int (*notifier)(void *priv), void *priv,
+ sigset_t *mask);
+extern void unblock_all_signals(void);
+extern void release_task(struct task_struct * p);
+extern int send_sig_info(int, struct siginfo *, struct task_struct *);
+extern int force_sigsegv(int, struct task_struct *);
+extern int force_sig_info(int, struct siginfo *, struct task_struct *);
+extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
+extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
+extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
+extern int kill_pgrp(struct pid *pid, int sig, int priv);
+extern int kill_pid(struct pid *pid, int sig, int priv);
+extern int kill_proc_info(int, struct siginfo *, pid_t);
+extern int do_notify_parent(struct task_struct *, int);
+extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
+extern void force_sig(int, struct task_struct *);
+extern void force_sig_specific(int, struct task_struct *);
+extern int send_sig(int, struct task_struct *, int);
+extern void zap_other_threads(struct task_struct *p);
+extern struct sigqueue *sigqueue_alloc(void);
+extern void sigqueue_free(struct sigqueue *);
+extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
+extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
+extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
+
+static inline __attribute__((always_inline)) int kill_cad_pid(int sig, int priv)
+{
+ return kill_pid(cad_pid, sig, priv);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int is_si_special(const struct siginfo *info)
+{
+ return info <= ((struct siginfo *) 2);
+}
+
+
+
+static inline __attribute__((always_inline)) int on_sig_stack(unsigned long sp)
+{
+ return (sp - (current_thread_info()->task)->sas_ss_sp < (current_thread_info()->task)->sas_ss_size);
+}
+
+static inline __attribute__((always_inline)) int sas_ss_flags(unsigned long sp)
+{
+ return ((current_thread_info()->task)->sas_ss_size == 0 ? 2
+ : on_sig_stack(sp) ? 1 : 0);
+}
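+
+/*
+ * Editor's note: the bare constants above are the expanded sigaltstack
+ * flags; 2 is SS_DISABLE (no alternate stack configured) and 1 is
+ * SS_ONSTACK (sp currently within the alternate stack).
+ */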
+
+
+
+
+extern struct mm_struct * mm_alloc(void);
+
+
+extern void __mmdrop(struct mm_struct *);
+static inline __attribute__((always_inline)) void mmdrop(struct mm_struct * mm)
+{
+ if (__builtin_expect(!!((atomic_sub_return(1, (&mm->mm_count)) == 0)), 0))
+ __mmdrop(mm);
+}
+
+
+extern void mmput(struct mm_struct *);
+
+extern struct mm_struct *get_task_mm(struct task_struct *task);
+
+extern void mm_release(struct task_struct *, struct mm_struct *);
+
+extern struct mm_struct *dup_mm(struct task_struct *tsk);
+
+extern int copy_thread(unsigned long, unsigned long, unsigned long,
+ struct task_struct *, struct pt_regs *);
+extern void flush_thread(void);
+extern void exit_thread(void);
+
+extern void exit_files(struct task_struct *);
+extern void __cleanup_signal(struct signal_struct *);
+extern void __cleanup_sighand(struct sighand_struct *);
+
+extern void exit_itimers(struct signal_struct *);
+extern void flush_itimer_signals(void);
+
+extern void do_group_exit(int);
+
+extern void daemonize(const char *, ...);
+extern int allow_signal(int);
+extern int disallow_signal(int);
+
+extern int do_execve(char *, char * *, char * *, struct pt_regs *);
+extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int *, int *);
+struct task_struct *fork_idle(int);
+
+extern void set_task_comm(struct task_struct *tsk, char *from);
+extern char *get_task_comm(char *to, struct task_struct *tsk);
+
+
+
+
+
+static inline __attribute__((always_inline)) void wait_task_context_switch(struct task_struct *p) {}
+static inline __attribute__((always_inline)) unsigned long wait_task_inactive(struct task_struct *p,
+ long match_state)
+{
+ return 1;
+}
+# 2167 "include/linux/sched.h"
+extern bool current_is_single_threaded(void);
+# 2188 "include/linux/sched.h"
+static inline __attribute__((always_inline)) int has_group_leader_pid(struct task_struct *p)
+{
+ return p->pid == p->tgid;
+}
+
+static inline __attribute__((always_inline))
+int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+{
+ return p1->tgid == p2->tgid;
+}
+
+static inline __attribute__((always_inline)) struct task_struct *next_thread(const struct task_struct *p)
+{
+ return ({ const typeof( ((struct task_struct *)0)->thread_group ) *__mptr = (({ typeof(p->thread_group.next) _________p1 = (*(volatile typeof(p->thread_group.next) *)&(p->thread_group.next)); do { } while(0); (_________p1); })); (struct task_struct *)( (char *)__mptr - __builtin_offsetof(struct task_struct,thread_group) );});
+
+}
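+
+/*
+ * Editor's note: the one-liner above is the expanded form of
+ * list_entry() over an ACCESS_ONCE-style read of p->thread_group.next,
+ * i.e. next_thread() returns the next task_struct on p's thread_group
+ * list.
+ */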
+
+static inline __attribute__((always_inline)) int thread_group_empty(struct task_struct *p)
+{
+ return list_empty(&p->thread_group);
+}
+
+
+
+
+static inline __attribute__((always_inline)) int task_detached(struct task_struct *p)
+{
+ return p->exit_signal == -1;
+}
+# 2228 "include/linux/sched.h"
+static inline __attribute__((always_inline)) void task_lock(struct task_struct *p)
+{
+ _spin_lock(&p->alloc_lock);
+}
+
+static inline __attribute__((always_inline)) void task_unlock(struct task_struct *p)
+{
+ _spin_unlock(&p->alloc_lock);
+}
+
+extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags);
+
+static inline __attribute__((always_inline)) void unlock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags)
+{
+ do { ({ unsigned long __dummy; typeof(*flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); } while (0);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void setup_thread_stack(struct task_struct *p, struct task_struct *org)
+{
+ *((struct thread_info *)(p)->stack) = *((struct thread_info *)(org)->stack);
+ ((struct thread_info *)(p)->stack)->task = p;
+}
+
+static inline __attribute__((always_inline)) unsigned long *end_of_stack(struct task_struct *p)
+{
+ return (unsigned long *)(((struct thread_info *)(p)->stack) + 1);
+}
+
+
+
+static inline __attribute__((always_inline)) int object_is_on_stack(void *obj)
+{
+ void *stack = (((current_thread_info()->task))->stack);
+
+ return (obj >= stack) && (obj < (stack + 8192));
+}
+
+extern void thread_info_cache_init(void);
+
+
+static inline __attribute__((always_inline)) unsigned long stack_not_used(struct task_struct *p)
+{
+ unsigned long *n = end_of_stack(p);
+
+ do {
+ n++;
+ } while (!*n);
+
+ return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) void set_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
+}
+
+static inline __attribute__((always_inline)) void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
+}
+
+static inline __attribute__((always_inline)) int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ return test_and_set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
+}
+
+static inline __attribute__((always_inline)) int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ return test_and_clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
+}
+
+static inline __attribute__((always_inline)) int test_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ return test_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
+}
+
+static inline __attribute__((always_inline)) void set_tsk_need_resched(struct task_struct *tsk)
+{
+ set_tsk_thread_flag(tsk,2);
+}
+
+static inline __attribute__((always_inline)) void clear_tsk_need_resched(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk,2);
+}
+
+static inline __attribute__((always_inline)) int test_tsk_need_resched(struct task_struct *tsk)
+{
+ return __builtin_expect(!!(test_tsk_thread_flag(tsk,2)), 0);
+}
+
+static inline __attribute__((always_inline)) int restart_syscall(void)
+{
+ set_tsk_thread_flag((current_thread_info()->task), 1);
+ return -513;
+}
+
+static inline __attribute__((always_inline)) int signal_pending(struct task_struct *p)
+{
+ return __builtin_expect(!!(test_tsk_thread_flag(p,1)), 0);
+}
+
+static inline __attribute__((always_inline)) int __fatal_signal_pending(struct task_struct *p)
+{
+ return __builtin_expect(!!(sigismember(&p->pending.signal, 9)), 0);
+}
+
+static inline __attribute__((always_inline)) int fatal_signal_pending(struct task_struct *p)
+{
+ return signal_pending(p) && __fatal_signal_pending(p);
+}
+
+static inline __attribute__((always_inline)) int signal_pending_state(long state, struct task_struct *p)
+{
+ if (!(state & (1 | 128)))
+ return 0;
+ if (!signal_pending(p))
+ return 0;
+
+ return (state & 1) || __fatal_signal_pending(p);
+}
+
+static inline __attribute__((always_inline)) int need_resched(void)
+{
+ return __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 2)), 0);
+}
+# 2373 "include/linux/sched.h"
+extern int _cond_resched(void);
+
+
+
+
+
+
+extern int __cond_resched_lock(spinlock_t *lock);
+# 2393 "include/linux/sched.h"
+extern int __cond_resched_softirq(void);
+# 2405 "include/linux/sched.h"
+static inline __attribute__((always_inline)) int spin_needbreak(spinlock_t *lock)
+{
+
+
+
+ return 0;
+
+}
+
+
+
+
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
+
+static inline __attribute__((always_inline)) void thread_group_cputime_init(struct signal_struct *sig)
+{
+ sig->cputimer.cputime = (struct task_cputime) { .utime = (0UL), .stime = (0UL), .sum_exec_runtime = 0, };
+ do { static struct lock_class_key __key; __spin_lock_init((&sig->cputimer.lock), "&sig->cputimer.lock", &__key); } while (0);
+ sig->cputimer.running = 0;
+}
+
+static inline __attribute__((always_inline)) void thread_group_cputime_free(struct signal_struct *sig)
+{
+}
+
+
+
+
+
+
+
+extern void recalc_sigpending_and_wake(struct task_struct *t);
+extern void recalc_sigpending(void);
+
+extern void signal_wake_up(struct task_struct *t, int resume_stopped);
+# 2456 "include/linux/sched.h"
+static inline __attribute__((always_inline)) unsigned int task_cpu(const struct task_struct *p)
+{
+ return 0;
+}
+
+static inline __attribute__((always_inline)) void set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+}
+
+
+
+extern void arch_pick_mmap_layout(struct mm_struct *mm);
+
+
+extern void
+__trace_special(void *__tr, void *__data,
+ unsigned long arg1, unsigned long arg2, unsigned long arg3);
+# 2481 "include/linux/sched.h"
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
+
+extern void normalize_rt_tasks(void);
+
+
+
+extern struct task_group init_task_group;
+
+extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
+
+
+extern struct task_group *sched_create_group(struct task_group *parent);
+extern void sched_destroy_group(struct task_group *tg);
+extern void sched_move_task(struct task_struct *tsk);
+
+extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
+extern unsigned long sched_group_shares(struct task_group *tg);
+# 2512 "include/linux/sched.h"
+extern int task_can_switch_user(struct user_struct *up,
+ struct task_struct *tsk);
+# 2536 "include/linux/sched.h"
+static inline __attribute__((always_inline)) void add_rchar(struct task_struct *tsk, ssize_t amt)
+{
+}
+
+static inline __attribute__((always_inline)) void add_wchar(struct task_struct *tsk, ssize_t amt)
+{
+}
+
+static inline __attribute__((always_inline)) void inc_syscr(struct task_struct *tsk)
+{
+}
+
+static inline __attribute__((always_inline)) void inc_syscw(struct task_struct *tsk)
+{
+}
+# 2560 "include/linux/sched.h"
+extern void task_oncpu_function_call(struct task_struct *p,
+ void (*func) (void *info), void *info);
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void mm_update_next_owner(struct mm_struct *mm)
+{
+}
+
+static inline __attribute__((always_inline)) void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+{
+}
+# 16 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" 2
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/segment.h" 1
+# 20 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sections.h" 1
+# 11 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sections.h"
+extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
+
+extern unsigned long _ramstart, _ramend, _rambase;
+extern unsigned long memory_start, memory_end, physical_mem_end;
+
+
+
+
+
+
+extern char _stext_l1[], _etext_l1[], _text_l1_lma[], __attribute__((weak)) _text_l1_len[];
+extern char _sdata_l1[], _edata_l1[], _sbss_l1[], _ebss_l1[],
+ _data_l1_lma[], __attribute__((weak)) _data_l1_len[];
+extern char _sdata_b_l1[], _edata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
+ _data_b_l1_lma[], __attribute__((weak)) _data_b_l1_len[];
+extern char _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[],
+ _sbss_l2[], _ebss_l2[], _l2_lma[], __attribute__((weak)) _l2_len[];
+
+
+
+
+static inline __attribute__((always_inline)) int arch_is_kernel_text(unsigned long addr)
+{
+ return
+ ((0x14000 - 0x4000) &&
+ addr >= (unsigned long)_stext_l1 &&
+ addr < (unsigned long)_etext_l1)
+ ||
+ (0 &&
+ addr >= (unsigned long)_stext_l2 &&
+ addr < (unsigned long)_etext_l2);
+}
+
+
+static inline __attribute__((always_inline)) int arch_is_kernel_data(unsigned long addr)
+{
+ return
+ ((0x8000 - 0x4000) &&
+ addr >= (unsigned long)_sdata_l1 &&
+ addr < (unsigned long)_ebss_l1)
+ ||
+ (0x8000 &&
+ addr >= (unsigned long)_sdata_b_l1 &&
+ addr < (unsigned long)_ebss_b_l1)
+ ||
+ (0 &&
+ addr >= (unsigned long)_sdata_l2 &&
+ addr < (unsigned long)_ebss_l2);
+}
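+
+/*
+ * The bare integer literals in these two helpers are configuration-derived
+ * SRAM sizes folded in by the preprocessor (0x14000 - 0x4000 is likely the
+ * configured L1 instruction SRAM length); an arm reduced to "0 && ..." is an
+ * L1/L2 region absent from this configuration and is optimised away.
+ */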
+
+
+# 1 "include/asm-generic/sections.h" 1
+
+
+
+
+
+extern char _text[], _stext[], _etext[];
+extern char _data[], _sdata[], _edata[];
+extern char __bss_start[], __bss_stop[];
+extern char __init_begin[], __init_end[];
+extern char _sinittext[], _einittext[];
+extern char _end[];
+extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
+extern char __kprobes_text_start[], __kprobes_text_end[];
+extern char __initdata_begin[], __initdata_end[];
+extern char __start_rodata[], __end_rodata[];
+
+
+extern char __ctors_start[], __ctors_end[];
+# 63 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/sections.h" 2
+# 21 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h" 2
+
+
+
+
+static inline __attribute__((always_inline)) void set_fs(mm_segment_t fs)
+{
+ current_thread_info()->addr_limit = fs;
+}
+# 37 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
+static inline __attribute__((always_inline)) int is_in_rom(unsigned long addr)
+{
+# 47 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
+ if ((addr < _ramstart) || (addr >= _ramend))
+ return (1);
+
+
+ return (0);
+}
+# 63 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
+extern int _access_ok(unsigned long addr, unsigned long size);
+# 79 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
+struct exception_table_entry {
+ unsigned long insn, fixup;
+};
+# 123 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
+static inline __attribute__((always_inline)) int bad_user_access_length(void)
+{
+ panic("bad_user_access_length");
+ return -1;
+}
+# 194 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
+static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
+copy_from_user(void *to, const void *from, unsigned long n)
+{
+ if (_access_ok((unsigned long)(from), (n)))
+ memcpy(to, from, n);
+ else
+ return n;
+ return 0;
+}
+
+static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
+copy_to_user(void *to, const void *from, unsigned long n)
+{
+ if (_access_ok((unsigned long)(to), (n)))
+ memcpy(to, from, n);
+ else
+ return n;
+ return 0;
+}
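+
+/*
+ * On this no-MMU Blackfin configuration the uaccess copy helpers reduce to an
+ * _access_ok() range check plus a plain memcpy().  As elsewhere in the
+ * kernel, the return value is the number of bytes NOT copied, so callers
+ * treat any nonzero result as a fault.  Illustrative caller only (kbuf, ubuf
+ * and len are hypothetical names):
+ *
+ *	if (copy_from_user(kbuf, ubuf, len))
+ *		return -EFAULT;		// -14 in the expanded constants above
+ */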
+
+
+
+
+
+static inline __attribute__((always_inline)) long __attribute__((warn_unused_result))
+strncpy_from_user(char *dst, const char *src, long count)
+{
+ char *tmp;
+ if (!_access_ok((unsigned long)(src), (1)))
+ return -14;
+ strncpy(dst, src, count);
+ for (tmp = dst; *tmp && count > 0; tmp++, count--) ;
+ return (tmp - dst);
+}
+# 240 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
+static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) strnlen_user(const char *src, long n)
+{
+ if (!_access_ok((unsigned long)(src), (1)))
+ return 0;
+ return strnlen(src, n) + 1;
+}
+
+static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) strlen_user(const char *src)
+{
+ if (!_access_ok((unsigned long)(src), (1)))
+ return 0;
+ return strlen(src) + 1;
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
+__clear_user(void *to, unsigned long n)
+{
+ if (!_access_ok((unsigned long)(to), (n)))
+ return n;
+ memset(to, 0, n);
+ return 0;
+}
+# 276 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/uaccess.h"
+enum {
+ BFIN_MEM_ACCESS_CORE = 0,
+ BFIN_MEM_ACCESS_CORE_ONLY,
+ BFIN_MEM_ACCESS_DMA,
+ BFIN_MEM_ACCESS_IDMA,
+ BFIN_MEM_ACCESS_ITEST,
+};
+
+
+
+
+
+
+int bfin_mem_access_type(unsigned long addr, unsigned long size);
+# 6 "include/linux/uaccess.h" 2
+# 16 "include/linux/uaccess.h"
+static inline __attribute__((always_inline)) void pagefault_disable(void)
+{
+ do { (current_thread_info()->preempt_count) += (1); } while (0);
+
+
+
+
+ __asm__ __volatile__("": : :"memory");
+}
+
+static inline __attribute__((always_inline)) void pagefault_enable(void)
+{
+
+
+
+
+ __asm__ __volatile__("": : :"memory");
+ do { (current_thread_info()->preempt_count) -= (1); } while (0);
+
+
+
+ __asm__ __volatile__("": : :"memory");
+ do { } while (0);
+}
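+
+/*
+ * pagefault_disable()/pagefault_enable() simply bump the thread's
+ * preempt_count with compiler barriers around the critical section; the
+ * fault path consults that count to decide whether it may sleep.  The
+ * trailing "do { } while (0)" is a compiled-out preemption check.
+ */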
+
+
+
+static inline __attribute__((always_inline)) unsigned long __copy_from_user_inatomic_nocache(void *to,
+ const void *from, unsigned long n)
+{
+ return copy_from_user(to, from, n);
+}
+
+static inline __attribute__((always_inline)) unsigned long __copy_from_user_nocache(void *to,
+ const void *from, unsigned long n)
+{
+ return copy_from_user(to, from, n);
+}
+# 96 "include/linux/uaccess.h"
+extern long probe_kernel_read(void *dst, void *src, size_t size);
+# 107 "include/linux/uaccess.h"
+extern long probe_kernel_write(void *dst, void *src, size_t size);
+# 7 "include/linux/highmem.h" 2
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h" 1
+# 15 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h"
+extern void blackfin_icache_flush_range(unsigned long start_address, unsigned long end_address);
+extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
+extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address);
+extern void blackfin_dflush_page(void *page);
+extern void blackfin_invalidate_entire_dcache(void);
+extern void blackfin_invalidate_entire_icache(void);
+# 37 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h"
+static inline __attribute__((always_inline)) void flush_icache_range(unsigned start, unsigned end)
+{
+# 56 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h"
+ SSYNC();
+
+ if (end <= physical_mem_end) {
+ blackfin_icache_flush_range(start, end);
+ do { } while (0);
+ }
+
+
+
+
+
+
+
+}
+# 91 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/cacheflush.h"
+extern unsigned long reserved_mem_dcache_on;
+extern unsigned long reserved_mem_icache_on;
+
+static inline __attribute__((always_inline)) int bfin_addr_dcacheable(unsigned long addr)
+{
+
+ if (addr < (_ramend - (1024 * 1024)))
+ return 1;
+
+
+ if (reserved_mem_dcache_on &&
+ addr >= _ramend && addr < physical_mem_end)
+ return 1;
+
+
+
+
+
+
+ return 0;
+}
+# 9 "include/linux/highmem.h" 2
+
+
+static inline __attribute__((always_inline)) void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+}
+
+
+
+static inline __attribute__((always_inline)) void flush_kernel_dcache_page(struct page *page)
+{
+}
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/kmap_types.h" 1
+# 1 "include/asm-generic/kmap_types.h" 1
+# 10 "include/asm-generic/kmap_types.h"
+enum km_type {
+ KM_BOUNCE_READ,
+ KM_SKB_SUNRPC_DATA,
+ KM_SKB_DATA_SOFTIRQ,
+ KM_USER0,
+ KM_USER1,
+ KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
+ KM_PTE0,
+ KM_PTE1,
+ KM_IRQ0,
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+ KM_SYNC_ICACHE,
+ KM_SYNC_DCACHE,
+
+ KM_UML_USERCOPY,
+ KM_IRQ_PTE,
+ KM_NMI,
+ KM_NMI_PTE,
+ KM_TYPE_NR
+};
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/kmap_types.h" 2
+# 23 "include/linux/highmem.h" 2
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void debug_kmap_atomic(enum km_type type)
+{
+}
+# 47 "include/linux/highmem.h"
+static inline __attribute__((always_inline)) unsigned int nr_free_highpages(void) { return 0; }
+
+
+
+
+static inline __attribute__((always_inline)) void *kmap(struct page *page)
+{
+ do { __might_sleep("include/linux/highmem.h", 54, 0); _cond_resched(); } while (0);
+ return lowmem_page_address(page);
+}
+
+static inline __attribute__((always_inline)) void kunmap(struct page *page)
+{
+}
+
+static inline __attribute__((always_inline)) void *kmap_atomic(struct page *page, enum km_type idx)
+{
+ pagefault_disable();
+ return lowmem_page_address(page);
+}
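+
+/*
+ * With CONFIG_HIGHMEM off, kmap()/kmap_atomic() degenerate to
+ * lowmem_page_address(): every page is permanently mapped, so only the
+ * pagefault_disable()/pagefault_enable() pairing remains.  That is why the
+ * kunmap_atomic() calls below expand to a bare "pagefault_enable()".
+ */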
+# 80 "include/linux/highmem.h"
+static inline __attribute__((always_inline)) void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ void *addr = kmap_atomic(page, KM_USER0);
+ memset((addr), 0, (1UL << 12));
+ do { pagefault_enable(); } while (0);
+}
+# 103 "include/linux/highmem.h"
+static inline __attribute__((always_inline)) struct page *
+__alloc_zeroed_user_highpage(gfp_t movableflags,
+ struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ struct page *page = alloc_pages_node((((void)(0),0)), ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( gfp_t)0x20000u) | (( gfp_t)0x02u)) | movableflags, 0);
+
+
+ if (page)
+ clear_user_highpage(page, vaddr);
+
+ return page;
+}
+# 126 "include/linux/highmem.h"
+static inline __attribute__((always_inline)) struct page *
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ return __alloc_zeroed_user_highpage((( gfp_t)0x08u), vma, vaddr);
+}
+
+static inline __attribute__((always_inline)) void clear_highpage(struct page *page)
+{
+ void *kaddr = kmap_atomic(page, KM_USER0);
+ memset((kaddr), 0, (1UL << 12));
+ do { pagefault_enable(); } while (0);
+}
+
+static inline __attribute__((always_inline)) void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
+{
+ void *kaddr = kmap_atomic(page, KM_USER0);
+
+ do { if (__builtin_expect(!!(end1 > (1UL << 12) || end2 > (1UL << 12)), 0)) do { asm volatile( "1: .hword %0\n" " .section __bug_table,\"a\",@progbits\n" "2: .long 1b\n" " .long %1\n" " .short %2\n" " .short %3\n" " .org 2b + %4\n" " .previous" : : "i"(0xefcd), "i"("include/linux/highmem.h"), "i"(146), "i"(0), "i"(sizeof(struct bug_entry))); for (;;); } while (0); } while(0);
+
+ if (end1 > start1)
+ memset(kaddr + start1, 0, end1 - start1);
+
+ if (end2 > start2)
+ memset(kaddr + start2, 0, end2 - start2);
+
+ do { pagefault_enable(); } while (0);
+ do { } while (0);
+}
+
+static inline __attribute__((always_inline)) void zero_user_segment(struct page *page,
+ unsigned start, unsigned end)
+{
+ zero_user_segments(page, start, end, 0, 0);
+}
+
+static inline __attribute__((always_inline)) void zero_user(struct page *page,
+ unsigned start, unsigned size)
+{
+ zero_user_segments(page, start, start + size, 0, 0);
+}
+
+static inline __attribute__((always_inline)) void memclear_highpage_flush(struct page *page,
+ unsigned int offset, unsigned int size)
+{
+ zero_user(page, offset, size);
+}
+
+
+
+static inline __attribute__((always_inline)) void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_atomic(from, KM_USER0);
+ vto = kmap_atomic(to, KM_USER1);
+ memcpy((vto), (vfrom), (1UL << 12));
+ do { pagefault_enable(); } while (0);
+ do { pagefault_enable(); } while (0);
+}
+
+
+
+static inline __attribute__((always_inline)) void copy_highpage(struct page *to, struct page *from)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_atomic(from, KM_USER0);
+ vto = kmap_atomic(to, KM_USER1);
+ memcpy((vto), (vfrom), (1UL << 12));
+ do { pagefault_enable(); } while (0);
+ do { pagefault_enable(); } while (0);
+}
+# 11 "include/linux/pagemap.h" 2
+
+
+
+
+# 1 "include/linux/hardirq.h" 1
+# 9 "include/linux/hardirq.h"
+# 1 "include/linux/ftrace_irq.h" 1
+# 9 "include/linux/ftrace_irq.h"
+static inline __attribute__((always_inline)) void ftrace_nmi_enter(void) { }
+static inline __attribute__((always_inline)) void ftrace_nmi_exit(void) { }
+# 10 "include/linux/hardirq.h" 2
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/hardirq.h" 1
+# 12 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/hardirq.h"
+extern void ack_bad_irq(unsigned int irq);
+
+
+
+
+
+# 1 "include/asm-generic/hardirq.h" 1
+
+
+
+
+
+# 1 "include/linux/irq.h" 1
+# 21 "include/linux/irq.h"
+# 1 "include/linux/irqreturn.h" 1
+# 10 "include/linux/irqreturn.h"
+enum irqreturn {
+ IRQ_NONE,
+ IRQ_HANDLED,
+ IRQ_WAKE_THREAD,
+};
+
+typedef enum irqreturn irqreturn_t;
+# 22 "include/linux/irq.h" 2
+# 1 "include/linux/irqnr.h" 1
+# 26 "include/linux/irqnr.h"
+extern int nr_irqs;
+extern struct irq_desc *irq_to_desc(unsigned int irq);
+# 23 "include/linux/irq.h" 2
+
+
+
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq_regs.h" 1
+# 1 "include/asm-generic/irq_regs.h" 1
+# 21 "include/asm-generic/irq_regs.h"
+extern __attribute__((section(".discard"), unused)) char __pcpu_scope___irq_regs; extern __attribute__((section(".data" ""))) __typeof__(struct pt_regs *) per_cpu____irq_regs;
+
+static inline __attribute__((always_inline)) struct pt_regs *get_irq_regs(void)
+{
+ return per_cpu____irq_regs;
+}
+
+static inline __attribute__((always_inline)) struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+ struct pt_regs *old_regs, **pp_regs = &per_cpu____irq_regs;
+
+ old_regs = *pp_regs;
+ *pp_regs = new_regs;
+ return old_regs;
+}
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/irq_regs.h" 2
+# 30 "include/linux/irq.h" 2
+
+struct irq_desc;
+typedef void (*irq_flow_handler_t)(unsigned int irq,
+ struct irq_desc *desc);
+# 83 "include/linux/irq.h"
+struct proc_dir_entry;
+struct msi_desc;
+# 111 "include/linux/irq.h"
+struct irq_chip {
+ const char *name;
+ unsigned int (*startup)(unsigned int irq);
+ void (*shutdown)(unsigned int irq);
+ void (*enable)(unsigned int irq);
+ void (*disable)(unsigned int irq);
+
+ void (*ack)(unsigned int irq);
+ void (*mask)(unsigned int irq);
+ void (*mask_ack)(unsigned int irq);
+ void (*unmask)(unsigned int irq);
+ void (*eoi)(unsigned int irq);
+
+ void (*end)(unsigned int irq);
+ int (*set_affinity)(unsigned int irq,
+ const struct cpumask *dest);
+ int (*retrigger)(unsigned int irq);
+ int (*set_type)(unsigned int irq, unsigned int flow_type);
+ int (*set_wake)(unsigned int irq, unsigned int on);
+
+ void (*bus_lock)(unsigned int irq);
+ void (*bus_sync_unlock)(unsigned int irq);
+# 142 "include/linux/irq.h"
+ const char *typename;
+};
+
+struct timer_rand_state;
+struct irq_2_iommu;
+# 175 "include/linux/irq.h"
+struct irq_desc {
+ unsigned int irq;
+ struct timer_rand_state *timer_rand_state;
+ unsigned int *kstat_irqs;
+
+
+
+ irq_flow_handler_t handle_irq;
+ struct irq_chip *chip;
+ struct msi_desc *msi_desc;
+ void *handler_data;
+ void *chip_data;
+ struct irqaction *action;
+ unsigned int status;
+
+ unsigned int depth;
+ unsigned int wake_depth;
+ unsigned int irq_count;
+ unsigned long last_unhandled;
+ unsigned int irqs_unhandled;
+ spinlock_t lock;
+
+
+
+
+
+
+
+ atomic_t threads_active;
+ wait_queue_head_t wait_for_threads;
+
+ struct proc_dir_entry *dir;
+
+ const char *name;
+} ;
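+
+/*
+ * One irq_desc exists per interrupt line (the static array below is sized by
+ * the platform's interrupt count); it ties together the flow handler, the
+ * irq_chip that drives the hardware, and the chain of irqaction handlers
+ * registered via request_irq().
+ */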
+
+extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
+ struct irq_desc *desc, int node);
+extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
+
+
+extern struct irq_desc irq_desc[(48 +1)];
+
+
+
+
+
+static inline __attribute__((always_inline)) struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
+{
+ return desc;
+}
+
+
+extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
+
+
+
+
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/hw_irq.h" 1
+# 1 "include/asm-generic/hw_irq.h" 1
+# 1 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/hw_irq.h" 2
+# 234 "include/linux/irq.h" 2
+
+extern int setup_irq(unsigned int irq, struct irqaction *new);
+extern void remove_irq(unsigned int irq, struct irqaction *act);
+# 270 "include/linux/irq.h"
+extern int no_irq_affinity;
+
+static inline __attribute__((always_inline)) int irq_balancing_disabled(unsigned int irq)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ return desc->status & 0x00400000;
+}
+
+
+extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
+
+
+
+
+
+extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_nested_irq(unsigned int irq);
+# 308 "include/linux/irq.h"
+static inline __attribute__((always_inline)) void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+
+ desc->handle_irq(irq, desc);
+
+
+
+
+
+
+}
+
+static inline __attribute__((always_inline)) void generic_handle_irq(unsigned int irq)
+{
+ generic_handle_irq_desc(irq, irq_to_desc(irq));
+}
+
+
+extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret);
+
+
+void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+
+
+extern int noirqdebug_setup(char *str);
+
+
+extern int can_request_irq(unsigned int irq, unsigned long irqflags);
+
+
+extern struct irq_chip no_irq_chip;
+extern struct irq_chip dummy_irq_chip;
+
+extern void
+set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+ irq_flow_handler_t handle);
+extern void
+set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+ irq_flow_handler_t handle, const char *name);
+
+extern void
+__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+ const char *name);
+
+
+static inline __attribute__((always_inline)) void __set_irq_handler_unlocked(int irq,
+ irq_flow_handler_t handler)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ desc->handle_irq = handler;
+}
+
+
+
+
+static inline __attribute__((always_inline)) void
+set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
+{
+ __set_irq_handler(irq, handle, 0, ((void *)0));
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void
+set_irq_chained_handler(unsigned int irq,
+ irq_flow_handler_t handle)
+{
+ __set_irq_handler(irq, handle, 1, ((void *)0));
+}
+
+extern void set_irq_nested_thread(unsigned int irq, int nest);
+
+extern void set_irq_noprobe(unsigned int irq);
+extern void set_irq_probe(unsigned int irq);
+
+
+extern unsigned int create_irq_nr(unsigned int irq_want, int node);
+extern int create_irq(void);
+extern void destroy_irq(unsigned int irq);
+
+
+static inline __attribute__((always_inline)) int irq_has_action(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ return desc->action != ((void *)0);
+}
+
+
+extern void dynamic_irq_init(unsigned int irq);
+extern void dynamic_irq_cleanup(unsigned int irq);
+
+
+extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
+extern int set_irq_data(unsigned int irq, void *data);
+extern int set_irq_chip_data(unsigned int irq, void *data);
+extern int set_irq_type(unsigned int irq, unsigned int type);
+extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
+# 500 "include/linux/irq.h"
+static inline __attribute__((always_inline)) bool alloc_desc_masks(struct irq_desc *desc, int node,
+ bool boot)
+{
+ return true;
+}
+
+static inline __attribute__((always_inline)) void init_desc_masks(struct irq_desc *desc)
+{
+}
+
+static inline __attribute__((always_inline)) void init_copy_desc_masks(struct irq_desc *old_desc,
+ struct irq_desc *new_desc)
+{
+}
+
+static inline __attribute__((always_inline)) void free_desc_masks(struct irq_desc *old_desc,
+ struct irq_desc *new_desc)
+{
+}
+# 7 "include/asm-generic/hardirq.h" 2
+
+typedef struct {
+ unsigned int __softirq_pending;
+} irq_cpustat_t;
+
+# 1 "include/linux/irq_cpustat.h" 1
+# 20 "include/linux/irq_cpustat.h"
+extern irq_cpustat_t irq_stat[];
+# 13 "include/asm-generic/hardirq.h" 2
+# 19 "/usr/local/src/blackfin/git/linux-kernel/arch/blackfin/include/asm/hardirq.h" 2
+# 11 "include/linux/hardirq.h" 2
+# 128 "include/linux/hardirq.h"
+extern void synchronize_irq(unsigned int irq);
+
+
+
+
+struct task_struct;
+
+
+static inline __attribute__((always_inline)) void account_system_vtime(struct task_struct *tsk)
+{
+}
+
+
+
+extern void rcu_irq_enter(void);
+extern void rcu_irq_exit(void);
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
+# 169 "include/linux/hardirq.h"
+extern void irq_enter(void);
+# 184 "include/linux/hardirq.h"
+extern void irq_exit(void);
+# 16 "include/linux/pagemap.h" 2
+
+
+
+
+
+enum mapping_flags {
+ AS_EIO = 22 + 0,
+ AS_ENOSPC = 22 + 1,
+ AS_MM_ALL_LOCKS = 22 + 2,
+ AS_UNEVICTABLE = 22 + 3,
+};
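+
+/*
+ * address_space->flags doubles as storage: the low 22 bits hold the
+ * mapping's GFP allocation mask (see mapping_gfp_mask() below), and the AS_*
+ * error/state flags occupy the bits above it, hence the "22 +" offsets here.
+ */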
+
+static inline __attribute__((always_inline)) void mapping_set_error(struct address_space *mapping, int error)
+{
+ if (__builtin_expect(!!(error), 0)) {
+ if (error == -28)
+ set_bit(AS_ENOSPC, &mapping->flags);
+ else
+ set_bit(AS_EIO, &mapping->flags);
+ }
+}
+
+static inline __attribute__((always_inline)) void mapping_set_unevictable(struct address_space *mapping)
+{
+ set_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline __attribute__((always_inline)) void mapping_clear_unevictable(struct address_space *mapping)
+{
+ clear_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline __attribute__((always_inline)) int mapping_unevictable(struct address_space *mapping)
+{
+ if (__builtin_expect(!!(mapping), 1))
+ return test_bit(AS_UNEVICTABLE, &mapping->flags);
+ return !!mapping;
+}
+
+static inline __attribute__((always_inline)) gfp_t mapping_gfp_mask(struct address_space * mapping)
+{
+ return ( gfp_t)mapping->flags & (( gfp_t)((1 << 22) - 1));
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
+{
+ m->flags = (m->flags & ~( unsigned long)(( gfp_t)((1 << 22) - 1))) |
+ ( unsigned long)mask;
+}
+# 85 "include/linux/pagemap.h"
+void release_pages(struct page **pages, int nr, int cold);
+# 131 "include/linux/pagemap.h"
+static inline __attribute__((always_inline)) int page_cache_get_speculative(struct page *page)
+{
+ do { } while (0);
+# 148 "include/linux/pagemap.h"
+ do { } while (0);
+ atomic_inc(&page->_count);
+# 161 "include/linux/pagemap.h"
+ do { } while (0);
+
+ return 1;
+}
+
+
+
+
+static inline __attribute__((always_inline)) int page_cache_add_speculative(struct page *page, int count)
+{
+ do { } while (0);
+
+
+
+
+
+ do { } while (0);
+ atomic_add(count, &page->_count);
+
+
+
+
+
+ do { } while (0);
+
+ return 1;
+}
+
+static inline __attribute__((always_inline)) int page_freeze_refs(struct page *page, int count)
+{
+ return __builtin_expect(!!((((__typeof__(*((&((&page->_count)->counter)))))__cmpxchg_local_generic(((&((&page->_count)->counter))), (unsigned long)(((count))), (unsigned long)(((0))), sizeof(*((&((&page->_count)->counter))))))) == count), 1);
+}
+
+static inline __attribute__((always_inline)) void page_unfreeze_refs(struct page *page, int count)
+{
+ do { } while (0);
+ do { } while (0);
+
+ (((&page->_count)->counter) = (count));
+}
+
+
+
+
+static inline __attribute__((always_inline)) struct page *__page_cache_alloc(gfp_t gfp)
+{
+ return alloc_pages_node((((void)(0),0)), gfp, 0);
+}
+
+
+static inline __attribute__((always_inline)) struct page *page_cache_alloc(struct address_space *x)
+{
+ return __page_cache_alloc(mapping_gfp_mask(x));
+}
+
+static inline __attribute__((always_inline)) struct page *page_cache_alloc_cold(struct address_space *x)
+{
+ return __page_cache_alloc(mapping_gfp_mask(x)|(( gfp_t)0x100u));
+}
+
+typedef int filler_t(void *, struct page *);
+
+extern struct page * find_get_page(struct address_space *mapping,
+ unsigned long index);
+extern struct page * find_lock_page(struct address_space *mapping,
+ unsigned long index);
+extern struct page * find_or_create_page(struct address_space *mapping,
+ unsigned long index, gfp_t gfp_mask);
+unsigned find_get_pages(struct address_space *mapping, unsigned long start,
+ unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_contig(struct address_space *mapping, unsigned long start,
+ unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_tag(struct address_space *mapping, unsigned long *index,
+ int tag, unsigned int nr_pages, struct page **pages);
+
+struct page *grab_cache_page_write_begin(struct address_space *mapping,
+ unsigned long index, unsigned flags);
+
+
+
+
+static inline __attribute__((always_inline)) struct page *grab_cache_page(struct address_space *mapping,
+ unsigned long index)
+{
+ return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
+}
+
+extern struct page * grab_cache_page_nowait(struct address_space *mapping,
+ unsigned long index);
+extern struct page * read_cache_page_async(struct address_space *mapping,
+ unsigned long index, filler_t *filler,
+ void *data);
+extern struct page * read_cache_page(struct address_space *mapping,
+ unsigned long index, filler_t *filler,
+ void *data);
+extern int read_cache_pages(struct address_space *mapping,
+ struct list_head *pages, filler_t *filler, void *data);
+
+static inline __attribute__((always_inline)) struct page *read_mapping_page_async(
+ struct address_space *mapping,
+ unsigned long index, void *data)
+{
+ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+ return read_cache_page_async(mapping, index, filler, data);
+}
+
+static inline __attribute__((always_inline)) struct page *read_mapping_page(struct address_space *mapping,
+ unsigned long index, void *data)
+{
+ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+ return read_cache_page(mapping, index, filler, data);
+}
+
+
+
+
+static inline __attribute__((always_inline)) loff_t page_offset(struct page *page)
+{
+ return ((loff_t)page->index) << 12;
+}
+
+static inline __attribute__((always_inline)) unsigned long linear_page_index(struct vm_area_struct *vma,
+ unsigned long address)
+{
+ unsigned long pgoff = (address - vma->vm_start) >> 12;
+ pgoff += vma->vm_pgoff;
+ return pgoff >> (12 - 12);
+}
+
+extern void __lock_page(struct page *page);
+extern int __lock_page_killable(struct page *page);
+extern void __lock_page_nosync(struct page *page);
+extern void unlock_page(struct page *page);
+
+static inline __attribute__((always_inline)) void __set_page_locked(struct page *page)
+{
+ __set_bit(PG_locked, &page->flags);
+}
+
+static inline __attribute__((always_inline)) void __clear_page_locked(struct page *page)
+{
+ __clear_bit(PG_locked, &page->flags);
+}
+
+static inline __attribute__((always_inline)) int trylock_page(struct page *page)
+{
+ return (__builtin_expect(!!(!test_and_set_bit(PG_locked, &page->flags)), 1));
+}
+
+
+
+
+static inline __attribute__((always_inline)) void lock_page(struct page *page)
+{
+ do { __might_sleep("include/linux/pagemap.h", 315, 0); _cond_resched(); } while (0);
+ if (!trylock_page(page))
+ __lock_page(page);
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int lock_page_killable(struct page *page)
+{
+ do { __might_sleep("include/linux/pagemap.h", 327, 0); _cond_resched(); } while (0);
+ if (!trylock_page(page))
+ return __lock_page_killable(page);
+ return 0;
+}
+
+
+
+
+
+static inline __attribute__((always_inline)) void lock_page_nosync(struct page *page)
+{
+ do { __might_sleep("include/linux/pagemap.h", 339, 0); _cond_resched(); } while (0);
+ if (!trylock_page(page))
+ __lock_page_nosync(page);
+}
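+
+/*
+ * lock_page() and its variants try the lockless test_and_set_bit fast path
+ * first and fall back to the sleeping __lock_page*() slow paths only when
+ * PG_locked is already held; the __might_sleep()/_cond_resched() expansion
+ * marks them as possibly blocking.
+ */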
+
+
+
+
+
+extern void wait_on_page_bit(struct page *page, int bit_nr);
+# 357 "include/linux/pagemap.h"
+static inline __attribute__((always_inline)) void wait_on_page_locked(struct page *page)
+{
+ if (PageLocked(page))
+ wait_on_page_bit(page, PG_locked);
+}
+
+
+
+
+static inline __attribute__((always_inline)) void wait_on_page_writeback(struct page *page)
+{
+ if (PageWriteback(page))
+ wait_on_page_bit(page, PG_writeback);
+}
+
+extern void end_page_writeback(struct page *page);
+
+
+
+
+extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
+
+
+
+
+
+
+
+static inline __attribute__((always_inline)) int fault_in_pages_writeable(char *uaddr, int size)
+{
+ int ret;
+
+ if (__builtin_expect(!!(size == 0), 0))
+ return 0;
+
+
+
+
+
+ ret = ({ int _err = 0; typeof(*(uaddr)) _x = (0); typeof(*(uaddr)) *_p = (uaddr); if (!_access_ok((unsigned long)(_p), (sizeof(*(_p))))) { _err = -14; } else { switch (sizeof (*(_p))) { case 1: __asm__ ("B""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 2: __asm__ ("W""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 4: __asm__ ("""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 8: { long _xl, _xh; _xl = ((long *)&_x)[0]; _xh = ((long *)&_x)[1]; __asm__ ("""[%1] = %0;\n\t" : :"d" (_xl),"a" (((unsigned long *)(((long *)_p)+0))) : "memory"); __asm__ ("""[%1] = %0;\n\t" : :"d" (_xh),"a" (((unsigned long *)(((long *)_p)+1))) : "memory"); } break; default: _err = (printk("<6>" "put_user_bad %s:%d %s\n", "include/linux/pagemap.h", 396, __func__), bad_user_access_length(), (-14)); break; } } _err; });
+ if (ret == 0) {
+ char *end = uaddr + size - 1;
+
+
+
+
+
+ if (((unsigned long)uaddr & (~((1UL << 12)-1))) !=
+ ((unsigned long)end & (~((1UL << 12)-1))))
+ ret = ({ int _err = 0; typeof(*(end)) _x = (0); typeof(*(end)) *_p = (end); if (!_access_ok((unsigned long)(_p), (sizeof(*(_p))))) { _err = -14; } else { switch (sizeof (*(_p))) { case 1: __asm__ ("B""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 2: __asm__ ("W""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 4: __asm__ ("""[%1] = %0;\n\t" : :"d" (_x),"a" (((unsigned long *)(_p))) : "memory"); break; case 8: { long _xl, _xh; _xl = ((long *)&_x)[0]; _xh = ((long *)&_x)[1]; __asm__ ("""[%1] = %0;\n\t" : :"d" (_xl),"a" (((unsigned long *)(((long *)_p)+0))) : "memory"); __asm__ ("""[%1] = %0;\n\t" : :"d" (_xh),"a" (((unsigned long *)(((long *)_p)+1))) : "memory"); } break; default: _err = (printk("<6>" "put_user_bad %s:%d %s\n", "include/linux/pagemap.h", 406, __func__), bad_user_access_length(), (-14)); break; } } _err; });
+ }
+ return ret;
+}
+
+static inline __attribute__((always_inline)) int fault_in_pages_readable(const char *uaddr, int size)
+{
+ volatile char c;
+ int ret;
+
+ if (__builtin_expect(!!(size == 0), 0))
+ return 0;
+
+ ret = ({ int _err = 0; unsigned long _val = 0; const typeof(*(uaddr)) *_p = (uaddr); const size_t ptr_size = sizeof(*(_p)); if (__builtin_expect(!!(_access_ok((unsigned long)(_p), (ptr_size))), 1)) { ((void)(sizeof(struct { int:-!!(ptr_size >= 8); }))); switch (ptr_size) { case 1: ({ __asm__ __volatile__ ( "%0 =" "B" "[%1]" "(Z)" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; case 2: ({ __asm__ __volatile__ ( "%0 =" "W" "[%1]" "(Z)" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; case 4: ({ __asm__ __volatile__ ( "%0 =" "" "[%1]" "" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; } } else _err = -14; c = (typeof(*(uaddr)))_val; _err; });
+ if (ret == 0) {
+ const char *end = uaddr + size - 1;
+
+ if (((unsigned long)uaddr & (~((1UL << 12)-1))) !=
+ ((unsigned long)end & (~((1UL << 12)-1))))
+ ret = ({ int _err = 0; unsigned long _val = 0; const typeof(*(end)) *_p = (end); const size_t ptr_size = sizeof(*(_p)); if (__builtin_expect(!!(_access_ok((unsigned long)(_p), (ptr_size))), 1)) { ((void)(sizeof(struct { int:-!!(ptr_size >= 8); }))); switch (ptr_size) { case 1: ({ __asm__ __volatile__ ( "%0 =" "B" "[%1]" "(Z)" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; case 2: ({ __asm__ __volatile__ ( "%0 =" "W" "[%1]" "(Z)" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; case 4: ({ __asm__ __volatile__ ( "%0 =" "" "[%1]" "" ";" : "=d" (_val) : "a" (((unsigned long *)(_p)))); }); break; } } else _err = -14; c = (typeof(*(end)))_val; _err; });
+ }
+ return ret;
+}
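+
+/*
+ * The long single-line statements above are the expanded Blackfin
+ * __put_user()/__get_user() macros: they probe the first byte of the user
+ * buffer and, when the range crosses a page boundary, the last byte as well.
+ * These helpers are typically used to pre-touch a user buffer before code
+ * that cannot tolerate a fault; on no-MMU hardware the probe amounts to an
+ * _access_ok() check plus a single load or store.
+ */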
+
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+ unsigned long index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+ unsigned long index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
+
+
+
+
+
+static inline __attribute__((always_inline)) int add_to_page_cache(struct page *page,
+ struct address_space *mapping, unsigned long offset, gfp_t gfp_mask)
+{
+ int error;
+
+ __set_page_locked(page);
+ error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+ if (__builtin_expect(!!(error), 0))
+ __clear_page_locked(page);
+ return error;
+}
+# 14 "include/linux/buffer_head.h" 2
+
+
+
+
+
+enum bh_state_bits {
+ BH_Uptodate,
+ BH_Dirty,
+ BH_Lock,
+ BH_Req,
+ BH_Uptodate_Lock,
+
+
+
+ BH_Mapped,
+ BH_New,
+ BH_Async_Read,
+ BH_Async_Write,
+ BH_Delay,
+ BH_Boundary,
+ BH_Write_EIO,
+ BH_Ordered,
+ BH_Eopnotsupp,
+ BH_Unwritten,
+ BH_Quiet,
+
+ BH_PrivateStart,
+
+
+};
+
+
+
+struct page;
+struct buffer_head;
+struct address_space;
+typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
+# 61 "include/linux/buffer_head.h"
+struct buffer_head {
+ unsigned long b_state;
+ struct buffer_head *b_this_page;
+ struct page *b_page;
+
+ sector_t b_blocknr;
+ size_t b_size;
+ char *b_data;
+
+ struct block_device *b_bdev;
+ bh_end_io_t *b_end_io;
+ void *b_private;
+ struct list_head b_assoc_buffers;
+ struct address_space *b_assoc_map;
+
+ atomic_t b_count;
+};
+# 115 "include/linux/buffer_head.h"
+static inline __attribute__((always_inline)) void set_buffer_uptodate(struct buffer_head *bh) { set_bit(BH_Uptodate, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_uptodate(struct buffer_head *bh) { clear_bit(BH_Uptodate, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_uptodate(const struct buffer_head *bh) { return test_bit(BH_Uptodate, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_dirty(struct buffer_head *bh) { set_bit(BH_Dirty, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_dirty(struct buffer_head *bh) { clear_bit(BH_Dirty, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_dirty(const struct buffer_head *bh) { return test_bit(BH_Dirty, &(bh)->b_state); }
+static inline __attribute__((always_inline)) int test_set_buffer_dirty(struct buffer_head *bh) { return test_and_set_bit(BH_Dirty, &(bh)->b_state); } static inline __attribute__((always_inline)) int test_clear_buffer_dirty(struct buffer_head *bh) { return test_and_clear_bit(BH_Dirty, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_locked(struct buffer_head *bh) { set_bit(BH_Lock, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_locked(struct buffer_head *bh) { clear_bit(BH_Lock, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_locked(const struct buffer_head *bh) { return test_bit(BH_Lock, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_req(struct buffer_head *bh) { set_bit(BH_Req, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_req(struct buffer_head *bh) { clear_bit(BH_Req, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_req(const struct buffer_head *bh) { return test_bit(BH_Req, &(bh)->b_state); }
+static inline __attribute__((always_inline)) int test_set_buffer_req(struct buffer_head *bh) { return test_and_set_bit(BH_Req, &(bh)->b_state); } static inline __attribute__((always_inline)) int test_clear_buffer_req(struct buffer_head *bh) { return test_and_clear_bit(BH_Req, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_mapped(struct buffer_head *bh) { set_bit(BH_Mapped, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_mapped(struct buffer_head *bh) { clear_bit(BH_Mapped, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_mapped(const struct buffer_head *bh) { return test_bit(BH_Mapped, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_new(struct buffer_head *bh) { set_bit(BH_New, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_new(struct buffer_head *bh) { clear_bit(BH_New, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_new(const struct buffer_head *bh) { return test_bit(BH_New, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_async_read(struct buffer_head *bh) { set_bit(BH_Async_Read, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_async_read(struct buffer_head *bh) { clear_bit(BH_Async_Read, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_async_read(const struct buffer_head *bh) { return test_bit(BH_Async_Read, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_async_write(struct buffer_head *bh) { set_bit(BH_Async_Write, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_async_write(struct buffer_head *bh) { clear_bit(BH_Async_Write, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_async_write(const struct buffer_head *bh) { return test_bit(BH_Async_Write, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_delay(struct buffer_head *bh) { set_bit(BH_Delay, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_delay(struct buffer_head *bh) { clear_bit(BH_Delay, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_delay(const struct buffer_head *bh) { return test_bit(BH_Delay, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_boundary(struct buffer_head *bh) { set_bit(BH_Boundary, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_boundary(struct buffer_head *bh) { clear_bit(BH_Boundary, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_boundary(const struct buffer_head *bh) { return test_bit(BH_Boundary, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_write_io_error(struct buffer_head *bh) { set_bit(BH_Write_EIO, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_write_io_error(struct buffer_head *bh) { clear_bit(BH_Write_EIO, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_write_io_error(const struct buffer_head *bh) { return test_bit(BH_Write_EIO, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_ordered(struct buffer_head *bh) { set_bit(BH_Ordered, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_ordered(struct buffer_head *bh) { clear_bit(BH_Ordered, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_ordered(const struct buffer_head *bh) { return test_bit(BH_Ordered, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_eopnotsupp(struct buffer_head *bh) { set_bit(BH_Eopnotsupp, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_eopnotsupp(struct buffer_head *bh) { clear_bit(BH_Eopnotsupp, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_eopnotsupp(const struct buffer_head *bh) { return test_bit(BH_Eopnotsupp, &(bh)->b_state); }
+static inline __attribute__((always_inline)) void set_buffer_unwritten(struct buffer_head *bh) { set_bit(BH_Unwritten, &(bh)->b_state); } static inline __attribute__((always_inline)) void clear_buffer_unwritten(struct buffer_head *bh) { clear_bit(BH_Unwritten, &(bh)->b_state); } static inline __attribute__((always_inline)) int buffer_unwritten(const struct buffer_head *bh) { return test_bit(BH_Unwritten, &(bh)->b_state); }
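+
+/*
+ * The single-line definitions above are the expanded BUFFER_FNS() and
+ * TAS_BUFFER_FNS() macros: for each BH_* state bit they generate the
+ * set_buffer_X()/clear_buffer_X()/buffer_X() (and test_set/test_clear)
+ * accessors used throughout the buffer-cache code.
+ */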
+# 147 "include/linux/buffer_head.h"
+void mark_buffer_dirty(struct buffer_head *bh);
+void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
+void set_bh_page(struct buffer_head *bh,
+ struct page *page, unsigned long offset);
+int try_to_free_buffers(struct page *);
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
+ int retry);
+void create_empty_buffers(struct page *, unsigned long,
+ unsigned long b_state);
+void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
+void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
+void end_buffer_async_write(struct buffer_head *bh, int uptodate);
+
+
+void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
+int inode_has_buffers(struct inode *);
+void invalidate_inode_buffers(struct inode *);
+int remove_inode_buffers(struct inode *inode);
+int sync_mapping_buffers(struct address_space *mapping);
+void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
+
+void mark_buffer_async_write(struct buffer_head *bh);
+void __wait_on_buffer(struct buffer_head *);
+wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
+struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
+ unsigned size);
+struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
+ unsigned size);
+void __brelse(struct buffer_head *);
+void __bforget(struct buffer_head *);
+void __breadahead(struct block_device *, sector_t block, unsigned int size);
+struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
+void invalidate_bh_lrus(void);
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
+void free_buffer_head(struct buffer_head * bh);
+void unlock_buffer(struct buffer_head *bh);
+void __lock_buffer(struct buffer_head *bh);
+void ll_rw_block(int, int, struct buffer_head * bh[]);
+int sync_dirty_buffer(struct buffer_head *bh);
+int submit_bh(int, struct buffer_head *);
+void write_boundary_block(struct block_device *bdev,
+ sector_t bblock, unsigned blocksize);
+int bh_uptodate_or_lock(struct buffer_head *bh);
+int bh_submit_read(struct buffer_head *bh);
+
+extern int buffer_heads_over_limit;
+
+
+
+
+
+void block_invalidatepage(struct page *page, unsigned long offset);
+int block_write_full_page(struct page *page, get_block_t *get_block,
+ struct writeback_control *wbc);
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+ struct writeback_control *wbc, bh_end_io_t *handler);
+int block_read_full_page(struct page*, get_block_t*);
+int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
+ unsigned long from);
+int block_write_begin(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page **, void **, get_block_t*);
+int block_write_end(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page *, void *);
+int generic_write_end(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page *, void *);
+void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
+int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
+int cont_write_begin(struct file *, struct address_space *, loff_t,
+ unsigned, unsigned, struct page **, void **,
+ get_block_t *, loff_t *);
+int generic_cont_expand_simple(struct inode *inode, loff_t size);
+int block_commit_write(struct page *page, unsigned from, unsigned to);
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+ get_block_t get_block);
+void block_sync_page(struct page *);
+sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
+int block_truncate_page(struct address_space *, loff_t, get_block_t *);
+int file_fsync(struct file *, struct dentry *, int);
+int nobh_write_begin(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page **, void **, get_block_t*);
+int nobh_write_end(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page *, void *);
+int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
+int nobh_writepage(struct page *page, get_block_t *get_block,
+ struct writeback_control *wbc);
+
+void buffer_init(void);
+
+
+
+
+
+static inline __attribute__((always_inline)) void attach_page_buffers(struct page *page,
+ struct buffer_head *head)
+{
+ get_page(page);
+ SetPagePrivate(page);
+ ((page)->private = ((unsigned long)head));
+}
+
+static inline __attribute__((always_inline)) void get_bh(struct buffer_head *bh)
+{
+ atomic_inc(&bh->b_count);
+}
+
+static inline __attribute__((always_inline)) void put_bh(struct buffer_head *bh)
+{
+ __asm__ __volatile__("": : :"memory");
+ atomic_dec(&bh->b_count);
+}
+
+static inline __attribute__((always_inline)) void brelse(struct buffer_head *bh)
+{
+ if (bh)
+ __brelse(bh);
+}
+
+static inline __attribute__((always_inline)) void bforget(struct buffer_head *bh)
+{
+ if (bh)
+ __bforget(bh);
+}
+
+static inline __attribute__((always_inline)) struct buffer_head *
+sb_bread(struct super_block *sb, sector_t block)
+{
+ return __bread(sb->s_bdev, block, sb->s_blocksize);
+}
+
+static inline __attribute__((always_inline)) void
+sb_breadahead(struct super_block *sb, sector_t block)
+{
+ __breadahead(sb->s_bdev, block, sb->s_blocksize);
+}
+
+static inline __attribute__((always_inline)) struct buffer_head *
+sb_getblk(struct super_block *sb, sector_t block)
+{
+ return __getblk(sb->s_bdev, block, sb->s_blocksize);
+}
+
+static inline __attribute__((always_inline)) struct buffer_head *
+sb_find_get_block(struct super_block *sb, sector_t block)
+{
+ return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
+}
+
+static inline __attribute__((always_inline)) void
+map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
+{
+ set_buffer_mapped(bh);
+ bh->b_bdev = sb->s_bdev;
+ bh->b_blocknr = block;
+ bh->b_size = sb->s_blocksize;
+}
+
+
+
+
+
+
+static inline __attribute__((always_inline)) void wait_on_buffer(struct buffer_head *bh)
+{
+ do { __might_sleep("include/linux/buffer_head.h", 315, 0); _cond_resched(); } while (0);
+ if (buffer_locked(bh) || ((&bh->b_count)->counter) == 0)
+ __wait_on_buffer(bh);
+}
+
+static inline __attribute__((always_inline)) int trylock_buffer(struct buffer_head *bh)
+{
+ return __builtin_expect(!!(!test_and_set_bit(BH_Lock, &bh->b_state)), 1);
+}
+
+static inline __attribute__((always_inline)) void lock_buffer(struct buffer_head *bh)
+{
+ do { __might_sleep("include/linux/buffer_head.h", 327, 0); _cond_resched(); } while (0);
+ if (!trylock_buffer(bh))
+ __lock_buffer(bh);
+}
+
+extern int __set_page_dirty_buffers(struct page *page);
+# 27 "fs/sysv/super.c" 2
+# 1 "fs/sysv/sysv.h" 1
+
+
+
+
+
+typedef __u16 __fs16;
+typedef __u32 __fs32;
+
+# 1 "include/linux/sysv_fs.h" 1
+# 13 "include/linux/sysv_fs.h"
+typedef __fs16 sysv_ino_t;
+
+
+
+
+typedef __fs32 sysv_zone_t;
+# 28 "include/linux/sysv_fs.h"
+struct xenix_super_block {
+ __fs16 s_isize;
+ __fs32 s_fsize __attribute__((packed, aligned(2)));
+
+ __fs16 s_nfree;
+ sysv_zone_t s_free[100];
+
+ __fs16 s_ninode;
+ sysv_ino_t s_inode[100];
+
+ char s_flock;
+ char s_ilock;
+ char s_fmod;
+ char s_ronly;
+ __fs32 s_time __attribute__((packed, aligned(2)));
+ __fs32 s_tfree __attribute__((packed, aligned(2)));
+ __fs16 s_tinode;
+ __fs16 s_dinfo[4];
+ char s_fname[6];
+ char s_fpack[6];
+ char s_clean;
+ char s_fill[371];
+ s32 s_magic;
+ __fs32 s_type;
+
+
+
+};
+# 66 "include/linux/sysv_fs.h"
+struct sysv4_super_block {
+ __fs16 s_isize;
+ u16 s_pad0;
+ __fs32 s_fsize;
+
+ __fs16 s_nfree;
+ u16 s_pad1;
+ sysv_zone_t s_free[50];
+
+ __fs16 s_ninode;
+ u16 s_pad2;
+ sysv_ino_t s_inode[100];
+
+ char s_flock;
+ char s_ilock;
+ char s_fmod;
+ char s_ronly;
+ __fs32 s_time;
+ __fs16 s_dinfo[4];
+ __fs32 s_tfree;
+ __fs16 s_tinode;
+ u16 s_pad3;
+ char s_fname[6];
+ char s_fpack[6];
+ s32 s_fill[12];
+ __fs32 s_state;
+ s32 s_magic;
+ __fs32 s_type;
+
+};
+
+
+struct sysv2_super_block {
+ __fs16 s_isize;
+ __fs32 s_fsize __attribute__((packed, aligned(2)));
+
+ __fs16 s_nfree;
+ sysv_zone_t s_free[50];
+
+ __fs16 s_ninode;
+ sysv_ino_t s_inode[100];
+
+ char s_flock;
+ char s_ilock;
+ char s_fmod;
+ char s_ronly;
+ __fs32 s_time __attribute__((packed, aligned(2)));
+ __fs16 s_dinfo[4];
+ __fs32 s_tfree __attribute__((packed, aligned(2)));
+ __fs16 s_tinode;
+ char s_fname[6];
+ char s_fpack[6];
+ s32 s_fill[14];
+ __fs32 s_state;
+ s32 s_magic;
+ __fs32 s_type;
+
+};
+
+
+
+
+struct v7_super_block {
+ __fs16 s_isize;
+ __fs32 s_fsize __attribute__((packed, aligned(2)));
+
+ __fs16 s_nfree;
+ sysv_zone_t s_free[50];
+
+ __fs16 s_ninode;
+ sysv_ino_t s_inode[100];
+
+ char s_flock;
+ char s_ilock;
+ char s_fmod;
+ char s_ronly;
+ __fs32 s_time __attribute__((packed, aligned(2)));
+
+ __fs32 s_tfree __attribute__((packed, aligned(2)));
+ __fs16 s_tinode;
+ __fs16 s_m;
+ __fs16 s_n;
+ char s_fname[6];
+ char s_fpack[6];
+};
+
+
+
+
+struct coh_super_block {
+ __fs16 s_isize;
+ __fs32 s_fsize __attribute__((packed, aligned(2)));
+
+ __fs16 s_nfree;
+ sysv_zone_t s_free[64] __attribute__((packed, aligned(2)));
+
+ __fs16 s_ninode;
+ sysv_ino_t s_inode[100];
+
+ char s_flock;
+ char s_ilock;
+ char s_fmod;
+ char s_ronly;
+ __fs32 s_time __attribute__((packed, aligned(2)));
+ __fs32 s_tfree __attribute__((packed, aligned(2)));
+ __fs16 s_tinode;
+ __fs16 s_interleave_m;
+ __fs16 s_interleave_n;
+ char s_fname[6];
+ char s_fpack[6];
+ __fs32 s_unique;
+};
+
+
+struct sysv_inode {
+ __fs16 i_mode;
+ __fs16 i_nlink;
+ __fs16 i_uid;
+ __fs16 i_gid;
+ __fs32 i_size;
+ u8 i_data[3*(10+1+1+1)];
+ u8 i_gen;
+ __fs32 i_atime;
+ __fs32 i_mtime;
+ __fs32 i_ctime;
+};
+
+
+
+struct sysv_dir_entry {
+ sysv_ino_t inode;
+ char name[14];
+};
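+
+/*
+ * A SysV directory entry is a fixed 16 bytes on disk: a 16-bit inode number
+ * (in the filesystem's native byte sex) followed by a 14-character name that
+ * need not be NUL-terminated.
+ */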
+# 10 "fs/sysv/sysv.h" 2
+# 21 "fs/sysv/sysv.h"
+struct sysv_sb_info {
+ struct super_block *s_sb;
+ int s_type;
+ char s_bytesex;
+ char s_truncate;
+
+ nlink_t s_link_max;
+ unsigned int s_inodes_per_block;
+ unsigned int s_inodes_per_block_1;
+ unsigned int s_inodes_per_block_bits;
+ unsigned int s_ind_per_block;
+ unsigned int s_ind_per_block_bits;
+ unsigned int s_ind_per_block_2;
+ unsigned int s_toobig_block;
+ unsigned int s_block_base;
+ unsigned short s_fic_size;
+ unsigned short s_flc_size;
+
+ struct buffer_head *s_bh1;
+ struct buffer_head *s_bh2;
+
+
+ char * s_sbd1;
+ char * s_sbd2;
+ __fs16 *s_sb_fic_count;
+ sysv_ino_t *s_sb_fic_inodes;
+ __fs16 *s_sb_total_free_inodes;
+ __fs16 *s_bcache_count;
+ sysv_zone_t *s_bcache;
+ __fs32 *s_free_blocks;
+ __fs32 *s_sb_time;
+ __fs32 *s_sb_state;
+
+
+ u32 s_firstinodezone;
+ u32 s_firstdatazone;
+ u32 s_ninodes;
+ u32 s_ndatazones;
+ u32 s_nzones;
+ u16 s_namelen;
+ int s_forced_ro;
+};
+
+
+
+
+struct sysv_inode_info {
+ __fs32 i_data[13];
+ u32 i_dir_start_lookup;
+ struct inode vfs_inode;
+};
+
+
+static inline __attribute__((always_inline)) struct sysv_inode_info *SYSV_I(struct inode *inode)
+{
+ return ({ const typeof( ((struct sysv_inode_info *)0)->vfs_inode ) *__mptr = (inode); (struct sysv_inode_info *)( (char *)__mptr - __builtin_offsetof(struct sysv_inode_info,vfs_inode) );});
+}
+
+static inline __attribute__((always_inline)) struct sysv_sb_info *SYSV_SB(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+
+
+enum {
+ FSTYPE_NONE = 0,
+ FSTYPE_XENIX,
+ FSTYPE_SYSV4,
+ FSTYPE_SYSV2,
+ FSTYPE_COH,
+ FSTYPE_V7,
+ FSTYPE_AFS,
+ FSTYPE_END,
+};
+# 106 "fs/sysv/sysv.h"
+enum {
+ XENIX_LINK_MAX = 126,
+ SYSV_LINK_MAX = 126,
+ V7_LINK_MAX = 126,
+ COH_LINK_MAX = 10000,
+};
+
+
+static inline __attribute__((always_inline)) void dirty_sb(struct super_block *sb)
+{
+ struct sysv_sb_info *sbi = SYSV_SB(sb);
+
+ mark_buffer_dirty(sbi->s_bh1);
+ if (sbi->s_bh1 != sbi->s_bh2)
+ mark_buffer_dirty(sbi->s_bh2);
+ sb->s_dirt = 1;
+}
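+
+/*
+ * dirty_sb() marks the buffer(s) holding the on-disk superblock dirty and
+ * sets sb->s_dirt so that the periodic superblock sync will call the
+ * filesystem's write_super to flush them.
+ */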
+
+
+
+extern struct sysv_inode *sysv_raw_inode(struct super_block *, unsigned,
+ struct buffer_head **);
+extern struct inode * sysv_new_inode(const struct inode *, mode_t);
+extern void sysv_free_inode(struct inode *);
+extern unsigned long sysv_count_free_inodes(struct super_block *);
+
+
+extern sysv_zone_t sysv_new_block(struct super_block *);
+extern void sysv_free_block(struct super_block *, sysv_zone_t);
+extern unsigned long sysv_count_free_blocks(struct super_block *);
+
+
+extern void sysv_truncate(struct inode *);
+extern int __sysv_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata);
+
+
+extern struct inode *sysv_iget(struct super_block *, unsigned int);
+extern int sysv_write_inode(struct inode *, int);
+extern int sysv_sync_inode(struct inode *);
+extern void sysv_set_inode(struct inode *, dev_t);
+extern int sysv_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+extern int sysv_init_icache(void);
+extern void sysv_destroy_icache(void);
+
+
+
+extern struct sysv_dir_entry *sysv_find_entry(struct dentry *, struct page **);
+extern int sysv_add_link(struct dentry *, struct inode *);
+extern int sysv_delete_entry(struct sysv_dir_entry *, struct page *);
+extern int sysv_make_empty(struct inode *, struct inode *);
+extern int sysv_empty_dir(struct inode *);
+extern void sysv_set_link(struct sysv_dir_entry *, struct page *,
+ struct inode *);
+extern struct sysv_dir_entry *sysv_dotdot(struct inode *, struct page **);
+extern ino_t sysv_inode_by_name(struct dentry *);
+
+
+extern const struct inode_operations sysv_file_inode_operations;
+extern const struct inode_operations sysv_dir_inode_operations;
+extern const struct inode_operations sysv_fast_symlink_inode_operations;
+extern const struct file_operations sysv_file_operations;
+extern const struct file_operations sysv_dir_operations;
+extern const struct address_space_operations sysv_aops;
+extern const struct super_operations sysv_sops;
+extern const struct dentry_operations sysv_dentry_operations;
+
+
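+/* Byte order used by the mounted filesystem on disk. */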
+enum {
+ BYTESEX_LE,
+ BYTESEX_PDP,
+ BYTESEX_BE,
+};
+
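+/*
+ * PDP-11 middle-endian 32-bit values store the high 16-bit half first;
+ * on this little-endian build swapping the two halves yields CPU order.
+ */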
+static inline __attribute__((always_inline)) u32 PDP_swab(u32 x)
+{
+
+ return ((x & 0xffff) << 16) | ((x & 0xffff0000) >> 16);
+
+
+
+
+
+
+
+}
+
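+/*
+ * Flavour-aware conversion of an on-disk 32-bit value to CPU order; the
+ * big-endian branch is what be32_to_cpu() expands to on this little-endian build.
+ */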
+static inline __attribute__((always_inline)) __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
+{
+ if (sbi->s_bytesex == BYTESEX_PDP)
+ return PDP_swab(( __u32)n);
+ else if (sbi->s_bytesex == BYTESEX_LE)
+ return (( __u32)(__le32)(( __le32)n));
+ else
+ return (__builtin_constant_p((__u32)(( __u32)(__be32)(( __be32)n))) ? ((__u32)( (((__u32)(( __u32)(__be32)(( __be32)n)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(( __be32)n)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(( __be32)n)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(( __be32)n)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(( __be32)n)));
+}
+
+static inline __attribute__((always_inline)) __fs32 cpu_to_fs32(struct sysv_sb_info *sbi, __u32 n)
+{
+ if (sbi->s_bytesex == BYTESEX_PDP)
+ return ( __fs32)PDP_swab(n);
+ else if (sbi->s_bytesex == BYTESEX_LE)
+ return ( __fs32)(( __le32)(__u32)(n));
+ else
+ return ( __fs32)(( __be32)(__builtin_constant_p((__u32)((n))) ? ((__u32)( (((__u32)((n)) & (__u32)0x000000ffUL) << 24) | (((__u32)((n)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((n)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((n)) & (__u32)0xff000000UL) >> 24))) : __fswab32((n))));
+}
+
+static inline __attribute__((always_inline)) __fs32 fs32_add(struct sysv_sb_info *sbi, __fs32 *n, int d)
+{
+ if (sbi->s_bytesex == BYTESEX_PDP)
+ *(__u32*)n = PDP_swab(PDP_swab(*(__u32*)n)+d);
+ else if (sbi->s_bytesex == BYTESEX_LE)
+ le32_add_cpu((__le32 *)n, d);
+ else
+ be32_add_cpu((__be32 *)n, d);
+ return *n;
+}
+
+static inline __attribute__((always_inline)) __u16 fs16_to_cpu(struct sysv_sb_info *sbi, __fs16 n)
+{
+ if (sbi->s_bytesex != BYTESEX_BE)
+ return (( __u16)(__le16)(( __le16)n));
+ else
+ return (__builtin_constant_p((__u16)(( __u16)(__be16)(( __be16)n))) ? ((__u16)( (((__u16)(( __u16)(__be16)(( __be16)n)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(( __be16)n)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(( __be16)n)));
+}
+
+static inline __attribute__((always_inline)) __fs16 cpu_to_fs16(struct sysv_sb_info *sbi, __u16 n)
+{
+ if (sbi->s_bytesex != BYTESEX_BE)
+ return ( __fs16)(( __le16)(__u16)(n));
+ else
+ return ( __fs16)(( __be16)(__builtin_constant_p((__u16)((n))) ? ((__u16)( (((__u16)((n)) & (__u16)0x00ffU) << 8) | (((__u16)((n)) & (__u16)0xff00U) >> 8))) : __fswab16((n))));
+}
+
+static inline __attribute__((always_inline)) __fs16 fs16_add(struct sysv_sb_info *sbi, __fs16 *n, int d)
+{
+ if (sbi->s_bytesex != BYTESEX_BE)
+ le16_add_cpu((__le16 *)n, d);
+ else
+ be16_add_cpu((__be16 *)n, d);
+ return *n;
+}
+# 28 "fs/sysv/super.c" 2
+# 43 "fs/sysv/super.c"
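+/*
+ * Seconds between 1970-01-01 and 1980-01-01 (ten years, two of them leap);
+ * superblocks stamped earlier than this are treated as SystemV Release 2.
+ */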
+enum {
+ JAN_1_1980 = (10*365 + 2) * 24 * 60 * 60
+};
+
+static void detected_xenix(struct sysv_sb_info *sbi)
+{
+ struct buffer_head *bh1 = sbi->s_bh1;
+ struct buffer_head *bh2 = sbi->s_bh2;
+ struct xenix_super_block * sbd1;
+ struct xenix_super_block * sbd2;
+
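+ /* The superblock was read either as a single 1024-byte buffer (bh1 == bh2) or as two 512-byte halves (bh1 != bh2). */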
+ if (bh1 == bh2)
+ sbd1 = sbd2 = (struct xenix_super_block *) bh1->b_data;
+ else {
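+ /* Split case: bias sbd2 by 512 bytes so that fields in the second half of the superblock resolve into bh2's data. */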
+
+ sbd1 = (struct xenix_super_block *) bh1->b_data;
+ sbd2 = (struct xenix_super_block *) (bh2->b_data - 512);
+ }
+
+ sbi->s_link_max = XENIX_LINK_MAX;
+ sbi->s_fic_size = 100;
+ sbi->s_flc_size = 100;
+ sbi->s_sbd1 = (char *)sbd1;
+ sbi->s_sbd2 = (char *)sbd2;
+ sbi->s_sb_fic_count = &sbd1->s_ninode;
+ sbi->s_sb_fic_inodes = &sbd1->s_inode[0];
+ sbi->s_sb_total_free_inodes = &sbd2->s_tinode;
+ sbi->s_bcache_count = &sbd1->s_nfree;
+ sbi->s_bcache = &sbd1->s_free[0];
+ sbi->s_free_blocks = &sbd2->s_tfree;
+ sbi->s_sb_time = &sbd2->s_time;
+ sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd1->s_isize);
+ sbi->s_nzones = fs32_to_cpu(sbi, sbd1->s_fsize);
+}
+
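+/*
+ * The SystemV4 superblock starts 512 bytes into the superblock area, so it sits
+ * either in the second half of a single 1024-byte buffer or entirely in bh2.
+ */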
+static void detected_sysv4(struct sysv_sb_info *sbi)
+{
+ struct sysv4_super_block * sbd;
+ struct buffer_head *bh1 = sbi->s_bh1;
+ struct buffer_head *bh2 = sbi->s_bh2;
+
+ if (bh1 == bh2)
+ sbd = (struct sysv4_super_block *) (bh1->b_data + (1<<10)/2);
+ else
+ sbd = (struct sysv4_super_block *) bh2->b_data;
+
+ sbi->s_link_max = SYSV_LINK_MAX;
+ sbi->s_fic_size = 100;
+ sbi->s_flc_size = 50;
+ sbi->s_sbd1 = (char *)sbd;
+ sbi->s_sbd2 = (char *)sbd;
+ sbi->s_sb_fic_count = &sbd->s_ninode;
+ sbi->s_sb_fic_inodes = &sbd->s_inode[0];
+ sbi->s_sb_total_free_inodes = &sbd->s_tinode;
+ sbi->s_bcache_count = &sbd->s_nfree;
+ sbi->s_bcache = &sbd->s_free[0];
+ sbi->s_free_blocks = &sbd->s_tfree;
+ sbi->s_sb_time = &sbd->s_time;
+ sbi->s_sb_state = &sbd->s_state;
+ sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize);
+ sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize);
+}
+
+static void detected_sysv2(struct sysv_sb_info *sbi)
+{
+ struct sysv2_super_block *sbd;
+ struct buffer_head *bh1 = sbi->s_bh1;
+ struct buffer_head *bh2 = sbi->s_bh2;
+
+ if (bh1 == bh2)
+ sbd = (struct sysv2_super_block *) (bh1->b_data + (1<<10)/2);
+ else
+ sbd = (struct sysv2_super_block *) bh2->b_data;
+
+ sbi->s_link_max = SYSV_LINK_MAX;
+ sbi->s_fic_size = 100;
+ sbi->s_flc_size = 50;
+ sbi->s_sbd1 = (char *)sbd;
+ sbi->s_sbd2 = (char *)sbd;
+ sbi->s_sb_fic_count = &sbd->s_ninode;
+ sbi->s_sb_fic_inodes = &sbd->s_inode[0];
+ sbi->s_sb_total_free_inodes = &sbd->s_tinode;
+ sbi->s_bcache_count = &sbd->s_nfree;
+ sbi->s_bcache = &sbd->s_free[0];
+ sbi->s_free_blocks = &sbd->s_tfree;
+ sbi->s_sb_time = &sbd->s_time;
+ sbi->s_sb_state = &sbd->s_state;
+ sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize);
+ sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize);
+}
+
+static void detected_coherent(struct sysv_sb_info *sbi)
+{
+ struct coh_super_block * sbd;
+ struct buffer_head *bh1 = sbi->s_bh1;
+
+ sbd = (struct coh_super_block *) bh1->b_data;
+
+ sbi->s_link_max = COH_LINK_MAX;
+ sbi->s_fic_size = 100;
+ sbi->s_flc_size = 64;
+ sbi->s_sbd1 = (char *)sbd;
+ sbi->s_sbd2 = (char *)sbd;
+ sbi->s_sb_fic_count = &sbd->s_ninode;
+ sbi->s_sb_fic_inodes = &sbd->s_inode[0];
+ sbi->s_sb_total_free_inodes = &sbd->s_tinode;
+ sbi->s_bcache_count = &sbd->s_nfree;
+ sbi->s_bcache = &sbd->s_free[0];
+ sbi->s_free_blocks = &sbd->s_tfree;
+ sbi->s_sb_time = &sbd->s_time;
+ sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize);
+ sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize);
+}
+
+static void detected_v7(struct sysv_sb_info *sbi)
+{
+ struct buffer_head *bh2 = sbi->s_bh2;
+ struct v7_super_block *sbd = (struct v7_super_block *)bh2->b_data;
+
+ sbi->s_link_max = V7_LINK_MAX;
+ sbi->s_fic_size = 100;
+ sbi->s_flc_size = 50;
+ sbi->s_sbd1 = (char *)sbd;
+ sbi->s_sbd2 = (char *)sbd;
+ sbi->s_sb_fic_count = &sbd->s_ninode;
+ sbi->s_sb_fic_inodes = &sbd->s_inode[0];
+ sbi->s_sb_total_free_inodes = &sbd->s_tinode;
+ sbi->s_bcache_count = &sbd->s_nfree;
+ sbi->s_bcache = &sbd->s_free[0];
+ sbi->s_free_blocks = &sbd->s_tfree;
+ sbi->s_sb_time = &sbd->s_time;
+ sbi->s_firstdatazone = fs16_to_cpu(sbi, sbd->s_isize);
+ sbi->s_nzones = fs32_to_cpu(sbi, sbd->s_fsize);
+}
+
+static int detect_xenix(struct sysv_sb_info *sbi, struct buffer_head *bh)
+{
+ struct xenix_super_block *sbd = (struct xenix_super_block *)bh->b_data;
+ if (*(__le32 *)&sbd->s_magic == (( __le32)(__u32)(0x2b5544)))
+ sbi->s_bytesex = BYTESEX_LE;
+ else if (*(__be32 *)&sbd->s_magic == (( __be32)(__builtin_constant_p((__u32)((0x2b5544))) ? ((__u32)( (((__u32)((0x2b5544)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0x2b5544)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0x2b5544)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0x2b5544)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0x2b5544)))))
+ sbi->s_bytesex = BYTESEX_BE;
+ else
+ return 0;
+ switch (fs32_to_cpu(sbi, sbd->s_type)) {
+ case 1:
+ sbi->s_type = FSTYPE_XENIX;
+ return 1;
+ case 2:
+ sbi->s_type = FSTYPE_XENIX;
+ return 2;
+ default:
+ return 0;
+ }
+}
+
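+/*
+ * Look for the SystemV magic (in either byte order) 512 bytes into the probed
+ * block; an s_nfree of 0xffff marks an SCO EAFS volume, supported read-only.
+ */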
+static int detect_sysv(struct sysv_sb_info *sbi, struct buffer_head *bh)
+{
+ struct super_block *sb = sbi->s_sb;
+
+ struct sysv4_super_block * sbd;
+ u32 type;
+
+ sbd = (struct sysv4_super_block *) (bh->b_data + (1<<10)/2);
+ if (*(__le32 *)&sbd->s_magic == (( __le32)(__u32)(0xfd187e20)))
+ sbi->s_bytesex = BYTESEX_LE;
+ else if (*(__be32 *)&sbd->s_magic == (( __be32)(__builtin_constant_p((__u32)((0xfd187e20))) ? ((__u32)( (((__u32)((0xfd187e20)) & (__u32)0x000000ffUL) << 24) | (((__u32)((0xfd187e20)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((0xfd187e20)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((0xfd187e20)) & (__u32)0xff000000UL) >> 24))) : __fswab32((0xfd187e20)))))
+ sbi->s_bytesex = BYTESEX_BE;
+ else
+ return 0;
+
+ type = fs32_to_cpu(sbi, sbd->s_type);
+
+ if (fs16_to_cpu(sbi, sbd->s_nfree) == 0xffff) {
+ sbi->s_type = FSTYPE_AFS;
+ sbi->s_forced_ro = 1;
+ if (!(sb->s_flags & 1)) {
+ printk("SysV FS: SCO EAFS on %s detected, "
+ "forcing read-only mode.\n",
+ sb->s_id);
+ }
+ return type;
+ }
+
+ if (fs32_to_cpu(sbi, sbd->s_time) < JAN_1_1980) {
+
+ if (type > 3 || type < 1)
+ return 0;
+ sbi->s_type = FSTYPE_SYSV2;
+ return type;
+ }
+ if ((type > 3 || type < 1) && (type > 0x30 || type < 0x10))
+ return 0;
+
+
+
+
+
+
+ if (type >= 0x10) {
+ printk("SysV FS: can't handle long file names on %s, "
+ "forcing read-only mode.\n", sb->s_id);
+ sbi->s_forced_ro = 1;
+ }
+
+ sbi->s_type = FSTYPE_SYSV4;
+ return type >= 0x10 ? type >> 4 : type;
+}
+
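+/* Coherent has no magic number, so fall back to matching the fixed volume and pack name strings. */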
+static int detect_coherent(struct sysv_sb_info *sbi, struct buffer_head *bh)
+{
+ struct coh_super_block * sbd;
+
+ sbd = (struct coh_super_block *) (bh->b_data + (1<<10)/2);
+ if ((memcmp(sbd->s_fname,"noname",6) && memcmp(sbd->s_fname,"xxxxx ",6))
+ || (memcmp(sbd->s_fpack,"nopack",6) && memcmp(sbd->s_fpack,"xxxxx\n",6)))
+ return 0;
+ sbi->s_bytesex = BYTESEX_PDP;
+ sbi->s_type = FSTYPE_COH;
+ return 1;
+}
+
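+/* Superblocks found at the odd probe locations (blocks 9 and 15) are only accepted for 512- and 1024-byte block sizes. */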
+static int detect_sysv_odd(struct sysv_sb_info *sbi, struct buffer_head *bh)
+{
+ int size = detect_sysv(sbi, bh);
+
+ return size>2 ? 0 : size;
+}
+
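+/* Probe table: block number to read (at the initial 1K block size) and the detector to run on it. */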
+static struct {
+ int block;
+ int (*test)(struct sysv_sb_info *, struct buffer_head *);
+} flavours[] = {
+ {1, detect_xenix},
+ {0, detect_sysv},
+ {0, detect_coherent},
+ {9, detect_sysv_odd},
+ {15,detect_sysv_odd},
+ {18,detect_sysv},
+};
+
+static char *flavour_names[] = {
+ [FSTYPE_XENIX] = "Xenix",
+ [FSTYPE_SYSV4] = "SystemV",
+ [FSTYPE_SYSV2] = "SystemV Release 2",
+ [FSTYPE_COH] = "Coherent",
+ [FSTYPE_V7] = "V7",
+ [FSTYPE_AFS] = "AFS",
+};
+
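+/* Per-flavour setup after detection; EAFS (FSTYPE_AFS) reuses the SystemV4 layout. */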
+static void (*flavour_setup[])(struct sysv_sb_info *) = {
+ [FSTYPE_XENIX] = detected_xenix,
+ [FSTYPE_SYSV4] = detected_sysv4,
+ [FSTYPE_SYSV2] = detected_sysv2,
+ [FSTYPE_COH] = detected_coherent,
+ [FSTYPE_V7] = detected_v7,
+ [FSTYPE_AFS] = detected_sysv4,
+};
+
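+/*
+ * Finish the mount once a flavour is known; size encodes the block size:
+ * 1 = 512, 2 = 1024, 3 = 2048 bytes.
+ */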
+static int complete_read_super(struct super_block *sb, int silent, int size)
+{
+ struct sysv_sb_info *sbi = SYSV_SB(sb);
+ struct inode *root_inode;
+ char *found = flavour_names[sbi->s_type];
+ u_char n_bits = size+8;
+ int bsize = 1 << n_bits;
+ int bsize_4 = bsize >> 2;
+
+ sbi->s_firstinodezone = 2;
+
+ flavour_setup[sbi->s_type](sbi);
+
+ sbi->s_truncate = 1;
+ sbi->s_ndatazones = sbi->s_nzones - sbi->s_firstdatazone;
+ sbi->s_inodes_per_block = bsize >> 6;
+ sbi->s_inodes_per_block_1 = (bsize >> 6)-1;
+ sbi->s_inodes_per_block_bits = n_bits-6;
+ sbi->s_ind_per_block = bsize_4;
+ sbi->s_ind_per_block_2 = bsize_4*bsize_4;
+ sbi->s_toobig_block = 10 + bsize_4 * (1 + bsize_4 * (1 + bsize_4));
+ sbi->s_ind_per_block_bits = n_bits-2;
+
+ sbi->s_ninodes = (sbi->s_firstdatazone - sbi->s_firstinodezone)
+ << sbi->s_inodes_per_block_bits;
+
+ if (!silent)
+ printk("VFS: Found a %s FS (block size = %ld) on device %s\n",
+ found, sb->s_blocksize, sb->s_id);
+
+ sb->s_magic = 0x012FF7B3 + sbi->s_type;
+
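+ /* set up enough so that the root inode can be read */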
+ sb->s_op = &sysv_sops;
+ root_inode = sysv_iget(sb, 2);
+ if (IS_ERR(root_inode)) {
+ printk("SysV FS: get root inode failed\n");
+ return 0;
+ }
+ sb->s_root = d_alloc_root(root_inode);
+ if (!sb->s_root) {
+ iput(root_inode);
+ printk("SysV FS: get root dentry failed\n");
+ return 0;
+ }
+ if (sbi->s_forced_ro)
+ sb->s_flags |= 1;
+ if (sbi->s_truncate)
+ sb->s_root->d_op = &sysv_dentry_operations;
+ sb->s_dirt = 1;
+ return 1;
+}
+
+static int sysv_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct buffer_head *bh1, *bh = ((void *)0);
+ struct sysv_sb_info *sbi;
+ unsigned long blocknr;
+ int size = 0, i;
+
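+ /* expanded BUILD_BUG_ON() checks: the on-disk structures must have their expected sizes */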
+ ((void)(sizeof(struct { int:-!!(1024 != sizeof (struct xenix_super_block)); })));
+ ((void)(sizeof(struct { int:-!!(512 != sizeof (struct sysv4_super_block)); })));
+ ((void)(sizeof(struct { int:-!!(512 != sizeof (struct sysv2_super_block)); })));
+ ((void)(sizeof(struct { int:-!!(500 != sizeof (struct coh_super_block)); })));
+ ((void)(sizeof(struct { int:-!!(64 != sizeof (struct sysv_inode)); })));
+
+ sbi = kzalloc(sizeof(struct sysv_sb_info), ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
+ if (!sbi)
+ return -12;
+
+ sbi->s_sb = sb;
+ sbi->s_block_base = 0;
+ sb->s_fs_info = sbi;
+
+ sb_set_blocksize(sb, (1<<10));
+
+ for (i = 0; i < (sizeof(flavours) / sizeof((flavours)[0]) + (sizeof(struct { int:-!!(__builtin_types_compatible_p(typeof(flavours), typeof(&flavours[0]))); }))) && !size; i++) {
+ brelse(bh);
+ bh = sb_bread(sb, flavours[i].block);
+ if (!bh)
+ continue;
+ size = flavours[i].test(SYSV_SB(sb), bh);
+ }
+
+ if (!size)
+ goto Eunknown;
+
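+ /* size encodes the detected block size: 1 = 512 (re-read the superblock as two halves), 2 = 1024 (keep the buffer), 3 = 2048 */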
+ switch (size) {
+ case 1:
+ blocknr = bh->b_blocknr << 1;
+ brelse(bh);
+ sb_set_blocksize(sb, 512);
+ bh1 = sb_bread(sb, blocknr);
+ bh = sb_bread(sb, blocknr + 1);
+ break;
+ case 2:
+ bh1 = bh;
+ break;
+ case 3:
+ blocknr = bh->b_blocknr >> 1;
+ brelse(bh);
+ sb_set_blocksize(sb, 2048);
+ bh1 = bh = sb_bread(sb, blocknr);
+ break;
+ default:
+ goto Ebadsize;
+ }
+
+ if (bh && bh1) {
+ sbi->s_bh1 = bh1;
+ sbi->s_bh2 = bh;
+ if (complete_read_super(sb, silent, size))
+ return 0;
+ }
+
+ brelse(bh1);
+ brelse(bh);
+ sb_set_blocksize(sb, (1<<10));
+ printk("oldfs: cannot read superblock\n");
+failed:
+ kfree(sbi);
+ return -22;
+
+Eunknown:
+ brelse(bh);
+ if (!silent)
+ printk("VFS: unable to find oldfs superblock on device %s\n",
+ sb->s_id);
+ goto failed;
+Ebadsize:
+ brelse(bh);
+ if (!silent)
+ printk("VFS: oldfs: unsupported block size (%dKb)\n",
+ 1<<(size-2));
+ goto failed;
+}
+
+static int v7_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct sysv_sb_info *sbi;
+ struct buffer_head *bh, *bh2 = ((void *)0);
+ struct v7_super_block *v7sb;
+ struct sysv_inode *v7i;
+
+ if (440 != sizeof (struct v7_super_block))
+ panic("V7 FS: bad super-block size");
+ if (64 != sizeof (struct sysv_inode))
+ panic("sysv fs: bad i-node size");
+
+ sbi = kzalloc(sizeof(struct sysv_sb_info), ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
+ if (!sbi)
+ return -12;
+
+ sbi->s_sb = sb;
+ sbi->s_block_base = 0;
+ sbi->s_type = FSTYPE_V7;
+ sbi->s_bytesex = BYTESEX_PDP;
+ sb->s_fs_info = sbi;
+
+ sb_set_blocksize(sb, 512);
+
+ if ((bh = sb_bread(sb, 1)) == ((void *)0)) {
+ if (!silent)
+ printk("VFS: unable to read V7 FS superblock on "
+ "device %s.\n", sb->s_id);
+ goto failed;
+ }
+
+
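+ /* plausibility check on the superblock: free-list and inode-cache counters in range, non-zero timestamp */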
+ v7sb = (struct v7_super_block *) bh->b_data;
+ if (fs16_to_cpu(sbi, v7sb->s_nfree) > 50 ||
+ fs16_to_cpu(sbi, v7sb->s_ninode) > 100 ||
+ fs32_to_cpu(sbi, v7sb->s_time) == 0)
+ goto failed;
+
+
+
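+ /* plausibility check on the root inode: a directory with a non-zero size that is a multiple of the 16-byte directory entry */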
+ if ((bh2 = sb_bread(sb, 2)) == ((void *)0))
+ goto failed;
+ v7i = (struct sysv_inode *)(bh2->b_data + 64);
+ if ((fs16_to_cpu(sbi, v7i->i_mode) & ~0777) != 0040000 ||
+ (fs32_to_cpu(sbi, v7i->i_size) == 0) ||
+ (fs32_to_cpu(sbi, v7i->i_size) & 017) != 0)
+ goto failed;
+ brelse(bh2);
+ bh2 = ((void *)0);
+
+ sbi->s_bh1 = bh;
+ sbi->s_bh2 = bh;
+ if (complete_read_super(sb, silent, 1))
+ return 0;
+
+failed:
+ brelse(bh2);
+ brelse(bh);
+ kfree(sbi);
+ return -22;
+}
+
+
+
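+/*
+ * "sysv" covers the autodetected Xenix/SystemV/Coherent variants; V7 has no
+ * magic number, so it is registered as a separate "v7" type and only probed
+ * when explicitly requested.
+ */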
+static int sysv_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+{
+ return get_sb_bdev(fs_type, flags, dev_name, data, sysv_fill_super,
+ mnt);
+}
+
+static int v7_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+{
+ return get_sb_bdev(fs_type, flags, dev_name, data, v7_fill_super, mnt);
+}
+
+static struct file_system_type sysv_fs_type = {
+ .owner = (&__this_module),
+ .name = "sysv",
+ .get_sb = sysv_get_sb,
+ .kill_sb = kill_block_super,
+ .fs_flags = 1,
+};
+
+static struct file_system_type v7_fs_type = {
+ .owner = (&__this_module),
+ .name = "v7",
+ .get_sb = v7_get_sb,
+ .kill_sb = kill_block_super,
+ .fs_flags = 1,
+};
+
+static int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) init_sysv_fs(void)
+{
+ int error;
+
+ error = sysv_init_icache();
+ if (error)
+ goto out;
+ error = register_filesystem(&sysv_fs_type);
+ if (error)
+ goto destroy_icache;
+ error = register_filesystem(&v7_fs_type);
+ if (error)
+ goto unregister;
+ return 0;
+
+unregister:
+ unregister_filesystem(&sysv_fs_type);
+destroy_icache:
+ sysv_destroy_icache();
+out:
+ return error;
+}
+
+static void __attribute__ ((__section__(".exit.text"))) __attribute__((__cold__)) exit_sysv_fs(void)
+{
+ unregister_filesystem(&sysv_fs_type);
+ unregister_filesystem(&v7_fs_type);
+ sysv_destroy_icache();
+}
+
+static inline __attribute__((always_inline)) initcall_t __inittest(void) { return init_sysv_fs; } int init_module(void) __attribute__((alias("init_sysv_fs")));
+static inline __attribute__((always_inline)) exitcall_t __exittest(void) { return exit_sysv_fs; } void cleanup_module(void) __attribute__((alias("exit_sysv_fs")));
+static const char __mod_license563[] __attribute__((__used__)) __attribute__((section(".modinfo"),unused)) = "license" "=" "GPL";