initial import
[patches.git] / nommu-process-vm.patch
1 --- kernel/ptrace.c
2 +++ kernel/ptrace.c
3 @@ -207,60 +207,6 @@ int ptrace_detach(struct task_struct *ch
4 return 0;
5 }
6
7 -/*
8 - * Access another process' address space.
9 - * Source/target buffer must be kernel space,
10 - * Do not walk the page table directly, use get_user_pages
11 - */
12 -
13 -int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
14 -{
15 - struct mm_struct *mm;
16 - struct vm_area_struct *vma;
17 - struct page *page;
18 - void *old_buf = buf;
19 -
20 - mm = get_task_mm(tsk);
21 - if (!mm)
22 - return 0;
23 -
24 - down_read(&mm->mmap_sem);
25 - /* ignore errors, just check how much was sucessfully transfered */
26 - while (len) {
27 - int bytes, ret, offset;
28 - void *maddr;
29 -
30 - ret = get_user_pages(tsk, mm, addr, 1,
31 - write, 1, &page, &vma);
32 - if (ret <= 0)
33 - break;
34 -
35 - bytes = len;
36 - offset = addr & (PAGE_SIZE-1);
37 - if (bytes > PAGE_SIZE-offset)
38 - bytes = PAGE_SIZE-offset;
39 -
40 - maddr = kmap(page);
41 - if (write) {
42 - copy_to_user_page(vma, page, addr,
43 - maddr + offset, buf, bytes);
44 - set_page_dirty_lock(page);
45 - } else {
46 - copy_from_user_page(vma, page, addr,
47 - buf, maddr + offset, bytes);
48 - }
49 - kunmap(page);
50 - page_cache_release(page);
51 - len -= bytes;
52 - buf += bytes;
53 - addr += bytes;
54 - }
55 - up_read(&mm->mmap_sem);
56 - mmput(mm);
57 -
58 - return buf - old_buf;
59 -}
60 -
61 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
62 {
63 int copied = 0;
64 --- mm/memory.c
65 +++ mm/memory.c
66 @@ -2445,3 +2445,56 @@ int in_gate_area_no_task(unsigned long a
67 }
68
69 #endif /* __HAVE_ARCH_GATE_AREA */
70 +
71 +/*
72 + * Access another process' address space.
73 + * Source/target buffer must be kernel space.
74 + * Do not walk the page table directly, use get_user_pages
75 + */
76 +int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
77 +{
78 + struct mm_struct *mm;
79 + struct vm_area_struct *vma;
80 + struct page *page;
81 + void *old_buf = buf;
82 +
83 + mm = get_task_mm(tsk);
84 + if (!mm)
85 + return 0;
86 +
87 + down_read(&mm->mmap_sem);
88 +	/* ignore errors, just check how much was successfully transferred */
89 + while (len) {
90 + int bytes, ret, offset;
91 + void *maddr;
92 +
93 + ret = get_user_pages(tsk, mm, addr, 1,
94 + write, 1, &page, &vma);
95 + if (ret <= 0)
96 + break;
97 +
98 + bytes = len;
99 + offset = addr & (PAGE_SIZE-1);
100 + if (bytes > PAGE_SIZE-offset)
101 + bytes = PAGE_SIZE-offset;
102 +
103 + maddr = kmap(page);
104 + if (write) {
105 + copy_to_user_page(vma, page, addr,
106 + maddr + offset, buf, bytes);
107 + set_page_dirty_lock(page);
108 + } else {
109 + copy_from_user_page(vma, page, addr,
110 + buf, maddr + offset, bytes);
111 + }
112 + kunmap(page);
113 + page_cache_release(page);
114 + len -= bytes;
115 + buf += bytes;
116 + addr += bytes;
117 + }
118 + up_read(&mm->mmap_sem);
119 + mmput(mm);
120 +
121 + return buf - old_buf;
122 +}
123 --- mm/nommu.c
124 +++ mm/nommu.c
125 @@ -1213,3 +1213,51 @@ struct page *filemap_nopage(struct vm_ar
126 BUG();
127 return NULL;
128 }
129 +
130 +/*
131 + * Access another process' address space.
132 + * - source/target buffer must be kernel space
133 + */
134 +int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
135 +{
136 + struct vm_list_struct *vml;
137 + struct vm_area_struct *vma;
138 + struct mm_struct *mm;
139 +
140 + if (addr + len < addr)
141 + return 0;
142 +
143 + mm = get_task_mm(tsk);
144 + if (!mm)
145 + return 0;
146 +
147 + down_read(&mm->mmap_sem);
148 +
149 + /* the access must start within one of the target process's mappings */
150 + for (vml = mm->context.vmlist; vml; vml = vml->next) {
151 + if (addr >= vml->vma->vm_start && addr < vml->vma->vm_end)
152 + break;
153 + }
154 +
155 + if (vml) {
156 + vma = vml->vma;
157 +
158 + /* don't overrun this mapping */
159 + if (addr + len >= vma->vm_end)
160 + len = vma->vm_end - addr;
161 +
162 + /* only read or write mappings where it is permitted */
163 + if (write && vma->vm_flags & VM_WRITE)
164 + len -= copy_to_user((void *) addr, buf, len);
165 + else if (!write && vma->vm_flags & VM_READ)
166 + len -= copy_from_user(buf, (void *) addr, len);
167 + else
168 + len = 0;
169 + } else {
170 + len = 0;
171 + }
172 +
173 + up_read(&mm->mmap_sem);
174 + mmput(mm);
175 + return len;
176 +}