3 @@ -207,60 +207,6 @@ int ptrace_detach(struct task_struct *ch
8 - * Access another process' address space.
9 - * Source/target buffer must be kernel space,
10 - * Do not walk the page table directly, use get_user_pages
13 -int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
15 - struct mm_struct *mm;
16 - struct vm_area_struct *vma;
18 - void *old_buf = buf;
20 - mm = get_task_mm(tsk);
24 - down_read(&mm->mmap_sem);
25 - /* ignore errors, just check how much was sucessfully transfered */
27 - int bytes, ret, offset;
30 - ret = get_user_pages(tsk, mm, addr, 1,
31 - write, 1, &page, &vma);
36 - offset = addr & (PAGE_SIZE-1);
37 - if (bytes > PAGE_SIZE-offset)
38 - bytes = PAGE_SIZE-offset;
42 - copy_to_user_page(vma, page, addr,
43 - maddr + offset, buf, bytes);
44 - set_page_dirty_lock(page);
46 - copy_from_user_page(vma, page, addr,
47 - buf, maddr + offset, bytes);
50 - page_cache_release(page);
55 - up_read(&mm->mmap_sem);
58 - return buf - old_buf;
61 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
66 @@ -2445,3 +2445,56 @@ int in_gate_area_no_task(unsigned long a
69 #endif /* __HAVE_ARCH_GATE_AREA */
72 + * Access another process' address space.
73 + * Source/target buffer must be kernel space,
74 + * Do not walk the page table directly, use get_user_pages
76 +int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
78 + struct mm_struct *mm;
79 + struct vm_area_struct *vma;
81 + void *old_buf = buf;
83 + mm = get_task_mm(tsk);
87 + down_read(&mm->mmap_sem);
88 + /* ignore errors, just check how much was successfully transferred */
90 + int bytes, ret, offset;
93 + ret = get_user_pages(tsk, mm, addr, 1,
94 + write, 1, &page, &vma);
99 + offset = addr & (PAGE_SIZE-1);
100 + if (bytes > PAGE_SIZE-offset)
101 + bytes = PAGE_SIZE-offset;
103 + maddr = kmap(page);
105 + copy_to_user_page(vma, page, addr,
106 + maddr + offset, buf, bytes);
107 + set_page_dirty_lock(page);
109 + copy_from_user_page(vma, page, addr,
110 + buf, maddr + offset, bytes);
113 + page_cache_release(page);
118 + up_read(&mm->mmap_sem);
121 + return buf - old_buf;
125 @@ -1213,3 +1213,51 @@ struct page *filemap_nopage(struct vm_ar
131 + * Access another process' address space.
132 + * - source/target buffer must be kernel space
134 +int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
136 + struct vm_list_struct *vml;
137 + struct vm_area_struct *vma;
138 + struct mm_struct *mm;
140 + if (addr + len < addr)
143 + mm = get_task_mm(tsk);
147 + down_read(&mm->mmap_sem);
149 + /* the access must start within one of the target process's mappings */
150 + for (vml = mm->context.vmlist; vml; vml = vml->next) {
151 + if (addr >= vml->vma->vm_start && addr < vml->vma->vm_end)
158 + /* don't overrun this mapping */
159 + if (addr + len >= vma->vm_end)
160 + len = vma->vm_end - addr;
162 + /* only read or write mappings where it is permitted */
163 + if (write && vma->vm_flags & VM_WRITE)
164 + len -= copy_to_user((void *) addr, buf, len);
165 + else if (!write && vma->vm_flags & VM_READ)
166 + len -= copy_from_user(buf, (void *) addr, len);
173 + up_read(&mm->mmap_sem);