--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2505,3 +2505,56 @@ #endif
 }
 
 #endif /* __HAVE_ARCH_GATE_AREA */
+
+/*
+ * Access another process' address space.
+ * Source/target buffer must be in kernel space.
+ * Do not walk the page table directly; use get_user_pages().
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+{
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	struct page *page;
+	void *old_buf = buf;
+
+	mm = get_task_mm(tsk);
+	if (!mm)
+		return 0;
+
+	down_read(&mm->mmap_sem);
+	/* ignore errors, just check how much was successfully transferred */
+	while (len) {
+		int bytes, ret, offset;
+		void *maddr;
+
+		ret = get_user_pages(tsk, mm, addr, 1,
+				write, 1, &page, &vma);
+		if (ret <= 0)
+			break;
+
+		bytes = len;
+		offset = addr & (PAGE_SIZE-1);
+		if (bytes > PAGE_SIZE-offset)
+			bytes = PAGE_SIZE-offset;
+
+		maddr = kmap(page);
+		if (write) {
+			copy_to_user_page(vma, page, addr,
+					  maddr + offset, buf, bytes);
+			set_page_dirty_lock(page);
+		} else {
+			copy_from_user_page(vma, page, addr,
+					    buf, maddr + offset, bytes);
+		}
+		kunmap(page);
+		page_cache_release(page);
+		len -= bytes;
+		buf += bytes;
+		addr += bytes;
+	}
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+
+	return buf - old_buf;
+}
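
[Commentary, not part of the patch: the function returns the number of bytes
actually transferred, and the copy can stop short at an unmapped page, so
callers are expected to check the count. A minimal sketch of a PEEKDATA-style
helper built on this interface -- peek_word is a hypothetical name for
illustration, nothing this patch adds:

	/* sketch: read one word from another task's address space */
	static int peek_word(struct task_struct *child, unsigned long addr,
			     unsigned long *val)
	{
		int copied;

		/* write == 0: copy from the target into our kernel buffer */
		copied = access_process_vm(child, addr, val, sizeof(*val), 0);

		/* a short copy means the address was not fully mapped */
		return copied == sizeof(*val) ? 0 : -EIO;
	}
]
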
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1206,3 +1206,51 @@ struct page *filemap_nopage(struct vm_ar
 	BUG();
 	return NULL;
 }
+
+/*
+ * Access another process' address space.
+ * - source/target buffer must be in kernel space
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+{
+	struct vm_list_struct *vml;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm;
+
+	if (addr + len < addr)
+		return 0;
+
+	mm = get_task_mm(tsk);
+	if (!mm)
+		return 0;
+
+	down_read(&mm->mmap_sem);
+
+	/* the access must start within one of the target process's mappings */
+	for (vml = mm->context.vmlist; vml; vml = vml->next) {
+		if (addr >= vml->vma->vm_start && addr < vml->vma->vm_end)
+			break;
+	}
+
+	if (vml) {
+		vma = vml->vma;
+
+		/* don't overrun this mapping */
+		if (addr + len >= vma->vm_end)
+			len = vma->vm_end - addr;
+
+		/* only read or write mappings where it is permitted */
+		if (write && vma->vm_flags & VM_WRITE)
+			len -= copy_to_user((void *) addr, buf, len);
+		else if (!write && vma->vm_flags & VM_READ)
+			len -= copy_from_user(buf, (void *) addr, len);
+		else
+			len = 0;
+	} else {
+		len = 0;
+	}
+
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+	return len;
+}
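
[Commentary, not part of the patch: the no-MMU variant clamps the transfer to
the single mapping containing the start address, so a request spanning two
adjacent mappings returns short rather than continuing page by page as the MMU
version does. A caller that must drain a whole range can loop on the returned
count; a hypothetical sketch, with read_range an illustrative name:

	/* sketch: retry until the range is drained or an address faults */
	static int read_range(struct task_struct *tsk, unsigned long addr,
			      void *buf, int len)
	{
		while (len > 0) {
			int n = access_process_vm(tsk, addr, buf, len, 0);

			if (n <= 0)
				return -EIO;	/* start address not mapped */
			addr += n;
			buf += n;	/* void * arithmetic: a gcc extension the patch itself relies on */
			len -= n;
		}
		return 0;
	}
]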