Index: fs/proc/task_nommu.c
===================================================================
RCS file: /usr/local/src/blackfin/rsync/uclinux533/uClinux-dist/linux-2.6.x/fs/proc/task_nommu.c,v
retrieving revision 1.4
diff -u -p -r1.4 task_nommu.c
--- fs/proc/task_nommu.c	12 Aug 2005 04:12:03 -0000	1.4
+++ fs/proc/task_nommu.c	25 Aug 2006 23:03:47 -0000
@@ -141,20 +141,227 @@ out:
  * Albert D. Cahalan suggested to fake entries for the traditional
  * sections here. This might be worth investigating.
  */
+static void pad_len_spaces(struct seq_file *m, int len)
+{
+	len = 25 + sizeof(void*) * 6 - len;
+	if (len < 1)
+		len = 1;
+	seq_printf(m, "%*c", len, ' ');
+}
+
 static int show_map(struct seq_file *m, void *v)
 {
+	struct task_struct *task = m->private;
+	struct vm_area_struct *vma = v;
+	struct mm_struct *mm = vma->vm_mm;
+	struct file *file = vma->vm_file;
+	int flags = vma->vm_flags;
+	unsigned long ino = 0;
+	dev_t dev = 0;
+	int len;
+
+printk("!!! BOOYA %s\n", __FUNCTION__);
+
+	if (file) {
+		struct inode *inode = vma->vm_file->f_dentry->d_inode;
+		dev = inode->i_sb->s_dev;
+		ino = inode->i_ino;
+	}
+
+	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+			vma->vm_start,
+			vma->vm_end,
+			flags & VM_READ ? 'r' : '-',
+			flags & VM_WRITE ? 'w' : '-',
+			flags & VM_EXEC ? 'x' : '-',
+			flags & VM_MAYSHARE ? 's' : 'p',
+			vma->vm_pgoff << PAGE_SHIFT,
+			MAJOR(dev), MINOR(dev), ino, &len);
+
+	/*
+	 * Print the dentry name for named mappings, and a
+	 * special [heap] marker for the heap:
+	 */
+	if (file) {
+		pad_len_spaces(m, len);
+		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
+	} else {
+		if (mm) {
+			if (vma->vm_start <= mm->start_brk &&
+						vma->vm_end >= mm->brk) {
+				pad_len_spaces(m, len);
+				seq_puts(m, "[heap]");
+			} else {
+				if (vma->vm_start <= mm->start_stack &&
+				    vma->vm_end >= mm->start_stack) {
+
+					pad_len_spaces(m, len);
+					seq_puts(m, "[stack]");
+				}
+			}
+		} else {
+			pad_len_spaces(m, len);
+			seq_puts(m, "[vdso]");
+		}
+	}
+	seq_putc(m, '\n');
+
+/*
+	if (mss)
+		seq_printf(m,
+			   "Size: %8lu kB\n"
+			   "Rss: %8lu kB\n"
+			   "Shared_Clean: %8lu kB\n"
+			   "Shared_Dirty: %8lu kB\n"
+			   "Private_Clean: %8lu kB\n"
+			   "Private_Dirty: %8lu kB\n",
+			   (vma->vm_end - vma->vm_start) >> 10,
+			   mss->resident >> 10,
+			   mss->shared_clean >> 10,
+			   mss->shared_dirty >> 10,
+			   mss->private_clean >> 10,
+			   mss->private_dirty >> 10);
+*/
+#define get_gate_vma(x) NULL
+	if (m->count < m->size) /* vma is copied successfully */
+		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
 	return 0;
 }
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
-	return NULL;
+	struct task_struct *task = m->private;
+	unsigned long last_addr = m->version;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma, *tail_vma;
+	struct vm_list_struct *vml;
+	loff_t l = *pos;
+printk("!!! BOOYA %s\n", __FUNCTION__);
+printk("%s:%i: %p %lu \n", __FUNCTION__, __LINE__, task, last_addr);
+	/*
+	 * We remember last_addr rather than next_addr to hit with
+	 * mmap_cache most of the time. We have zero last_addr at
+	 * the beginning and also after lseek. We will have -1 last_addr
+	 * after the end of the vmas.
+	 */
+
+	if (last_addr == -1UL)
+		return NULL;
+printk("%s:%i: \n", __FUNCTION__, __LINE__);
+
+	mm = get_task_mm(task);
+	if (!mm)
+		return NULL;
+printk("%s:%i: mm=%p \n", __FUNCTION__, __LINE__, mm);
+
+	for (vml = mm->context.vmlist; vml; vml = vml->next) {
+		vma = vml->vma;
+char _tmpbuf[1024];
+		char *p;
+		unsigned long ino = 0;
+		dev_t dev = 0;
+int flags = vma->vm_flags;
+		struct file *file = vma->vm_file;
+		if (file) {
+			struct inode *inode = vma->vm_file->f_dentry->d_inode;
+			dev = inode->i_sb->s_dev;
+			ino = inode->i_ino;
+		}
+//		printk("%s:%i: %p: %lu -> %lu\n", __FUNCTION__, __LINE__, vml->vma,
+//			vml->vma->vm_start, vml->vma->vm_end);
+printk("MAPS: %08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu ",
+	vma->vm_start,
+	vma->vm_end,
+	flags & VM_READ ? 'r' : '-',
+	flags & VM_WRITE ? 'w' : '-',
+	flags & VM_EXEC ? 'x' : '-',
+	flags & VM_MAYSHARE ? 's' : 'p',
+	vma->vm_pgoff << PAGE_SHIFT,
+	MAJOR(dev), MINOR(dev), ino);
+if (file) {
+p = d_path(file->f_dentry, file->f_vfsmnt, _tmpbuf, sizeof(_tmpbuf));
+printk(" %s ", p);
+
+} else {
+	if (mm) {
+		if (vma->vm_start <= mm->start_brk &&
+				vma->vm_end >= mm->brk) {
+			printk(" [heap]");
+		} else {
+			if (vma->vm_start <= mm->start_stack &&
+			    vma->vm_end >= mm->start_stack) {
+
+				printk(" [stack]");
+			}
+		}
+	}
+}
+printk("\n");
+
+//	if (addr >= vml->vma->vm_start && addr < vml->vma->vm_end)
+//		break;
+	}
+
+	tail_vma = get_gate_vma(task);
+	down_read(&mm->mmap_sem);
+
+	/* Start with last addr hint */
+	if (last_addr && (vma = find_vma(mm, last_addr))) {
+		vma = vma->vm_next;
+		goto out;
+	}
+printk("%s:%i: \n", __FUNCTION__, __LINE__);
+
+	/*
+	 * Check the vma index is within the range and do
+	 * sequential scan until m_index.
+	 */
+	vma = NULL;
+	if ((unsigned long)l < mm->map_count) {
+		vma = mm->mmap;
+		while (l-- && vma)
+			vma = vma->vm_next;
+		goto out;
+	}
+printk("%s:%i: \n", __FUNCTION__, __LINE__);
+
+	if (l != mm->map_count)
+		tail_vma = NULL; /* After gate vma */
+printk("%s:%i: \n", __FUNCTION__, __LINE__);
+
+out:
+	if (vma)
+		return vma;
+printk("%s:%i: %p\n", __FUNCTION__, __LINE__, tail_vma);
+
+	/* End of vmas has been reached */
+	m->version = (tail_vma != NULL)? 0: -1UL;
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+	return tail_vma;
 }
 static void m_stop(struct seq_file *m, void *v)
 {
+	struct task_struct *task = m->private;
+	struct vm_area_struct *vma = v;
+printk("!!! BOOYA %s\n", __FUNCTION__);
+	if (vma && vma != get_gate_vma(task)) {
+		struct mm_struct *mm = vma->vm_mm;
+		up_read(&mm->mmap_sem);
+		mmput(mm);
+	}
 }
 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	return NULL;
+	struct task_struct *task = m->private;
+	struct vm_area_struct *vma = v;
+	struct vm_area_struct *tail_vma = get_gate_vma(task);
+printk("!!! BOOYA %s\n", __FUNCTION__);
+
+	(*pos)++;
+	if (vma && (vma != tail_vma) && vma->vm_next)
+		return vma->vm_next;
+	m_stop(m, v);
+	return (vma != tail_vma)? tail_vma: NULL;
 }
 struct seq_operations proc_pid_maps_op = {
 	.start	= m_start,