initial import
[patches.git] / nommu-map-list.patch
CommitLineData
--- linux-2.6.x/mm/nommu.c
+++ linux-2.6.x/mm/nommu.c
@@ -421,6 +421,10 @@ static int validate_mmap_request(struct
 	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
 		return -EOVERFLOW;
 
+	/* Too many mappings? */
+	if (current->mm->map_count > sysctl_max_map_count)
+		return -ENOMEM;
+
 	if (file) {
 		/* validate file mapping requests */
 		struct address_space *mapping;
@@ -853,6 +857,7 @@ unsigned long do_mmap_pgoff(struct file
 
 	vml->next = current->mm->context.vmlist;
 	current->mm->context.vmlist = vml;
+	current->mm->map_count++;
 
 	up_write(&nommu_vma_sem);
 
@@ -961,6 +966,7 @@ int do_munmap(struct mm_struct *mm, unsi
 
 	update_hiwater_vm(mm);
 	mm->total_vm -= len >> PAGE_SHIFT;
+	mm->map_count--;
 
 #ifdef DEBUG
 	show_process_blocks();
--- linux-2.6.x/fs/proc/task_nommu.c
+++ linux-2.6.x/fs/proc/task_nommu.c
@@ -137,23 +137,129 @@ out:
 	return result;
 }
 
-/*
- * Albert D. Cahalan suggested to fake entries for the traditional
- * sections here. This might be worth investigating.
- */
+static void pad_len_spaces(struct seq_file *m, int len)
+{
+	len = 25 + sizeof(void*) * 6 - len;
+	if (len < 1)
+		len = 1;
+	seq_printf(m, "%*c", len, ' ');
+}
+
 static int show_map(struct seq_file *m, void *v)
 {
+	struct vm_list_struct *vml = v;
+	struct vm_area_struct *vma = vml->vma;
+	struct task_struct *task = m->private;
+	struct mm_struct *mm = get_task_mm(task);
+	struct file *file = vma->vm_file;
+	int flags = vma->vm_flags;
+	unsigned long ino = 0;
+	dev_t dev = 0;
+	int len;
+
+	if (file) {
+		struct inode *inode = vma->vm_file->f_dentry->d_inode;
+		dev = inode->i_sb->s_dev;
+		ino = inode->i_ino;
+	}
+
+	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+			vma->vm_start,
+			vma->vm_end,
+			flags & VM_READ ? 'r' : '-',
+			flags & VM_WRITE ? 'w' : '-',
+			flags & VM_EXEC ? 'x' : '-',
+			flags & VM_MAYSHARE ? 's' : 'p',
+			vma->vm_pgoff << PAGE_SHIFT,
+			MAJOR(dev), MINOR(dev), ino, &len);
+
+	/*
+	 * Print the dentry name for named mappings, and a
+	 * special [heap] marker for the heap:
+	 */
+	if (file) {
+		pad_len_spaces(m, len);
+		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
+	} else {
+		if (mm) {
+			if (vma->vm_start <= mm->start_brk &&
+						vma->vm_end >= mm->brk) {
+				pad_len_spaces(m, len);
+				seq_puts(m, "[heap]");
+			} else {
+				if (vma->vm_start <= mm->start_stack &&
+					vma->vm_end >= mm->start_stack) {
+
+					pad_len_spaces(m, len);
+					seq_puts(m, "[stack]");
+				}
+			}
+		} else {
+			pad_len_spaces(m, len);
+			seq_puts(m, "[vdso]");
+		}
+	}
+	seq_putc(m, '\n');
+
 	return 0;
 }
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
+	struct task_struct *task = m->private;
+	struct mm_struct *mm;
+	struct vm_list_struct *vml;
+	loff_t l = *pos;
+
+	mm = get_task_mm(task);
+	if (!mm)
+		return NULL;
+
+	down_read(&mm->mmap_sem);
+
+	/*
+	 * Check the vml index is within the range and do
+	 * sequential scan until m_index.
+	 */
+	vml = NULL;
+	if ((unsigned long)l < mm->map_count) {
+		vml = mm->context.vmlist;
+		while (l-- && vml)
+			vml = vml->next;
+	}
+
+	if (vml)
+		return vml;
+
+	/* End of vmls has been reached */
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+
+	return NULL;
 }
 static void m_stop(struct seq_file *m, void *v)
 {
+	struct task_struct *task = m->private;
+	struct mm_struct *mm;
+
+	if (!v)
+		return;
+
+	mm = get_task_mm(task);
+	if (!mm)
+		return;
+
+	up_read(&mm->mmap_sem);
+	mmput(mm);
 }
 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
+	struct vm_list_struct *vml = v;
+
+	(*pos)++;
+	if (vml && vml->next)
+		return vml->next;
+
+	m_stop(m, vml);
 	return NULL;
 }
 struct seq_operations proc_pid_maps_op = {