Contents of /trunk/kernel26-magellan/patches-2.6.35-r2/0102-2.6.35.3-all-fixes.patch
Parent Directory | Revision Log
Revision 1122 -
(show annotations)
(download)
Fri Sep 10 13:45:01 2010 UTC (14 years ago) by niro
File size: 2656 byte(s)
-2.6.35-magellan-r2: updated to linux-2.6.35.4
1 | diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c |
2 | index 227b044..ce9c6c2 100644 |
3 | --- a/arch/x86/kernel/cpu/vmware.c |
4 | +++ b/arch/x86/kernel/cpu/vmware.c |
5 | @@ -23,6 +23,7 @@ |
6 | |
7 | #include <linux/dmi.h> |
8 | #include <linux/module.h> |
9 | +#include <linux/jiffies.h> |
10 | #include <asm/div64.h> |
11 | #include <asm/x86_init.h> |
12 | #include <asm/hypervisor.h> |
13 | diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c |
14 | index aea1d3f..439fc1f 100644 |
15 | --- a/fs/proc/task_mmu.c |
16 | +++ b/fs/proc/task_mmu.c |
17 | @@ -210,6 +210,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) |
18 | int flags = vma->vm_flags; |
19 | unsigned long ino = 0; |
20 | unsigned long long pgoff = 0; |
21 | + unsigned long start; |
22 | dev_t dev = 0; |
23 | int len; |
24 | |
25 | @@ -220,8 +221,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) |
26 | pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; |
27 | } |
28 | |
29 | + /* We don't show the stack guard page in /proc/maps */ |
30 | + start = vma->vm_start; |
31 | + if (vma->vm_flags & VM_GROWSDOWN) |
32 | + start += PAGE_SIZE; |
33 | + |
34 | seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", |
35 | - vma->vm_start, |
36 | + start, |
37 | vma->vm_end, |
38 | flags & VM_READ ? 'r' : '-', |
39 | flags & VM_WRITE ? 'w' : '-', |
40 | diff --git a/mm/memory.c b/mm/memory.c |
41 | index aaaedbd..307bf77 100644 |
42 | --- a/mm/memory.c |
43 | +++ b/mm/memory.c |
44 | @@ -2792,24 +2792,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, |
45 | spinlock_t *ptl; |
46 | pte_t entry; |
47 | |
48 | - if (check_stack_guard_page(vma, address) < 0) { |
49 | - pte_unmap(page_table); |
50 | + pte_unmap(page_table); |
51 | + |
52 | + /* Check if we need to add a guard page to the stack */ |
53 | + if (check_stack_guard_page(vma, address) < 0) |
54 | return VM_FAULT_SIGBUS; |
55 | - } |
56 | |
57 | + /* Use the zero-page for reads */ |
58 | if (!(flags & FAULT_FLAG_WRITE)) { |
59 | entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), |
60 | vma->vm_page_prot)); |
61 | - ptl = pte_lockptr(mm, pmd); |
62 | - spin_lock(ptl); |
63 | + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); |
64 | if (!pte_none(*page_table)) |
65 | goto unlock; |
66 | goto setpte; |
67 | } |
68 | |
69 | /* Allocate our own private page. */ |
70 | - pte_unmap(page_table); |
71 | - |
72 | if (unlikely(anon_vma_prepare(vma))) |
73 | goto oom; |
74 | page = alloc_zeroed_user_highpage_movable(vma, address); |
75 | diff --git a/mm/mlock.c b/mm/mlock.c |
76 | index 3f82720..49e5e4c 100644 |
77 | --- a/mm/mlock.c |
78 | +++ b/mm/mlock.c |
79 | @@ -167,6 +167,14 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, |
80 | if (vma->vm_flags & VM_WRITE) |
81 | gup_flags |= FOLL_WRITE; |
82 | |
83 | + /* We don't try to access the guard page of a stack vma */ |
84 | + if (vma->vm_flags & VM_GROWSDOWN) { |
85 | + if (start == vma->vm_start) { |
86 | + start += PAGE_SIZE; |
87 | + nr_pages--; |
88 | + } |
89 | + } |
90 | + |
91 | while (nr_pages > 0) { |
92 | int i; |
93 |