Annotation of /trunk/kernel26-magellan/patches-2.6.29-r8/0102-2.6.29.3-all-fixes.patch
Parent Directory | Revision Log
Revision 1114 -
(hide annotations)
(download)
Sun Aug 22 17:59:15 2010 UTC (14 years, 1 month ago) by niro
File size: 89104 byte(s)
Sun Aug 22 17:59:15 2010 UTC (14 years, 1 month ago) by niro
File size: 89104 byte(s)
-added
1 | niro | 1114 | diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h |
2 | index d346649..9eed29e 100644 | ||
3 | --- a/arch/powerpc/include/asm/processor.h | ||
4 | +++ b/arch/powerpc/include/asm/processor.h | ||
5 | @@ -313,6 +313,25 @@ static inline void prefetchw(const void *x) | ||
6 | #define HAVE_ARCH_PICK_MMAP_LAYOUT | ||
7 | #endif | ||
8 | |||
9 | +#ifdef CONFIG_PPC64 | ||
10 | +static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) | ||
11 | +{ | ||
12 | + unsigned long sp; | ||
13 | + | ||
14 | + if (is_32) | ||
15 | + sp = regs->gpr[1] & 0x0ffffffffUL; | ||
16 | + else | ||
17 | + sp = regs->gpr[1]; | ||
18 | + | ||
19 | + return sp; | ||
20 | +} | ||
21 | +#else | ||
22 | +static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) | ||
23 | +{ | ||
24 | + return regs->gpr[1]; | ||
25 | +} | ||
26 | +#endif | ||
27 | + | ||
28 | #endif /* __KERNEL__ */ | ||
29 | #endif /* __ASSEMBLY__ */ | ||
30 | #endif /* _ASM_POWERPC_PROCESSOR_H */ | ||
31 | diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c | ||
32 | index a54405e..00b5078 100644 | ||
33 | --- a/arch/powerpc/kernel/signal.c | ||
34 | +++ b/arch/powerpc/kernel/signal.c | ||
35 | @@ -26,12 +26,12 @@ int show_unhandled_signals = 0; | ||
36 | * Allocate space for the signal frame | ||
37 | */ | ||
38 | void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | ||
39 | - size_t frame_size) | ||
40 | + size_t frame_size, int is_32) | ||
41 | { | ||
42 | unsigned long oldsp, newsp; | ||
43 | |||
44 | /* Default to using normal stack */ | ||
45 | - oldsp = regs->gpr[1]; | ||
46 | + oldsp = get_clean_sp(regs, is_32); | ||
47 | |||
48 | /* Check for alt stack */ | ||
49 | if ((ka->sa.sa_flags & SA_ONSTACK) && | ||
50 | diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h | ||
51 | index b427bf8..95e1b14 100644 | ||
52 | --- a/arch/powerpc/kernel/signal.h | ||
53 | +++ b/arch/powerpc/kernel/signal.h | ||
54 | @@ -15,7 +15,7 @@ | ||
55 | extern void do_signal(struct pt_regs *regs, unsigned long thread_info_flags); | ||
56 | |||
57 | extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | ||
58 | - size_t frame_size); | ||
59 | + size_t frame_size, int is_32); | ||
60 | extern void restore_sigmask(sigset_t *set); | ||
61 | |||
62 | extern int handle_signal32(unsigned long sig, struct k_sigaction *ka, | ||
63 | diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c | ||
64 | index b13abf3..d670429 100644 | ||
65 | --- a/arch/powerpc/kernel/signal_32.c | ||
66 | +++ b/arch/powerpc/kernel/signal_32.c | ||
67 | @@ -836,7 +836,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, | ||
68 | |||
69 | /* Set up Signal Frame */ | ||
70 | /* Put a Real Time Context onto stack */ | ||
71 | - rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf)); | ||
72 | + rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1); | ||
73 | addr = rt_sf; | ||
74 | if (unlikely(rt_sf == NULL)) | ||
75 | goto badframe; | ||
76 | @@ -1182,7 +1182,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, | ||
77 | unsigned long newsp = 0; | ||
78 | |||
79 | /* Set up Signal Frame */ | ||
80 | - frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
81 | + frame = get_sigframe(ka, regs, sizeof(*frame), 1); | ||
82 | if (unlikely(frame == NULL)) | ||
83 | goto badframe; | ||
84 | sc = (struct sigcontext __user *) &frame->sctx; | ||
85 | diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c | ||
86 | index e132891..2fe6fc6 100644 | ||
87 | --- a/arch/powerpc/kernel/signal_64.c | ||
88 | +++ b/arch/powerpc/kernel/signal_64.c | ||
89 | @@ -402,7 +402,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, | ||
90 | unsigned long newsp = 0; | ||
91 | long err = 0; | ||
92 | |||
93 | - frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
94 | + frame = get_sigframe(ka, regs, sizeof(*frame), 0); | ||
95 | if (unlikely(frame == NULL)) | ||
96 | goto badframe; | ||
97 | |||
98 | diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c | ||
99 | index 2b54fe0..aa8bc45 100644 | ||
100 | --- a/arch/x86/kernel/xsave.c | ||
101 | +++ b/arch/x86/kernel/xsave.c | ||
102 | @@ -89,7 +89,7 @@ int save_i387_xstate(void __user *buf) | ||
103 | |||
104 | if (!used_math()) | ||
105 | return 0; | ||
106 | - clear_used_math(); /* trigger finit */ | ||
107 | + | ||
108 | if (task_thread_info(tsk)->status & TS_USEDFPU) { | ||
109 | /* | ||
110 | * Start with clearing the user buffer. This will present a | ||
111 | @@ -114,6 +114,8 @@ int save_i387_xstate(void __user *buf) | ||
112 | return -1; | ||
113 | } | ||
114 | |||
115 | + clear_used_math(); /* trigger finit */ | ||
116 | + | ||
117 | if (task_thread_info(tsk)->status & TS_XSAVE) { | ||
118 | struct _fpstate __user *fx = buf; | ||
119 | struct _xstate __user *x = buf; | ||
120 | diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c | ||
121 | index 2d4477c..8005da2 100644 | ||
122 | --- a/arch/x86/kvm/mmu.c | ||
123 | +++ b/arch/x86/kvm/mmu.c | ||
124 | @@ -797,7 +797,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, | ||
125 | ASSERT(is_empty_shadow_page(sp->spt)); | ||
126 | bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); | ||
127 | sp->multimapped = 0; | ||
128 | - sp->global = 1; | ||
129 | + sp->global = 0; | ||
130 | sp->parent_pte = parent_pte; | ||
131 | --vcpu->kvm->arch.n_free_mmu_pages; | ||
132 | return sp; | ||
133 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c | ||
134 | index 758b7a1..425423e 100644 | ||
135 | --- a/arch/x86/kvm/x86.c | ||
136 | +++ b/arch/x86/kvm/x86.c | ||
137 | @@ -3962,6 +3962,11 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu); | ||
138 | |||
139 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | ||
140 | { | ||
141 | + if (vcpu->arch.time_page) { | ||
142 | + kvm_release_page_dirty(vcpu->arch.time_page); | ||
143 | + vcpu->arch.time_page = NULL; | ||
144 | + } | ||
145 | + | ||
146 | kvm_x86_ops->vcpu_free(vcpu); | ||
147 | } | ||
148 | |||
149 | diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c | ||
150 | index 6a518dd..4a68571 100644 | ||
151 | --- a/arch/x86/mm/kmmio.c | ||
152 | +++ b/arch/x86/mm/kmmio.c | ||
153 | @@ -87,7 +87,7 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr) | ||
154 | { | ||
155 | struct kmmio_probe *p; | ||
156 | list_for_each_entry_rcu(p, &kmmio_probes, list) { | ||
157 | - if (addr >= p->addr && addr <= (p->addr + p->len)) | ||
158 | + if (addr >= p->addr && addr < (p->addr + p->len)) | ||
159 | return p; | ||
160 | } | ||
161 | return NULL; | ||
162 | diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c | ||
163 | index 89bf924..9136946 100644 | ||
164 | --- a/arch/x86/pci/mmconfig-shared.c | ||
165 | +++ b/arch/x86/pci/mmconfig-shared.c | ||
166 | @@ -254,7 +254,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res, | ||
167 | if (!fixmem32) | ||
168 | return AE_OK; | ||
169 | if ((mcfg_res->start >= fixmem32->address) && | ||
170 | - (mcfg_res->end < (fixmem32->address + | ||
171 | + (mcfg_res->end <= (fixmem32->address + | ||
172 | fixmem32->address_length))) { | ||
173 | mcfg_res->flags = 1; | ||
174 | return AE_CTRL_TERMINATE; | ||
175 | @@ -271,7 +271,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res, | ||
176 | return AE_OK; | ||
177 | |||
178 | if ((mcfg_res->start >= address.minimum) && | ||
179 | - (mcfg_res->end < (address.minimum + address.address_length))) { | ||
180 | + (mcfg_res->end <= (address.minimum + address.address_length))) { | ||
181 | mcfg_res->flags = 1; | ||
182 | return AE_CTRL_TERMINATE; | ||
183 | } | ||
184 | @@ -318,7 +318,7 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved, | ||
185 | u64 old_size = size; | ||
186 | int valid = 0; | ||
187 | |||
188 | - while (!is_reserved(addr, addr + size - 1, E820_RESERVED)) { | ||
189 | + while (!is_reserved(addr, addr + size, E820_RESERVED)) { | ||
190 | size >>= 1; | ||
191 | if (size < (16UL<<20)) | ||
192 | break; | ||
193 | diff --git a/block/genhd.c b/block/genhd.c | ||
194 | index a9ec910..1a4916e 100644 | ||
195 | --- a/block/genhd.c | ||
196 | +++ b/block/genhd.c | ||
197 | @@ -98,7 +98,7 @@ void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk, | ||
198 | |||
199 | if (flags & DISK_PITER_REVERSE) | ||
200 | piter->idx = ptbl->len - 1; | ||
201 | - else if (flags & DISK_PITER_INCL_PART0) | ||
202 | + else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0)) | ||
203 | piter->idx = 0; | ||
204 | else | ||
205 | piter->idx = 1; | ||
206 | @@ -134,7 +134,8 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) | ||
207 | /* determine iteration parameters */ | ||
208 | if (piter->flags & DISK_PITER_REVERSE) { | ||
209 | inc = -1; | ||
210 | - if (piter->flags & DISK_PITER_INCL_PART0) | ||
211 | + if (piter->flags & (DISK_PITER_INCL_PART0 | | ||
212 | + DISK_PITER_INCL_EMPTY_PART0)) | ||
213 | end = -1; | ||
214 | else | ||
215 | end = 0; | ||
216 | @@ -150,7 +151,10 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) | ||
217 | part = rcu_dereference(ptbl->part[piter->idx]); | ||
218 | if (!part) | ||
219 | continue; | ||
220 | - if (!(piter->flags & DISK_PITER_INCL_EMPTY) && !part->nr_sects) | ||
221 | + if (!part->nr_sects && | ||
222 | + !(piter->flags & DISK_PITER_INCL_EMPTY) && | ||
223 | + !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && | ||
224 | + piter->idx == 0)) | ||
225 | continue; | ||
226 | |||
227 | get_device(part_to_dev(part)); | ||
228 | @@ -1011,7 +1015,7 @@ static int diskstats_show(struct seq_file *seqf, void *v) | ||
229 | "\n\n"); | ||
230 | */ | ||
231 | |||
232 | - disk_part_iter_init(&piter, gp, DISK_PITER_INCL_PART0); | ||
233 | + disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0); | ||
234 | while ((hd = disk_part_iter_next(&piter))) { | ||
235 | cpu = part_stat_lock(); | ||
236 | part_round_stats(cpu, hd); | ||
237 | diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c | ||
238 | index 61566b1..2b60413 100644 | ||
239 | --- a/drivers/acpi/acpica/rscreate.c | ||
240 | +++ b/drivers/acpi/acpica/rscreate.c | ||
241 | @@ -191,8 +191,6 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, | ||
242 | user_prt = ACPI_CAST_PTR(struct acpi_pci_routing_table, buffer); | ||
243 | |||
244 | for (index = 0; index < number_of_elements; index++) { | ||
245 | - int source_name_index = 2; | ||
246 | - int source_index_index = 3; | ||
247 | |||
248 | /* | ||
249 | * Point user_prt past this current structure | ||
250 | @@ -261,27 +259,6 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, | ||
251 | return_ACPI_STATUS(AE_BAD_DATA); | ||
252 | } | ||
253 | |||
254 | - /* | ||
255 | - * If BIOS erroneously reversed the _PRT source_name and source_index, | ||
256 | - * then reverse them back. | ||
257 | - */ | ||
258 | - if (ACPI_GET_OBJECT_TYPE(sub_object_list[3]) != | ||
259 | - ACPI_TYPE_INTEGER) { | ||
260 | - if (acpi_gbl_enable_interpreter_slack) { | ||
261 | - source_name_index = 3; | ||
262 | - source_index_index = 2; | ||
263 | - printk(KERN_WARNING | ||
264 | - "ACPI: Handling Garbled _PRT entry\n"); | ||
265 | - } else { | ||
266 | - ACPI_ERROR((AE_INFO, | ||
267 | - "(PRT[%X].source_index) Need Integer, found %s", | ||
268 | - index, | ||
269 | - acpi_ut_get_object_type_name | ||
270 | - (sub_object_list[3]))); | ||
271 | - return_ACPI_STATUS(AE_BAD_DATA); | ||
272 | - } | ||
273 | - } | ||
274 | - | ||
275 | user_prt->pin = (u32) obj_desc->integer.value; | ||
276 | |||
277 | /* | ||
278 | @@ -305,7 +282,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, | ||
279 | * 3) Third subobject: Dereference the PRT.source_name | ||
280 | * The name may be unresolved (slack mode), so allow a null object | ||
281 | */ | ||
282 | - obj_desc = sub_object_list[source_name_index]; | ||
283 | + obj_desc = sub_object_list[2]; | ||
284 | if (obj_desc) { | ||
285 | switch (ACPI_GET_OBJECT_TYPE(obj_desc)) { | ||
286 | case ACPI_TYPE_LOCAL_REFERENCE: | ||
287 | @@ -379,7 +356,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object, | ||
288 | |||
289 | /* 4) Fourth subobject: Dereference the PRT.source_index */ | ||
290 | |||
291 | - obj_desc = sub_object_list[source_index_index]; | ||
292 | + obj_desc = sub_object_list[3]; | ||
293 | if (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER) { | ||
294 | ACPI_ERROR((AE_INFO, | ||
295 | "(PRT[%X].SourceIndex) Need Integer, found %s", | ||
296 | diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c | ||
297 | index d0e563e..86e83f8 100644 | ||
298 | --- a/drivers/char/hw_random/virtio-rng.c | ||
299 | +++ b/drivers/char/hw_random/virtio-rng.c | ||
300 | @@ -37,9 +37,9 @@ static void random_recv_done(struct virtqueue *vq) | ||
301 | { | ||
302 | int len; | ||
303 | |||
304 | - /* We never get spurious callbacks. */ | ||
305 | + /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ | ||
306 | if (!vq->vq_ops->get_buf(vq, &len)) | ||
307 | - BUG(); | ||
308 | + return; | ||
309 | |||
310 | data_left = len / sizeof(random_data[0]); | ||
311 | complete(&have_data); | ||
312 | diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c | ||
313 | index d9e751b..af9761c 100644 | ||
314 | --- a/drivers/crypto/ixp4xx_crypto.c | ||
315 | +++ b/drivers/crypto/ixp4xx_crypto.c | ||
316 | @@ -101,6 +101,7 @@ struct buffer_desc { | ||
317 | u32 phys_addr; | ||
318 | u32 __reserved[4]; | ||
319 | struct buffer_desc *next; | ||
320 | + enum dma_data_direction dir; | ||
321 | }; | ||
322 | |||
323 | struct crypt_ctl { | ||
324 | @@ -132,14 +133,10 @@ struct crypt_ctl { | ||
325 | struct ablk_ctx { | ||
326 | struct buffer_desc *src; | ||
327 | struct buffer_desc *dst; | ||
328 | - unsigned src_nents; | ||
329 | - unsigned dst_nents; | ||
330 | }; | ||
331 | |||
332 | struct aead_ctx { | ||
333 | struct buffer_desc *buffer; | ||
334 | - unsigned short assoc_nents; | ||
335 | - unsigned short src_nents; | ||
336 | struct scatterlist ivlist; | ||
337 | /* used when the hmac is not on one sg entry */ | ||
338 | u8 *hmac_virt; | ||
339 | @@ -312,7 +309,7 @@ static struct crypt_ctl *get_crypt_desc_emerg(void) | ||
340 | } | ||
341 | } | ||
342 | |||
343 | -static void free_buf_chain(struct buffer_desc *buf, u32 phys) | ||
344 | +static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys) | ||
345 | { | ||
346 | while (buf) { | ||
347 | struct buffer_desc *buf1; | ||
348 | @@ -320,6 +317,7 @@ static void free_buf_chain(struct buffer_desc *buf, u32 phys) | ||
349 | |||
350 | buf1 = buf->next; | ||
351 | phys1 = buf->phys_next; | ||
352 | + dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir); | ||
353 | dma_pool_free(buffer_pool, buf, phys); | ||
354 | buf = buf1; | ||
355 | phys = phys1; | ||
356 | @@ -348,7 +346,6 @@ static void one_packet(dma_addr_t phys) | ||
357 | struct crypt_ctl *crypt; | ||
358 | struct ixp_ctx *ctx; | ||
359 | int failed; | ||
360 | - enum dma_data_direction src_direction = DMA_BIDIRECTIONAL; | ||
361 | |||
362 | failed = phys & 0x1 ? -EBADMSG : 0; | ||
363 | phys &= ~0x3; | ||
364 | @@ -358,13 +355,8 @@ static void one_packet(dma_addr_t phys) | ||
365 | case CTL_FLAG_PERFORM_AEAD: { | ||
366 | struct aead_request *req = crypt->data.aead_req; | ||
367 | struct aead_ctx *req_ctx = aead_request_ctx(req); | ||
368 | - dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, | ||
369 | - DMA_TO_DEVICE); | ||
370 | - dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL); | ||
371 | - dma_unmap_sg(dev, req->src, req_ctx->src_nents, | ||
372 | - DMA_BIDIRECTIONAL); | ||
373 | |||
374 | - free_buf_chain(req_ctx->buffer, crypt->src_buf); | ||
375 | + free_buf_chain(dev, req_ctx->buffer, crypt->src_buf); | ||
376 | if (req_ctx->hmac_virt) { | ||
377 | finish_scattered_hmac(crypt); | ||
378 | } | ||
379 | @@ -374,16 +366,11 @@ static void one_packet(dma_addr_t phys) | ||
380 | case CTL_FLAG_PERFORM_ABLK: { | ||
381 | struct ablkcipher_request *req = crypt->data.ablk_req; | ||
382 | struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
383 | - int nents; | ||
384 | + | ||
385 | if (req_ctx->dst) { | ||
386 | - nents = req_ctx->dst_nents; | ||
387 | - dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE); | ||
388 | - free_buf_chain(req_ctx->dst, crypt->dst_buf); | ||
389 | - src_direction = DMA_TO_DEVICE; | ||
390 | + free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); | ||
391 | } | ||
392 | - nents = req_ctx->src_nents; | ||
393 | - dma_unmap_sg(dev, req->src, nents, src_direction); | ||
394 | - free_buf_chain(req_ctx->src, crypt->src_buf); | ||
395 | + free_buf_chain(dev, req_ctx->src, crypt->src_buf); | ||
396 | req->base.complete(&req->base, failed); | ||
397 | break; | ||
398 | } | ||
399 | @@ -750,56 +737,35 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt, | ||
400 | return 0; | ||
401 | } | ||
402 | |||
403 | -static int count_sg(struct scatterlist *sg, int nbytes) | ||
404 | +static struct buffer_desc *chainup_buffers(struct device *dev, | ||
405 | + struct scatterlist *sg, unsigned nbytes, | ||
406 | + struct buffer_desc *buf, gfp_t flags, | ||
407 | + enum dma_data_direction dir) | ||
408 | { | ||
409 | - int i; | ||
410 | - for (i = 0; nbytes > 0; i++, sg = sg_next(sg)) | ||
411 | - nbytes -= sg->length; | ||
412 | - return i; | ||
413 | -} | ||
414 | - | ||
415 | -static struct buffer_desc *chainup_buffers(struct scatterlist *sg, | ||
416 | - unsigned nbytes, struct buffer_desc *buf, gfp_t flags) | ||
417 | -{ | ||
418 | - int nents = 0; | ||
419 | - | ||
420 | - while (nbytes > 0) { | ||
421 | + for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) { | ||
422 | + unsigned len = min(nbytes, sg->length); | ||
423 | struct buffer_desc *next_buf; | ||
424 | u32 next_buf_phys; | ||
425 | - unsigned len = min(nbytes, sg_dma_len(sg)); | ||
426 | + void *ptr; | ||
427 | |||
428 | - nents++; | ||
429 | nbytes -= len; | ||
430 | - if (!buf->phys_addr) { | ||
431 | - buf->phys_addr = sg_dma_address(sg); | ||
432 | - buf->buf_len = len; | ||
433 | - buf->next = NULL; | ||
434 | - buf->phys_next = 0; | ||
435 | - goto next; | ||
436 | - } | ||
437 | - /* Two consecutive chunks on one page may be handled by the old | ||
438 | - * buffer descriptor, increased by the length of the new one | ||
439 | - */ | ||
440 | - if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) { | ||
441 | - buf->buf_len += len; | ||
442 | - goto next; | ||
443 | - } | ||
444 | + ptr = page_address(sg_page(sg)) + sg->offset; | ||
445 | next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys); | ||
446 | - if (!next_buf) | ||
447 | - return NULL; | ||
448 | + if (!next_buf) { | ||
449 | + buf = NULL; | ||
450 | + break; | ||
451 | + } | ||
452 | + sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir); | ||
453 | buf->next = next_buf; | ||
454 | buf->phys_next = next_buf_phys; | ||
455 | - | ||
456 | buf = next_buf; | ||
457 | - buf->next = NULL; | ||
458 | - buf->phys_next = 0; | ||
459 | + | ||
460 | buf->phys_addr = sg_dma_address(sg); | ||
461 | buf->buf_len = len; | ||
462 | -next: | ||
463 | - if (nbytes > 0) { | ||
464 | - sg = sg_next(sg); | ||
465 | - } | ||
466 | + buf->dir = dir; | ||
467 | } | ||
468 | + buf->next = NULL; | ||
469 | + buf->phys_next = 0; | ||
470 | return buf; | ||
471 | } | ||
472 | |||
473 | @@ -860,12 +826,12 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt) | ||
474 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | ||
475 | struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm); | ||
476 | unsigned ivsize = crypto_ablkcipher_ivsize(tfm); | ||
477 | - int ret = -ENOMEM; | ||
478 | struct ix_sa_dir *dir; | ||
479 | struct crypt_ctl *crypt; | ||
480 | - unsigned int nbytes = req->nbytes, nents; | ||
481 | + unsigned int nbytes = req->nbytes; | ||
482 | enum dma_data_direction src_direction = DMA_BIDIRECTIONAL; | ||
483 | struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req); | ||
484 | + struct buffer_desc src_hook; | ||
485 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? | ||
486 | GFP_KERNEL : GFP_ATOMIC; | ||
487 | |||
488 | @@ -878,7 +844,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt) | ||
489 | |||
490 | crypt = get_crypt_desc(); | ||
491 | if (!crypt) | ||
492 | - return ret; | ||
493 | + return -ENOMEM; | ||
494 | |||
495 | crypt->data.ablk_req = req; | ||
496 | crypt->crypto_ctx = dir->npe_ctx_phys; | ||
497 | @@ -891,53 +857,41 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt) | ||
498 | BUG_ON(ivsize && !req->info); | ||
499 | memcpy(crypt->iv, req->info, ivsize); | ||
500 | if (req->src != req->dst) { | ||
501 | + struct buffer_desc dst_hook; | ||
502 | crypt->mode |= NPE_OP_NOT_IN_PLACE; | ||
503 | - nents = count_sg(req->dst, nbytes); | ||
504 | /* This was never tested by Intel | ||
505 | * for more than one dst buffer, I think. */ | ||
506 | - BUG_ON(nents != 1); | ||
507 | - req_ctx->dst_nents = nents; | ||
508 | - dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE); | ||
509 | - req_ctx->dst = dma_pool_alloc(buffer_pool, flags,&crypt->dst_buf); | ||
510 | - if (!req_ctx->dst) | ||
511 | - goto unmap_sg_dest; | ||
512 | - req_ctx->dst->phys_addr = 0; | ||
513 | - if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags)) | ||
514 | + BUG_ON(req->dst->length < nbytes); | ||
515 | + req_ctx->dst = NULL; | ||
516 | + if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, | ||
517 | + flags, DMA_FROM_DEVICE)) | ||
518 | goto free_buf_dest; | ||
519 | src_direction = DMA_TO_DEVICE; | ||
520 | + req_ctx->dst = dst_hook.next; | ||
521 | + crypt->dst_buf = dst_hook.phys_next; | ||
522 | } else { | ||
523 | req_ctx->dst = NULL; | ||
524 | - req_ctx->dst_nents = 0; | ||
525 | } | ||
526 | - nents = count_sg(req->src, nbytes); | ||
527 | - req_ctx->src_nents = nents; | ||
528 | - dma_map_sg(dev, req->src, nents, src_direction); | ||
529 | - | ||
530 | - req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf); | ||
531 | - if (!req_ctx->src) | ||
532 | - goto unmap_sg_src; | ||
533 | - req_ctx->src->phys_addr = 0; | ||
534 | - if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags)) | ||
535 | + req_ctx->src = NULL; | ||
536 | + if (!chainup_buffers(dev, req->src, nbytes, &src_hook, | ||
537 | + flags, src_direction)) | ||
538 | goto free_buf_src; | ||
539 | |||
540 | + req_ctx->src = src_hook.next; | ||
541 | + crypt->src_buf = src_hook.phys_next; | ||
542 | crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK; | ||
543 | qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); | ||
544 | BUG_ON(qmgr_stat_overflow(SEND_QID)); | ||
545 | return -EINPROGRESS; | ||
546 | |||
547 | free_buf_src: | ||
548 | - free_buf_chain(req_ctx->src, crypt->src_buf); | ||
549 | -unmap_sg_src: | ||
550 | - dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction); | ||
551 | + free_buf_chain(dev, req_ctx->src, crypt->src_buf); | ||
552 | free_buf_dest: | ||
553 | if (req->src != req->dst) { | ||
554 | - free_buf_chain(req_ctx->dst, crypt->dst_buf); | ||
555 | -unmap_sg_dest: | ||
556 | - dma_unmap_sg(dev, req->src, req_ctx->dst_nents, | ||
557 | - DMA_FROM_DEVICE); | ||
558 | + free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); | ||
559 | } | ||
560 | crypt->ctl_flags = CTL_FLAG_UNUSED; | ||
561 | - return ret; | ||
562 | + return -ENOMEM; | ||
563 | } | ||
564 | |||
565 | static int ablk_encrypt(struct ablkcipher_request *req) | ||
566 | @@ -985,7 +939,7 @@ static int hmac_inconsistent(struct scatterlist *sg, unsigned start, | ||
567 | break; | ||
568 | |||
569 | offset += sg->length; | ||
570 | - sg = sg_next(sg); | ||
571 | + sg = scatterwalk_sg_next(sg); | ||
572 | } | ||
573 | return (start + nbytes > offset + sg->length); | ||
574 | } | ||
575 | @@ -997,11 +951,10 @@ static int aead_perform(struct aead_request *req, int encrypt, | ||
576 | struct ixp_ctx *ctx = crypto_aead_ctx(tfm); | ||
577 | unsigned ivsize = crypto_aead_ivsize(tfm); | ||
578 | unsigned authsize = crypto_aead_authsize(tfm); | ||
579 | - int ret = -ENOMEM; | ||
580 | struct ix_sa_dir *dir; | ||
581 | struct crypt_ctl *crypt; | ||
582 | - unsigned int cryptlen, nents; | ||
583 | - struct buffer_desc *buf; | ||
584 | + unsigned int cryptlen; | ||
585 | + struct buffer_desc *buf, src_hook; | ||
586 | struct aead_ctx *req_ctx = aead_request_ctx(req); | ||
587 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? | ||
588 | GFP_KERNEL : GFP_ATOMIC; | ||
589 | @@ -1022,7 +975,7 @@ static int aead_perform(struct aead_request *req, int encrypt, | ||
590 | } | ||
591 | crypt = get_crypt_desc(); | ||
592 | if (!crypt) | ||
593 | - return ret; | ||
594 | + return -ENOMEM; | ||
595 | |||
596 | crypt->data.aead_req = req; | ||
597 | crypt->crypto_ctx = dir->npe_ctx_phys; | ||
598 | @@ -1041,31 +994,27 @@ static int aead_perform(struct aead_request *req, int encrypt, | ||
599 | BUG(); /* -ENOTSUP because of my lazyness */ | ||
600 | } | ||
601 | |||
602 | - req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf); | ||
603 | - if (!req_ctx->buffer) | ||
604 | - goto out; | ||
605 | - req_ctx->buffer->phys_addr = 0; | ||
606 | /* ASSOC data */ | ||
607 | - nents = count_sg(req->assoc, req->assoclen); | ||
608 | - req_ctx->assoc_nents = nents; | ||
609 | - dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE); | ||
610 | - buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer,flags); | ||
611 | + buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook, | ||
612 | + flags, DMA_TO_DEVICE); | ||
613 | + req_ctx->buffer = src_hook.next; | ||
614 | + crypt->src_buf = src_hook.phys_next; | ||
615 | if (!buf) | ||
616 | - goto unmap_sg_assoc; | ||
617 | + goto out; | ||
618 | /* IV */ | ||
619 | sg_init_table(&req_ctx->ivlist, 1); | ||
620 | sg_set_buf(&req_ctx->ivlist, iv, ivsize); | ||
621 | - dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL); | ||
622 | - buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags); | ||
623 | + buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags, | ||
624 | + DMA_BIDIRECTIONAL); | ||
625 | if (!buf) | ||
626 | - goto unmap_sg_iv; | ||
627 | + goto free_chain; | ||
628 | if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) { | ||
629 | /* The 12 hmac bytes are scattered, | ||
630 | * we need to copy them into a safe buffer */ | ||
631 | req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, | ||
632 | &crypt->icv_rev_aes); | ||
633 | if (unlikely(!req_ctx->hmac_virt)) | ||
634 | - goto unmap_sg_iv; | ||
635 | + goto free_chain; | ||
636 | if (!encrypt) { | ||
637 | scatterwalk_map_and_copy(req_ctx->hmac_virt, | ||
638 | req->src, cryptlen, authsize, 0); | ||
639 | @@ -1075,33 +1024,28 @@ static int aead_perform(struct aead_request *req, int encrypt, | ||
640 | req_ctx->hmac_virt = NULL; | ||
641 | } | ||
642 | /* Crypt */ | ||
643 | - nents = count_sg(req->src, cryptlen + authsize); | ||
644 | - req_ctx->src_nents = nents; | ||
645 | - dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL); | ||
646 | - buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags); | ||
647 | + buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags, | ||
648 | + DMA_BIDIRECTIONAL); | ||
649 | if (!buf) | ||
650 | - goto unmap_sg_src; | ||
651 | + goto free_hmac_virt; | ||
652 | if (!req_ctx->hmac_virt) { | ||
653 | crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize; | ||
654 | } | ||
655 | + | ||
656 | crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD; | ||
657 | qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt)); | ||
658 | BUG_ON(qmgr_stat_overflow(SEND_QID)); | ||
659 | return -EINPROGRESS; | ||
660 | -unmap_sg_src: | ||
661 | - dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL); | ||
662 | +free_hmac_virt: | ||
663 | if (req_ctx->hmac_virt) { | ||
664 | dma_pool_free(buffer_pool, req_ctx->hmac_virt, | ||
665 | crypt->icv_rev_aes); | ||
666 | } | ||
667 | -unmap_sg_iv: | ||
668 | - dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL); | ||
669 | -unmap_sg_assoc: | ||
670 | - dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE); | ||
671 | - free_buf_chain(req_ctx->buffer, crypt->src_buf); | ||
672 | +free_chain: | ||
673 | + free_buf_chain(dev, req_ctx->buffer, crypt->src_buf); | ||
674 | out: | ||
675 | crypt->ctl_flags = CTL_FLAG_UNUSED; | ||
676 | - return ret; | ||
677 | + return -ENOMEM; | ||
678 | } | ||
679 | |||
680 | static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) | ||
681 | diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h | ||
682 | index d6cc986..9239747 100644 | ||
683 | --- a/drivers/gpu/drm/i915/i915_drv.h | ||
684 | +++ b/drivers/gpu/drm/i915/i915_drv.h | ||
685 | @@ -773,7 +773,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | ||
686 | (dev)->pci_device == 0x2A42 || \ | ||
687 | (dev)->pci_device == 0x2E02 || \ | ||
688 | (dev)->pci_device == 0x2E12 || \ | ||
689 | - (dev)->pci_device == 0x2E22) | ||
690 | + (dev)->pci_device == 0x2E22 || \ | ||
691 | + (dev)->pci_device == 0x2E32) | ||
692 | |||
693 | #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) | ||
694 | |||
695 | @@ -782,6 +783,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | ||
696 | #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ | ||
697 | (dev)->pci_device == 0x2E12 || \ | ||
698 | (dev)->pci_device == 0x2E22 || \ | ||
699 | + (dev)->pci_device == 0x2E32 || \ | ||
700 | IS_GM45(dev)) | ||
701 | |||
702 | #define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \ | ||
703 | diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h | ||
704 | index cc2938d..a787fb8 100644 | ||
705 | --- a/drivers/gpu/drm/i915/i915_reg.h | ||
706 | +++ b/drivers/gpu/drm/i915/i915_reg.h | ||
707 | @@ -1431,6 +1431,7 @@ | ||
708 | #define DISPPLANE_NO_LINE_DOUBLE 0 | ||
709 | #define DISPPLANE_STEREO_POLARITY_FIRST 0 | ||
710 | #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) | ||
711 | +#define DISPPLANE_TILED (1<<10) | ||
712 | #define DSPAADDR 0x70184 | ||
713 | #define DSPASTRIDE 0x70188 | ||
714 | #define DSPAPOS 0x7018C /* reserved */ | ||
715 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c | ||
716 | index 601a76f..254c5ca 100644 | ||
717 | --- a/drivers/gpu/drm/i915/intel_display.c | ||
718 | +++ b/drivers/gpu/drm/i915/intel_display.c | ||
719 | @@ -338,6 +338,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | ||
720 | int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR); | ||
721 | int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF); | ||
722 | int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; | ||
723 | + int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF); | ||
724 | int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; | ||
725 | u32 dspcntr, alignment; | ||
726 | int ret; | ||
727 | @@ -414,6 +415,13 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | ||
728 | mutex_unlock(&dev->struct_mutex); | ||
729 | return -EINVAL; | ||
730 | } | ||
731 | + if (IS_I965G(dev)) { | ||
732 | + if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
733 | + dspcntr |= DISPPLANE_TILED; | ||
734 | + else | ||
735 | + dspcntr &= ~DISPPLANE_TILED; | ||
736 | + } | ||
737 | + | ||
738 | I915_WRITE(dspcntr_reg, dspcntr); | ||
739 | |||
740 | Start = obj_priv->gtt_offset; | ||
741 | @@ -426,6 +434,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | ||
742 | I915_READ(dspbase); | ||
743 | I915_WRITE(dspsurf, Start); | ||
744 | I915_READ(dspsurf); | ||
745 | + I915_WRITE(dsptileoff, (y << 16) | x); | ||
746 | } else { | ||
747 | I915_WRITE(dspbase, Start + Offset); | ||
748 | I915_READ(dspbase); | ||
749 | diff --git a/drivers/ide/cs5536.c b/drivers/ide/cs5536.c | ||
750 | index 7a62db7..dc89bc2 100644 | ||
751 | --- a/drivers/ide/cs5536.c | ||
752 | +++ b/drivers/ide/cs5536.c | ||
753 | @@ -237,6 +237,7 @@ static const struct ide_dma_ops cs5536_dma_ops = { | ||
754 | .dma_test_irq = ide_dma_test_irq, | ||
755 | .dma_lost_irq = ide_dma_lost_irq, | ||
756 | .dma_timeout = ide_dma_timeout, | ||
757 | + .dma_sff_read_status = ide_dma_sff_read_status, | ||
758 | }; | ||
759 | |||
760 | static const struct ide_port_info cs5536_info = { | ||
761 | diff --git a/drivers/net/b44.c b/drivers/net/b44.c | ||
762 | index dc5f051..c2ffa8c 100644 | ||
763 | --- a/drivers/net/b44.c | ||
764 | +++ b/drivers/net/b44.c | ||
765 | @@ -750,7 +750,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | ||
766 | dest_idx * sizeof(dest_desc), | ||
767 | DMA_BIDIRECTIONAL); | ||
768 | |||
769 | - ssb_dma_sync_single_for_device(bp->sdev, le32_to_cpu(src_desc->addr), | ||
770 | + ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping, | ||
771 | RX_PKT_BUF_SZ, | ||
772 | DMA_FROM_DEVICE); | ||
773 | } | ||
774 | diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c | ||
775 | index b8251e8..df0794e 100644 | ||
776 | --- a/drivers/net/forcedeth.c | ||
777 | +++ b/drivers/net/forcedeth.c | ||
778 | @@ -5995,6 +5995,9 @@ static int nv_resume(struct pci_dev *pdev) | ||
779 | for (i = 0;i <= np->register_size/sizeof(u32); i++) | ||
780 | writel(np->saved_config_space[i], base+i*sizeof(u32)); | ||
781 | |||
782 | + /* restore phy state, including autoneg */ | ||
783 | + phy_init(dev); | ||
784 | + | ||
785 | netif_device_attach(dev); | ||
786 | if (netif_running(dev)) { | ||
787 | rc = nv_open(dev); | ||
788 | diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c | ||
789 | index b0bc3bc..67bb769 100644 | ||
790 | --- a/drivers/net/mv643xx_eth.c | ||
791 | +++ b/drivers/net/mv643xx_eth.c | ||
792 | @@ -372,12 +372,12 @@ struct mv643xx_eth_private { | ||
793 | struct work_struct tx_timeout_task; | ||
794 | |||
795 | struct napi_struct napi; | ||
796 | + u8 oom; | ||
797 | u8 work_link; | ||
798 | u8 work_tx; | ||
799 | u8 work_tx_end; | ||
800 | u8 work_rx; | ||
801 | u8 work_rx_refill; | ||
802 | - u8 work_rx_oom; | ||
803 | |||
804 | int skb_size; | ||
805 | struct sk_buff_head rx_recycle; | ||
806 | @@ -603,7 +603,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget) | ||
807 | dma_get_cache_alignment() - 1); | ||
808 | |||
809 | if (skb == NULL) { | ||
810 | - mp->work_rx_oom |= 1 << rxq->index; | ||
811 | + mp->oom = 1; | ||
812 | goto oom; | ||
813 | } | ||
814 | |||
815 | @@ -1177,7 +1177,6 @@ static void mib_counters_update(struct mv643xx_eth_private *mp) | ||
816 | |||
817 | spin_lock_bh(&mp->mib_counters_lock); | ||
818 | p->good_octets_received += mib_read(mp, 0x00); | ||
819 | - p->good_octets_received += (u64)mib_read(mp, 0x04) << 32; | ||
820 | p->bad_octets_received += mib_read(mp, 0x08); | ||
821 | p->internal_mac_transmit_err += mib_read(mp, 0x0c); | ||
822 | p->good_frames_received += mib_read(mp, 0x10); | ||
823 | @@ -1191,7 +1190,6 @@ static void mib_counters_update(struct mv643xx_eth_private *mp) | ||
824 | p->frames_512_to_1023_octets += mib_read(mp, 0x30); | ||
825 | p->frames_1024_to_max_octets += mib_read(mp, 0x34); | ||
826 | p->good_octets_sent += mib_read(mp, 0x38); | ||
827 | - p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32; | ||
828 | p->good_frames_sent += mib_read(mp, 0x40); | ||
829 | p->excessive_collision += mib_read(mp, 0x44); | ||
830 | p->multicast_frames_sent += mib_read(mp, 0x48); | ||
831 | @@ -1908,8 +1906,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) | ||
832 | |||
833 | mp = container_of(napi, struct mv643xx_eth_private, napi); | ||
834 | |||
835 | - mp->work_rx_refill |= mp->work_rx_oom; | ||
836 | - mp->work_rx_oom = 0; | ||
837 | + if (unlikely(mp->oom)) { | ||
838 | + mp->oom = 0; | ||
839 | + del_timer(&mp->rx_oom); | ||
840 | + } | ||
841 | |||
842 | work_done = 0; | ||
843 | while (work_done < budget) { | ||
844 | @@ -1923,8 +1923,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) | ||
845 | continue; | ||
846 | } | ||
847 | |||
848 | - queue_mask = mp->work_tx | mp->work_tx_end | | ||
849 | - mp->work_rx | mp->work_rx_refill; | ||
850 | + queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; | ||
851 | + if (likely(!mp->oom)) | ||
852 | + queue_mask |= mp->work_rx_refill; | ||
853 | + | ||
854 | if (!queue_mask) { | ||
855 | if (mv643xx_eth_collect_events(mp)) | ||
856 | continue; | ||
857 | @@ -1945,7 +1947,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) | ||
858 | txq_maybe_wake(mp->txq + queue); | ||
859 | } else if (mp->work_rx & queue_mask) { | ||
860 | work_done += rxq_process(mp->rxq + queue, work_tbd); | ||
861 | - } else if (mp->work_rx_refill & queue_mask) { | ||
862 | + } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { | ||
863 | work_done += rxq_refill(mp->rxq + queue, work_tbd); | ||
864 | } else { | ||
865 | BUG(); | ||
866 | @@ -1953,7 +1955,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) | ||
867 | } | ||
868 | |||
869 | if (work_done < budget) { | ||
870 | - if (mp->work_rx_oom) | ||
871 | + if (mp->oom) | ||
872 | mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); | ||
873 | napi_complete(napi); | ||
874 | wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT); | ||
875 | @@ -2145,7 +2147,7 @@ static int mv643xx_eth_open(struct net_device *dev) | ||
876 | rxq_refill(mp->rxq + i, INT_MAX); | ||
877 | } | ||
878 | |||
879 | - if (mp->work_rx_oom) { | ||
880 | + if (mp->oom) { | ||
881 | mp->rx_oom.expires = jiffies + (HZ / 10); | ||
882 | add_timer(&mp->rx_oom); | ||
883 | } | ||
884 | diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c | ||
885 | index ccaeb5c..9347a3c 100644 | ||
886 | --- a/drivers/net/wireless/ath5k/debug.c | ||
887 | +++ b/drivers/net/wireless/ath5k/debug.c | ||
888 | @@ -465,7 +465,7 @@ ath5k_debug_dump_bands(struct ath5k_softc *sc) | ||
889 | |||
890 | for (b = 0; b < IEEE80211_NUM_BANDS; b++) { | ||
891 | struct ieee80211_supported_band *band = &sc->sbands[b]; | ||
892 | - char bname[5]; | ||
893 | + char bname[6]; | ||
894 | switch (band->band) { | ||
895 | case IEEE80211_BAND_2GHZ: | ||
896 | strcpy(bname, "2 GHz"); | ||
897 | diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c | ||
898 | index 3c04044..1cc826b 100644 | ||
899 | --- a/drivers/net/wireless/ath9k/main.c | ||
900 | +++ b/drivers/net/wireless/ath9k/main.c | ||
901 | @@ -2300,11 +2300,6 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw, | ||
902 | rfilt = ath_calcrxfilter(sc); | ||
903 | ath9k_hw_setrxfilter(sc->sc_ah, rfilt); | ||
904 | |||
905 | - if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { | ||
906 | - if (*total_flags & FIF_BCN_PRBRESP_PROMISC) | ||
907 | - ath9k_hw_write_associd(sc->sc_ah, ath_bcast_mac, 0); | ||
908 | - } | ||
909 | - | ||
910 | DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter); | ||
911 | } | ||
912 | |||
913 | diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c | ||
914 | index 6d65a02..dbae617 100644 | ||
915 | --- a/drivers/net/wireless/b43/dma.c | ||
916 | +++ b/drivers/net/wireless/b43/dma.c | ||
917 | @@ -551,11 +551,32 @@ address_error: | ||
918 | return 1; | ||
919 | } | ||
920 | |||
921 | +static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb) | ||
922 | +{ | ||
923 | + unsigned char *f = skb->data + ring->frameoffset; | ||
924 | + | ||
925 | + return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF); | ||
926 | +} | ||
927 | + | ||
928 | +static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb) | ||
929 | +{ | ||
930 | + struct b43_rxhdr_fw4 *rxhdr; | ||
931 | + unsigned char *frame; | ||
932 | + | ||
933 | + /* This poisons the RX buffer to detect DMA failures. */ | ||
934 | + | ||
935 | + rxhdr = (struct b43_rxhdr_fw4 *)(skb->data); | ||
936 | + rxhdr->frame_len = 0; | ||
937 | + | ||
938 | + B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2); | ||
939 | + frame = skb->data + ring->frameoffset; | ||
940 | + memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */); | ||
941 | +} | ||
942 | + | ||
943 | static int setup_rx_descbuffer(struct b43_dmaring *ring, | ||
944 | struct b43_dmadesc_generic *desc, | ||
945 | struct b43_dmadesc_meta *meta, gfp_t gfp_flags) | ||
946 | { | ||
947 | - struct b43_rxhdr_fw4 *rxhdr; | ||
948 | dma_addr_t dmaaddr; | ||
949 | struct sk_buff *skb; | ||
950 | |||
951 | @@ -564,6 +585,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring, | ||
952 | skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); | ||
953 | if (unlikely(!skb)) | ||
954 | return -ENOMEM; | ||
955 | + b43_poison_rx_buffer(ring, skb); | ||
956 | dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); | ||
957 | if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) { | ||
958 | /* ugh. try to realloc in zone_dma */ | ||
959 | @@ -574,6 +596,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring, | ||
960 | skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); | ||
961 | if (unlikely(!skb)) | ||
962 | return -ENOMEM; | ||
963 | + b43_poison_rx_buffer(ring, skb); | ||
964 | dmaaddr = map_descbuffer(ring, skb->data, | ||
965 | ring->rx_buffersize, 0); | ||
966 | } | ||
967 | @@ -589,9 +612,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring, | ||
968 | ring->ops->fill_descriptor(ring, desc, dmaaddr, | ||
969 | ring->rx_buffersize, 0, 0, 0); | ||
970 | |||
971 | - rxhdr = (struct b43_rxhdr_fw4 *)(skb->data); | ||
972 | - rxhdr->frame_len = 0; | ||
973 | - | ||
974 | return 0; | ||
975 | } | ||
976 | |||
977 | @@ -1476,12 +1496,17 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) | ||
978 | len = le16_to_cpu(rxhdr->frame_len); | ||
979 | } while (len == 0 && i++ < 5); | ||
980 | if (unlikely(len == 0)) { | ||
981 | - /* recycle the descriptor buffer. */ | ||
982 | - sync_descbuffer_for_device(ring, meta->dmaaddr, | ||
983 | - ring->rx_buffersize); | ||
984 | - goto drop; | ||
985 | + dmaaddr = meta->dmaaddr; | ||
986 | + goto drop_recycle_buffer; | ||
987 | } | ||
988 | } | ||
989 | + if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) { | ||
990 | + /* Something went wrong with the DMA. | ||
991 | + * The device did not touch the buffer and did not overwrite the poison. */ | ||
992 | + b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n"); | ||
993 | + dmaaddr = meta->dmaaddr; | ||
994 | + goto drop_recycle_buffer; | ||
995 | + } | ||
996 | if (unlikely(len > ring->rx_buffersize)) { | ||
997 | /* The data did not fit into one descriptor buffer | ||
998 | * and is split over multiple buffers. | ||
999 | @@ -1494,6 +1519,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) | ||
1000 | while (1) { | ||
1001 | desc = ops->idx2desc(ring, *slot, &meta); | ||
1002 | /* recycle the descriptor buffer. */ | ||
1003 | + b43_poison_rx_buffer(ring, meta->skb); | ||
1004 | sync_descbuffer_for_device(ring, meta->dmaaddr, | ||
1005 | ring->rx_buffersize); | ||
1006 | *slot = next_slot(ring, *slot); | ||
1007 | @@ -1512,8 +1538,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) | ||
1008 | err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); | ||
1009 | if (unlikely(err)) { | ||
1010 | b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n"); | ||
1011 | - sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); | ||
1012 | - goto drop; | ||
1013 | + goto drop_recycle_buffer; | ||
1014 | } | ||
1015 | |||
1016 | unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); | ||
1017 | @@ -1523,6 +1548,11 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) | ||
1018 | b43_rx(ring->dev, skb, rxhdr); | ||
1019 | drop: | ||
1020 | return; | ||
1021 | + | ||
1022 | +drop_recycle_buffer: | ||
1023 | + /* Poison and recycle the RX buffer. */ | ||
1024 | + b43_poison_rx_buffer(ring, skb); | ||
1025 | + sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); | ||
1026 | } | ||
1027 | |||
1028 | void b43_dma_rx(struct b43_dmaring *ring) | ||
1029 | diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c | ||
1030 | index ed93ac4..f6a9388 100644 | ||
1031 | --- a/drivers/net/wireless/rndis_wlan.c | ||
1032 | +++ b/drivers/net/wireless/rndis_wlan.c | ||
1033 | @@ -2550,6 +2550,11 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf) | ||
1034 | mutex_init(&priv->command_lock); | ||
1035 | spin_lock_init(&priv->stats_lock); | ||
1036 | |||
1037 | + /* because rndis_command() sleeps we need to use workqueue */ | ||
1038 | + priv->workqueue = create_singlethread_workqueue("rndis_wlan"); | ||
1039 | + INIT_WORK(&priv->work, rndis_wext_worker); | ||
1040 | + INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats); | ||
1041 | + | ||
1042 | /* try bind rndis_host */ | ||
1043 | retval = generic_rndis_bind(usbdev, intf, FLAG_RNDIS_PHYM_WIRELESS); | ||
1044 | if (retval < 0) | ||
1045 | @@ -2594,16 +2599,17 @@ static int rndis_wext_bind(struct usbnet *usbdev, struct usb_interface *intf) | ||
1046 | disassociate(usbdev, 1); | ||
1047 | netif_carrier_off(usbdev->net); | ||
1048 | |||
1049 | - /* because rndis_command() sleeps we need to use workqueue */ | ||
1050 | - priv->workqueue = create_singlethread_workqueue("rndis_wlan"); | ||
1051 | - INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats); | ||
1052 | queue_delayed_work(priv->workqueue, &priv->stats_work, | ||
1053 | round_jiffies_relative(STATS_UPDATE_JIFFIES)); | ||
1054 | - INIT_WORK(&priv->work, rndis_wext_worker); | ||
1055 | |||
1056 | return 0; | ||
1057 | |||
1058 | fail: | ||
1059 | + cancel_delayed_work_sync(&priv->stats_work); | ||
1060 | + cancel_work_sync(&priv->work); | ||
1061 | + flush_workqueue(priv->workqueue); | ||
1062 | + destroy_workqueue(priv->workqueue); | ||
1063 | + | ||
1064 | kfree(priv); | ||
1065 | return retval; | ||
1066 | } | ||
1067 | diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c | ||
1068 | index 26c536b..8a01120 100644 | ||
1069 | --- a/drivers/pci/dmar.c | ||
1070 | +++ b/drivers/pci/dmar.c | ||
1071 | @@ -170,12 +170,21 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) | ||
1072 | struct dmar_drhd_unit *dmaru; | ||
1073 | int ret = 0; | ||
1074 | |||
1075 | + drhd = (struct acpi_dmar_hardware_unit *)header; | ||
1076 | + if (!drhd->address) { | ||
1077 | + /* Promote an attitude of violence to a BIOS engineer today */ | ||
1078 | + WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n" | ||
1079 | + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
1080 | + dmi_get_system_info(DMI_BIOS_VENDOR), | ||
1081 | + dmi_get_system_info(DMI_BIOS_VERSION), | ||
1082 | + dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
1083 | + return -ENODEV; | ||
1084 | + } | ||
1085 | dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); | ||
1086 | if (!dmaru) | ||
1087 | return -ENOMEM; | ||
1088 | |||
1089 | dmaru->hdr = header; | ||
1090 | - drhd = (struct acpi_dmar_hardware_unit *)header; | ||
1091 | dmaru->reg_base_addr = drhd->address; | ||
1092 | dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ | ||
1093 | |||
1094 | diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c | ||
1095 | index f3f6865..7e4f9e6 100644 | ||
1096 | --- a/drivers/pci/intel-iommu.c | ||
1097 | +++ b/drivers/pci/intel-iommu.c | ||
1098 | @@ -447,11 +447,17 @@ static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn) | ||
1099 | if (drhd->ignored) | ||
1100 | continue; | ||
1101 | |||
1102 | - for (i = 0; i < drhd->devices_cnt; i++) | ||
1103 | + for (i = 0; i < drhd->devices_cnt; i++) { | ||
1104 | if (drhd->devices[i] && | ||
1105 | drhd->devices[i]->bus->number == bus && | ||
1106 | drhd->devices[i]->devfn == devfn) | ||
1107 | return drhd->iommu; | ||
1108 | + if (drhd->devices[i] && | ||
1109 | + drhd->devices[i]->subordinate && | ||
1110 | + drhd->devices[i]->subordinate->number <= bus && | ||
1111 | + drhd->devices[i]->subordinate->subordinate >= bus) | ||
1112 | + return drhd->iommu; | ||
1113 | + } | ||
1114 | |||
1115 | if (drhd->include_all) | ||
1116 | return drhd->iommu; | ||
1117 | diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c | ||
1118 | index 92b9efe..c65c2f4 100644 | ||
1119 | --- a/drivers/pci/quirks.c | ||
1120 | +++ b/drivers/pci/quirks.c | ||
1121 | @@ -1960,6 +1960,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_di | ||
1122 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi); | ||
1123 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi); | ||
1124 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi); | ||
1125 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi); | ||
1126 | |||
1127 | /* Disable MSI on chipsets that are known to not support it */ | ||
1128 | static void __devinit quirk_disable_msi(struct pci_dev *dev) | ||
1129 | diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c | ||
1130 | index d243320..99d32f7 100644 | ||
1131 | --- a/drivers/platform/x86/thinkpad_acpi.c | ||
1132 | +++ b/drivers/platform/x86/thinkpad_acpi.c | ||
1133 | @@ -306,11 +306,17 @@ static u32 dbg_level; | ||
1134 | |||
1135 | static struct workqueue_struct *tpacpi_wq; | ||
1136 | |||
1137 | +enum led_status_t { | ||
1138 | + TPACPI_LED_OFF = 0, | ||
1139 | + TPACPI_LED_ON, | ||
1140 | + TPACPI_LED_BLINK, | ||
1141 | +}; | ||
1142 | + | ||
1143 | /* Special LED class that can defer work */ | ||
1144 | struct tpacpi_led_classdev { | ||
1145 | struct led_classdev led_classdev; | ||
1146 | struct work_struct work; | ||
1147 | - enum led_brightness new_brightness; | ||
1148 | + enum led_status_t new_state; | ||
1149 | unsigned int led; | ||
1150 | }; | ||
1151 | |||
1152 | @@ -4057,7 +4063,7 @@ static void light_set_status_worker(struct work_struct *work) | ||
1153 | container_of(work, struct tpacpi_led_classdev, work); | ||
1154 | |||
1155 | if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING)) | ||
1156 | - light_set_status((data->new_brightness != LED_OFF)); | ||
1157 | + light_set_status((data->new_state != TPACPI_LED_OFF)); | ||
1158 | } | ||
1159 | |||
1160 | static void light_sysfs_set(struct led_classdev *led_cdev, | ||
1161 | @@ -4067,7 +4073,8 @@ static void light_sysfs_set(struct led_classdev *led_cdev, | ||
1162 | container_of(led_cdev, | ||
1163 | struct tpacpi_led_classdev, | ||
1164 | led_classdev); | ||
1165 | - data->new_brightness = brightness; | ||
1166 | + data->new_state = (brightness != LED_OFF) ? | ||
1167 | + TPACPI_LED_ON : TPACPI_LED_OFF; | ||
1168 | queue_work(tpacpi_wq, &data->work); | ||
1169 | } | ||
1170 | |||
1171 | @@ -4574,12 +4581,6 @@ enum { /* For TPACPI_LED_OLD */ | ||
1172 | TPACPI_LED_EC_HLMS = 0x0e, /* EC reg to select led to command */ | ||
1173 | }; | ||
1174 | |||
1175 | -enum led_status_t { | ||
1176 | - TPACPI_LED_OFF = 0, | ||
1177 | - TPACPI_LED_ON, | ||
1178 | - TPACPI_LED_BLINK, | ||
1179 | -}; | ||
1180 | - | ||
1181 | static enum led_access_mode led_supported; | ||
1182 | |||
1183 | TPACPI_HANDLE(led, ec, "SLED", /* 570 */ | ||
1184 | @@ -4673,23 +4674,13 @@ static int led_set_status(const unsigned int led, | ||
1185 | return rc; | ||
1186 | } | ||
1187 | |||
1188 | -static void led_sysfs_set_status(unsigned int led, | ||
1189 | - enum led_brightness brightness) | ||
1190 | -{ | ||
1191 | - led_set_status(led, | ||
1192 | - (brightness == LED_OFF) ? | ||
1193 | - TPACPI_LED_OFF : | ||
1194 | - (tpacpi_led_state_cache[led] == TPACPI_LED_BLINK) ? | ||
1195 | - TPACPI_LED_BLINK : TPACPI_LED_ON); | ||
1196 | -} | ||
1197 | - | ||
1198 | static void led_set_status_worker(struct work_struct *work) | ||
1199 | { | ||
1200 | struct tpacpi_led_classdev *data = | ||
1201 | container_of(work, struct tpacpi_led_classdev, work); | ||
1202 | |||
1203 | if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING)) | ||
1204 | - led_sysfs_set_status(data->led, data->new_brightness); | ||
1205 | + led_set_status(data->led, data->new_state); | ||
1206 | } | ||
1207 | |||
1208 | static void led_sysfs_set(struct led_classdev *led_cdev, | ||
1209 | @@ -4698,7 +4689,13 @@ static void led_sysfs_set(struct led_classdev *led_cdev, | ||
1210 | struct tpacpi_led_classdev *data = container_of(led_cdev, | ||
1211 | struct tpacpi_led_classdev, led_classdev); | ||
1212 | |||
1213 | - data->new_brightness = brightness; | ||
1214 | + if (brightness == LED_OFF) | ||
1215 | + data->new_state = TPACPI_LED_OFF; | ||
1216 | + else if (tpacpi_led_state_cache[data->led] != TPACPI_LED_BLINK) | ||
1217 | + data->new_state = TPACPI_LED_ON; | ||
1218 | + else | ||
1219 | + data->new_state = TPACPI_LED_BLINK; | ||
1220 | + | ||
1221 | queue_work(tpacpi_wq, &data->work); | ||
1222 | } | ||
1223 | |||
1224 | @@ -4716,7 +4713,7 @@ static int led_sysfs_blink_set(struct led_classdev *led_cdev, | ||
1225 | } else if ((*delay_on != 500) || (*delay_off != 500)) | ||
1226 | return -EINVAL; | ||
1227 | |||
1228 | - data->new_brightness = TPACPI_LED_BLINK; | ||
1229 | + data->new_state = TPACPI_LED_BLINK; | ||
1230 | queue_work(tpacpi_wq, &data->work); | ||
1231 | |||
1232 | return 0; | ||
1233 | diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c | ||
1234 | index cfcfd5a..4b36d88 100644 | ||
1235 | --- a/drivers/usb/serial/usb-serial.c | ||
1236 | +++ b/drivers/usb/serial/usb-serial.c | ||
1237 | @@ -136,22 +136,10 @@ static void destroy_serial(struct kref *kref) | ||
1238 | |||
1239 | dbg("%s - %s", __func__, serial->type->description); | ||
1240 | |||
1241 | - serial->type->shutdown(serial); | ||
1242 | - | ||
1243 | /* return the minor range that this device had */ | ||
1244 | if (serial->minor != SERIAL_TTY_NO_MINOR) | ||
1245 | return_serial(serial); | ||
1246 | |||
1247 | - for (i = 0; i < serial->num_ports; ++i) | ||
1248 | - serial->port[i]->port.count = 0; | ||
1249 | - | ||
1250 | - /* the ports are cleaned up and released in port_release() */ | ||
1251 | - for (i = 0; i < serial->num_ports; ++i) | ||
1252 | - if (serial->port[i]->dev.parent != NULL) { | ||
1253 | - device_unregister(&serial->port[i]->dev); | ||
1254 | - serial->port[i] = NULL; | ||
1255 | - } | ||
1256 | - | ||
1257 | /* If this is a "fake" port, we have to clean it up here, as it will | ||
1258 | * not get cleaned up in port_release() as it was never registered with | ||
1259 | * the driver core */ | ||
1260 | @@ -186,7 +174,7 @@ static int serial_open (struct tty_struct *tty, struct file *filp) | ||
1261 | struct usb_serial *serial; | ||
1262 | struct usb_serial_port *port; | ||
1263 | unsigned int portNumber; | ||
1264 | - int retval; | ||
1265 | + int retval = 0; | ||
1266 | |||
1267 | dbg("%s", __func__); | ||
1268 | |||
1269 | @@ -197,16 +185,24 @@ static int serial_open (struct tty_struct *tty, struct file *filp) | ||
1270 | return -ENODEV; | ||
1271 | } | ||
1272 | |||
1273 | + mutex_lock(&serial->disc_mutex); | ||
1274 | portNumber = tty->index - serial->minor; | ||
1275 | port = serial->port[portNumber]; | ||
1276 | - if (!port) { | ||
1277 | + if (!port || serial->disconnected) | ||
1278 | retval = -ENODEV; | ||
1279 | - goto bailout_kref_put; | ||
1280 | - } | ||
1281 | + else | ||
1282 | + get_device(&port->dev); | ||
1283 | + /* | ||
1284 | + * Note: Our locking order requirement does not allow port->mutex | ||
1285 | + * to be acquired while serial->disc_mutex is held. | ||
1286 | + */ | ||
1287 | + mutex_unlock(&serial->disc_mutex); | ||
1288 | + if (retval) | ||
1289 | + goto bailout_serial_put; | ||
1290 | |||
1291 | if (mutex_lock_interruptible(&port->mutex)) { | ||
1292 | retval = -ERESTARTSYS; | ||
1293 | - goto bailout_kref_put; | ||
1294 | + goto bailout_port_put; | ||
1295 | } | ||
1296 | |||
1297 | ++port->port.count; | ||
1298 | @@ -226,14 +222,20 @@ static int serial_open (struct tty_struct *tty, struct file *filp) | ||
1299 | goto bailout_mutex_unlock; | ||
1300 | } | ||
1301 | |||
1302 | - retval = usb_autopm_get_interface(serial->interface); | ||
1303 | + mutex_lock(&serial->disc_mutex); | ||
1304 | + if (serial->disconnected) | ||
1305 | + retval = -ENODEV; | ||
1306 | + else | ||
1307 | + retval = usb_autopm_get_interface(serial->interface); | ||
1308 | if (retval) | ||
1309 | goto bailout_module_put; | ||
1310 | + | ||
1311 | /* only call the device specific open if this | ||
1312 | * is the first time the port is opened */ | ||
1313 | retval = serial->type->open(tty, port, filp); | ||
1314 | if (retval) | ||
1315 | goto bailout_interface_put; | ||
1316 | + mutex_unlock(&serial->disc_mutex); | ||
1317 | } | ||
1318 | |||
1319 | mutex_unlock(&port->mutex); | ||
1320 | @@ -242,13 +244,16 @@ static int serial_open (struct tty_struct *tty, struct file *filp) | ||
1321 | bailout_interface_put: | ||
1322 | usb_autopm_put_interface(serial->interface); | ||
1323 | bailout_module_put: | ||
1324 | + mutex_unlock(&serial->disc_mutex); | ||
1325 | module_put(serial->type->driver.owner); | ||
1326 | bailout_mutex_unlock: | ||
1327 | port->port.count = 0; | ||
1328 | tty->driver_data = NULL; | ||
1329 | tty_port_tty_set(&port->port, NULL); | ||
1330 | mutex_unlock(&port->mutex); | ||
1331 | -bailout_kref_put: | ||
1332 | +bailout_port_put: | ||
1333 | + put_device(&port->dev); | ||
1334 | +bailout_serial_put: | ||
1335 | usb_serial_put(serial); | ||
1336 | return retval; | ||
1337 | } | ||
1338 | @@ -256,6 +261,9 @@ bailout_kref_put: | ||
1339 | static void serial_close(struct tty_struct *tty, struct file *filp) | ||
1340 | { | ||
1341 | struct usb_serial_port *port = tty->driver_data; | ||
1342 | + struct usb_serial *serial; | ||
1343 | + struct module *owner; | ||
1344 | + int count; | ||
1345 | |||
1346 | if (!port) | ||
1347 | return; | ||
1348 | @@ -263,6 +271,8 @@ static void serial_close(struct tty_struct *tty, struct file *filp) | ||
1349 | dbg("%s - port %d", __func__, port->number); | ||
1350 | |||
1351 | mutex_lock(&port->mutex); | ||
1352 | + serial = port->serial; | ||
1353 | + owner = serial->type->driver.owner; | ||
1354 | |||
1355 | if (port->port.count == 0) { | ||
1356 | mutex_unlock(&port->mutex); | ||
1357 | @@ -275,7 +285,7 @@ static void serial_close(struct tty_struct *tty, struct file *filp) | ||
1358 | * this before we drop the port count. The call is protected | ||
1359 | * by the port mutex | ||
1360 | */ | ||
1361 | - port->serial->type->close(tty, port, filp); | ||
1362 | + serial->type->close(tty, port, filp); | ||
1363 | |||
1364 | if (port->port.count == (port->console ? 2 : 1)) { | ||
1365 | struct tty_struct *tty = tty_port_tty_get(&port->port); | ||
1366 | @@ -289,17 +299,23 @@ static void serial_close(struct tty_struct *tty, struct file *filp) | ||
1367 | } | ||
1368 | } | ||
1369 | |||
1370 | - if (port->port.count == 1) { | ||
1371 | - mutex_lock(&port->serial->disc_mutex); | ||
1372 | - if (!port->serial->disconnected) | ||
1373 | - usb_autopm_put_interface(port->serial->interface); | ||
1374 | - mutex_unlock(&port->serial->disc_mutex); | ||
1375 | - module_put(port->serial->type->driver.owner); | ||
1376 | - } | ||
1377 | --port->port.count; | ||
1378 | - | ||
1379 | + count = port->port.count; | ||
1380 | mutex_unlock(&port->mutex); | ||
1381 | - usb_serial_put(port->serial); | ||
1382 | + put_device(&port->dev); | ||
1383 | + | ||
1384 | + /* Mustn't dereference port any more */ | ||
1385 | + if (count == 0) { | ||
1386 | + mutex_lock(&serial->disc_mutex); | ||
1387 | + if (!serial->disconnected) | ||
1388 | + usb_autopm_put_interface(serial->interface); | ||
1389 | + mutex_unlock(&serial->disc_mutex); | ||
1390 | + } | ||
1391 | + usb_serial_put(serial); | ||
1392 | + | ||
1393 | + /* Mustn't dereference serial any more */ | ||
1394 | + if (count == 0) | ||
1395 | + module_put(owner); | ||
1396 | } | ||
1397 | |||
1398 | static int serial_write(struct tty_struct *tty, const unsigned char *buf, | ||
1399 | @@ -548,7 +564,13 @@ static void kill_traffic(struct usb_serial_port *port) | ||
1400 | |||
1401 | static void port_free(struct usb_serial_port *port) | ||
1402 | { | ||
1403 | + /* | ||
1404 | + * Stop all the traffic before cancelling the work, so that | ||
1405 | + * nobody will restart it by calling usb_serial_port_softint. | ||
1406 | + */ | ||
1407 | kill_traffic(port); | ||
1408 | + cancel_work_sync(&port->work); | ||
1409 | + | ||
1410 | usb_free_urb(port->read_urb); | ||
1411 | usb_free_urb(port->write_urb); | ||
1412 | usb_free_urb(port->interrupt_in_urb); | ||
1413 | @@ -557,7 +579,6 @@ static void port_free(struct usb_serial_port *port) | ||
1414 | kfree(port->bulk_out_buffer); | ||
1415 | kfree(port->interrupt_in_buffer); | ||
1416 | kfree(port->interrupt_out_buffer); | ||
1417 | - flush_scheduled_work(); /* port->work */ | ||
1418 | kfree(port); | ||
1419 | } | ||
1420 | |||
1421 | @@ -1042,6 +1063,12 @@ void usb_serial_disconnect(struct usb_interface *interface) | ||
1422 | usb_set_intfdata(interface, NULL); | ||
1423 | /* must set a flag, to signal subdrivers */ | ||
1424 | serial->disconnected = 1; | ||
1425 | + mutex_unlock(&serial->disc_mutex); | ||
1426 | + | ||
1427 | + /* Unfortunately, many of the sub-drivers expect the port structures | ||
1428 | + * to exist when their shutdown method is called, so we have to go | ||
1429 | + * through this awkward two-step unregistration procedure. | ||
1430 | + */ | ||
1431 | for (i = 0; i < serial->num_ports; ++i) { | ||
1432 | port = serial->port[i]; | ||
1433 | if (port) { | ||
1434 | @@ -1051,11 +1078,21 @@ void usb_serial_disconnect(struct usb_interface *interface) | ||
1435 | tty_kref_put(tty); | ||
1436 | } | ||
1437 | kill_traffic(port); | ||
1438 | + cancel_work_sync(&port->work); | ||
1439 | + device_del(&port->dev); | ||
1440 | + } | ||
1441 | + } | ||
1442 | + serial->type->shutdown(serial); | ||
1443 | + for (i = 0; i < serial->num_ports; ++i) { | ||
1444 | + port = serial->port[i]; | ||
1445 | + if (port) { | ||
1446 | + put_device(&port->dev); | ||
1447 | + serial->port[i] = NULL; | ||
1448 | } | ||
1449 | } | ||
1450 | + | ||
1451 | /* let the last holder of this object | ||
1452 | * cause it to be cleaned up */ | ||
1453 | - mutex_unlock(&serial->disc_mutex); | ||
1454 | usb_serial_put(serial); | ||
1455 | dev_info(dev, "device disconnected\n"); | ||
1456 | } | ||
1457 | diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h | ||
1458 | index 0f54399..af39dec 100644 | ||
1459 | --- a/drivers/usb/storage/unusual_devs.h | ||
1460 | +++ b/drivers/usb/storage/unusual_devs.h | ||
1461 | @@ -2134,6 +2134,12 @@ UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001, | ||
1462 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
1463 | US_FL_CAPACITY_HEURISTICS), | ||
1464 | |||
1465 | +/* Reported by Alessio Treglia <quadrispro@ubuntu.com> */ | ||
1466 | +UNUSUAL_DEV( 0xed10, 0x7636, 0x0001, 0x0001, | ||
1467 | + "TGE", | ||
1468 | + "Digital MP3 Audio Player", | ||
1469 | + US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), | ||
1470 | + | ||
1471 | /* Control/Bulk transport for all SubClass values */ | ||
1472 | USUAL_DEV(US_SC_RBC, US_PR_CB, USB_US_TYPE_STOR), | ||
1473 | USUAL_DEV(US_SC_8020, US_PR_CB, USB_US_TYPE_STOR), | ||
1474 | diff --git a/fs/Makefile b/fs/Makefile | ||
1475 | index dc20db3..0cd7097 100644 | ||
1476 | --- a/fs/Makefile | ||
1477 | +++ b/fs/Makefile | ||
1478 | @@ -11,7 +11,7 @@ obj-y := open.o read_write.o file_table.o super.o \ | ||
1479 | attr.o bad_inode.o file.o filesystems.o namespace.o \ | ||
1480 | seq_file.o xattr.o libfs.o fs-writeback.o \ | ||
1481 | pnode.o drop_caches.o splice.o sync.o utimes.o \ | ||
1482 | - stack.o | ||
1483 | + stack.o fs_struct.o | ||
1484 | |||
1485 | ifeq ($(CONFIG_BLOCK),y) | ||
1486 | obj-y += buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o | ||
1487 | diff --git a/fs/bio.c b/fs/bio.c | ||
1488 | index d4f0632..bfdfe57 100644 | ||
1489 | --- a/fs/bio.c | ||
1490 | +++ b/fs/bio.c | ||
1491 | @@ -806,6 +806,9 @@ struct bio *bio_copy_user_iov(struct request_queue *q, | ||
1492 | len += iov[i].iov_len; | ||
1493 | } | ||
1494 | |||
1495 | + if (offset) | ||
1496 | + nr_pages++; | ||
1497 | + | ||
1498 | bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask); | ||
1499 | if (!bmd) | ||
1500 | return ERR_PTR(-ENOMEM); | ||
1501 | diff --git a/fs/compat.c b/fs/compat.c | ||
1502 | index d0145ca..1df8926 100644 | ||
1503 | --- a/fs/compat.c | ||
1504 | +++ b/fs/compat.c | ||
1505 | @@ -51,6 +51,7 @@ | ||
1506 | #include <linux/poll.h> | ||
1507 | #include <linux/mm.h> | ||
1508 | #include <linux/eventpoll.h> | ||
1509 | +#include <linux/fs_struct.h> | ||
1510 | |||
1511 | #include <asm/uaccess.h> | ||
1512 | #include <asm/mmu_context.h> | ||
1513 | @@ -1392,12 +1393,18 @@ int compat_do_execve(char * filename, | ||
1514 | { | ||
1515 | struct linux_binprm *bprm; | ||
1516 | struct file *file; | ||
1517 | + struct files_struct *displaced; | ||
1518 | + bool clear_in_exec; | ||
1519 | int retval; | ||
1520 | |||
1521 | + retval = unshare_files(&displaced); | ||
1522 | + if (retval) | ||
1523 | + goto out_ret; | ||
1524 | + | ||
1525 | retval = -ENOMEM; | ||
1526 | bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); | ||
1527 | if (!bprm) | ||
1528 | - goto out_ret; | ||
1529 | + goto out_files; | ||
1530 | |||
1531 | retval = mutex_lock_interruptible(¤t->cred_exec_mutex); | ||
1532 | if (retval < 0) | ||
1533 | @@ -1407,12 +1414,16 @@ int compat_do_execve(char * filename, | ||
1534 | bprm->cred = prepare_exec_creds(); | ||
1535 | if (!bprm->cred) | ||
1536 | goto out_unlock; | ||
1537 | - check_unsafe_exec(bprm, current->files); | ||
1538 | + | ||
1539 | + retval = check_unsafe_exec(bprm); | ||
1540 | + if (retval < 0) | ||
1541 | + goto out_unlock; | ||
1542 | + clear_in_exec = retval; | ||
1543 | |||
1544 | file = open_exec(filename); | ||
1545 | retval = PTR_ERR(file); | ||
1546 | if (IS_ERR(file)) | ||
1547 | - goto out_unlock; | ||
1548 | + goto out_unmark; | ||
1549 | |||
1550 | sched_exec(); | ||
1551 | |||
1552 | @@ -1454,9 +1465,12 @@ int compat_do_execve(char * filename, | ||
1553 | goto out; | ||
1554 | |||
1555 | /* execve succeeded */ | ||
1556 | + current->fs->in_exec = 0; | ||
1557 | mutex_unlock(¤t->cred_exec_mutex); | ||
1558 | acct_update_integrals(current); | ||
1559 | free_bprm(bprm); | ||
1560 | + if (displaced) | ||
1561 | + put_files_struct(displaced); | ||
1562 | return retval; | ||
1563 | |||
1564 | out: | ||
1565 | @@ -1469,12 +1483,19 @@ out_file: | ||
1566 | fput(bprm->file); | ||
1567 | } | ||
1568 | |||
1569 | +out_unmark: | ||
1570 | + if (clear_in_exec) | ||
1571 | + current->fs->in_exec = 0; | ||
1572 | + | ||
1573 | out_unlock: | ||
1574 | mutex_unlock(¤t->cred_exec_mutex); | ||
1575 | |||
1576 | out_free: | ||
1577 | free_bprm(bprm); | ||
1578 | |||
1579 | +out_files: | ||
1580 | + if (displaced) | ||
1581 | + reset_files_struct(displaced); | ||
1582 | out_ret: | ||
1583 | return retval; | ||
1584 | } | ||
1585 | diff --git a/fs/exec.c b/fs/exec.c | ||
1586 | index 929b580..3b36c69 100644 | ||
1587 | --- a/fs/exec.c | ||
1588 | +++ b/fs/exec.c | ||
1589 | @@ -1049,32 +1049,35 @@ EXPORT_SYMBOL(install_exec_creds); | ||
1590 | * - the caller must hold current->cred_exec_mutex to protect against | ||
1591 | * PTRACE_ATTACH | ||
1592 | */ | ||
1593 | -void check_unsafe_exec(struct linux_binprm *bprm, struct files_struct *files) | ||
1594 | +int check_unsafe_exec(struct linux_binprm *bprm) | ||
1595 | { | ||
1596 | struct task_struct *p = current, *t; | ||
1597 | - unsigned long flags; | ||
1598 | - unsigned n_fs, n_files, n_sighand; | ||
1599 | + unsigned n_fs; | ||
1600 | + int res = 0; | ||
1601 | |||
1602 | bprm->unsafe = tracehook_unsafe_exec(p); | ||
1603 | |||
1604 | n_fs = 1; | ||
1605 | - n_files = 1; | ||
1606 | - n_sighand = 1; | ||
1607 | - lock_task_sighand(p, &flags); | ||
1608 | + write_lock(&p->fs->lock); | ||
1609 | + rcu_read_lock(); | ||
1610 | for (t = next_thread(p); t != p; t = next_thread(t)) { | ||
1611 | if (t->fs == p->fs) | ||
1612 | n_fs++; | ||
1613 | - if (t->files == files) | ||
1614 | - n_files++; | ||
1615 | - n_sighand++; | ||
1616 | } | ||
1617 | + rcu_read_unlock(); | ||
1618 | |||
1619 | - if (atomic_read(&p->fs->count) > n_fs || | ||
1620 | - atomic_read(&p->files->count) > n_files || | ||
1621 | - atomic_read(&p->sighand->count) > n_sighand) | ||
1622 | + if (p->fs->users > n_fs) { | ||
1623 | bprm->unsafe |= LSM_UNSAFE_SHARE; | ||
1624 | + } else { | ||
1625 | + res = -EAGAIN; | ||
1626 | + if (!p->fs->in_exec) { | ||
1627 | + p->fs->in_exec = 1; | ||
1628 | + res = 1; | ||
1629 | + } | ||
1630 | + } | ||
1631 | + write_unlock(&p->fs->lock); | ||
1632 | |||
1633 | - unlock_task_sighand(p, &flags); | ||
1634 | + return res; | ||
1635 | } | ||
1636 | |||
1637 | /* | ||
1638 | @@ -1270,6 +1273,7 @@ int do_execve(char * filename, | ||
1639 | struct linux_binprm *bprm; | ||
1640 | struct file *file; | ||
1641 | struct files_struct *displaced; | ||
1642 | + bool clear_in_exec; | ||
1643 | int retval; | ||
1644 | |||
1645 | retval = unshare_files(&displaced); | ||
1646 | @@ -1289,12 +1293,16 @@ int do_execve(char * filename, | ||
1647 | bprm->cred = prepare_exec_creds(); | ||
1648 | if (!bprm->cred) | ||
1649 | goto out_unlock; | ||
1650 | - check_unsafe_exec(bprm, displaced); | ||
1651 | + | ||
1652 | + retval = check_unsafe_exec(bprm); | ||
1653 | + if (retval < 0) | ||
1654 | + goto out_unlock; | ||
1655 | + clear_in_exec = retval; | ||
1656 | |||
1657 | file = open_exec(filename); | ||
1658 | retval = PTR_ERR(file); | ||
1659 | if (IS_ERR(file)) | ||
1660 | - goto out_unlock; | ||
1661 | + goto out_unmark; | ||
1662 | |||
1663 | sched_exec(); | ||
1664 | |||
1665 | @@ -1337,6 +1345,7 @@ int do_execve(char * filename, | ||
1666 | goto out; | ||
1667 | |||
1668 | /* execve succeeded */ | ||
1669 | + current->fs->in_exec = 0; | ||
1670 | mutex_unlock(¤t->cred_exec_mutex); | ||
1671 | acct_update_integrals(current); | ||
1672 | free_bprm(bprm); | ||
1673 | @@ -1354,6 +1363,10 @@ out_file: | ||
1674 | fput(bprm->file); | ||
1675 | } | ||
1676 | |||
1677 | +out_unmark: | ||
1678 | + if (clear_in_exec) | ||
1679 | + current->fs->in_exec = 0; | ||
1680 | + | ||
1681 | out_unlock: | ||
1682 | mutex_unlock(¤t->cred_exec_mutex); | ||
1683 | |||
1684 | diff --git a/fs/fs_struct.c b/fs/fs_struct.c | ||
1685 | new file mode 100644 | ||
1686 | index 0000000..41cff72 | ||
1687 | --- /dev/null | ||
1688 | +++ b/fs/fs_struct.c | ||
1689 | @@ -0,0 +1,170 @@ | ||
1690 | +#include <linux/module.h> | ||
1691 | +#include <linux/sched.h> | ||
1692 | +#include <linux/fs.h> | ||
1693 | +#include <linux/path.h> | ||
1694 | +#include <linux/slab.h> | ||
1695 | + | ||
1696 | +/* | ||
1697 | + * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. | ||
1698 | + * It can block. | ||
1699 | + */ | ||
1700 | +void set_fs_root(struct fs_struct *fs, struct path *path) | ||
1701 | +{ | ||
1702 | + struct path old_root; | ||
1703 | + | ||
1704 | + write_lock(&fs->lock); | ||
1705 | + old_root = fs->root; | ||
1706 | + fs->root = *path; | ||
1707 | + path_get(path); | ||
1708 | + write_unlock(&fs->lock); | ||
1709 | + if (old_root.dentry) | ||
1710 | + path_put(&old_root); | ||
1711 | +} | ||
1712 | + | ||
1713 | +/* | ||
1714 | + * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values. | ||
1715 | + * It can block. | ||
1716 | + */ | ||
1717 | +void set_fs_pwd(struct fs_struct *fs, struct path *path) | ||
1718 | +{ | ||
1719 | + struct path old_pwd; | ||
1720 | + | ||
1721 | + write_lock(&fs->lock); | ||
1722 | + old_pwd = fs->pwd; | ||
1723 | + fs->pwd = *path; | ||
1724 | + path_get(path); | ||
1725 | + write_unlock(&fs->lock); | ||
1726 | + | ||
1727 | + if (old_pwd.dentry) | ||
1728 | + path_put(&old_pwd); | ||
1729 | +} | ||
1730 | + | ||
1731 | +void chroot_fs_refs(struct path *old_root, struct path *new_root) | ||
1732 | +{ | ||
1733 | + struct task_struct *g, *p; | ||
1734 | + struct fs_struct *fs; | ||
1735 | + int count = 0; | ||
1736 | + | ||
1737 | + read_lock(&tasklist_lock); | ||
1738 | + do_each_thread(g, p) { | ||
1739 | + task_lock(p); | ||
1740 | + fs = p->fs; | ||
1741 | + if (fs) { | ||
1742 | + write_lock(&fs->lock); | ||
1743 | + if (fs->root.dentry == old_root->dentry | ||
1744 | + && fs->root.mnt == old_root->mnt) { | ||
1745 | + path_get(new_root); | ||
1746 | + fs->root = *new_root; | ||
1747 | + count++; | ||
1748 | + } | ||
1749 | + if (fs->pwd.dentry == old_root->dentry | ||
1750 | + && fs->pwd.mnt == old_root->mnt) { | ||
1751 | + path_get(new_root); | ||
1752 | + fs->pwd = *new_root; | ||
1753 | + count++; | ||
1754 | + } | ||
1755 | + write_unlock(&fs->lock); | ||
1756 | + } | ||
1757 | + task_unlock(p); | ||
1758 | + } while_each_thread(g, p); | ||
1759 | + read_unlock(&tasklist_lock); | ||
1760 | + while (count--) | ||
1761 | + path_put(old_root); | ||
1762 | +} | ||
1763 | + | ||
1764 | +void free_fs_struct(struct fs_struct *fs) | ||
1765 | +{ | ||
1766 | + path_put(&fs->root); | ||
1767 | + path_put(&fs->pwd); | ||
1768 | + kmem_cache_free(fs_cachep, fs); | ||
1769 | +} | ||
1770 | + | ||
1771 | +void exit_fs(struct task_struct *tsk) | ||
1772 | +{ | ||
1773 | + struct fs_struct *fs = tsk->fs; | ||
1774 | + | ||
1775 | + if (fs) { | ||
1776 | + int kill; | ||
1777 | + task_lock(tsk); | ||
1778 | + write_lock(&fs->lock); | ||
1779 | + tsk->fs = NULL; | ||
1780 | + kill = !--fs->users; | ||
1781 | + write_unlock(&fs->lock); | ||
1782 | + task_unlock(tsk); | ||
1783 | + if (kill) | ||
1784 | + free_fs_struct(fs); | ||
1785 | + } | ||
1786 | +} | ||
1787 | + | ||
1788 | +struct fs_struct *copy_fs_struct(struct fs_struct *old) | ||
1789 | +{ | ||
1790 | + struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL); | ||
1791 | + /* We don't need to lock fs - think why ;-) */ | ||
1792 | + if (fs) { | ||
1793 | + fs->users = 1; | ||
1794 | + fs->in_exec = 0; | ||
1795 | + rwlock_init(&fs->lock); | ||
1796 | + fs->umask = old->umask; | ||
1797 | + read_lock(&old->lock); | ||
1798 | + fs->root = old->root; | ||
1799 | + path_get(&old->root); | ||
1800 | + fs->pwd = old->pwd; | ||
1801 | + path_get(&old->pwd); | ||
1802 | + read_unlock(&old->lock); | ||
1803 | + } | ||
1804 | + return fs; | ||
1805 | +} | ||
1806 | + | ||
1807 | +int unshare_fs_struct(void) | ||
1808 | +{ | ||
1809 | + struct fs_struct *fs = current->fs; | ||
1810 | + struct fs_struct *new_fs = copy_fs_struct(fs); | ||
1811 | + int kill; | ||
1812 | + | ||
1813 | + if (!new_fs) | ||
1814 | + return -ENOMEM; | ||
1815 | + | ||
1816 | + task_lock(current); | ||
1817 | + write_lock(&fs->lock); | ||
1818 | + kill = !--fs->users; | ||
1819 | + current->fs = new_fs; | ||
1820 | + write_unlock(&fs->lock); | ||
1821 | + task_unlock(current); | ||
1822 | + | ||
1823 | + if (kill) | ||
1824 | + free_fs_struct(fs); | ||
1825 | + | ||
1826 | + return 0; | ||
1827 | +} | ||
1828 | +EXPORT_SYMBOL_GPL(unshare_fs_struct); | ||
1829 | + | ||
1830 | +/* to be mentioned only in INIT_TASK */ | ||
1831 | +struct fs_struct init_fs = { | ||
1832 | + .users = 1, | ||
1833 | + .lock = __RW_LOCK_UNLOCKED(init_fs.lock), | ||
1834 | + .umask = 0022, | ||
1835 | +}; | ||
1836 | + | ||
1837 | +void daemonize_fs_struct(void) | ||
1838 | +{ | ||
1839 | + struct fs_struct *fs = current->fs; | ||
1840 | + | ||
1841 | + if (fs) { | ||
1842 | + int kill; | ||
1843 | + | ||
1844 | + task_lock(current); | ||
1845 | + | ||
1846 | + write_lock(&init_fs.lock); | ||
1847 | + init_fs.users++; | ||
1848 | + write_unlock(&init_fs.lock); | ||
1849 | + | ||
1850 | + write_lock(&fs->lock); | ||
1851 | + current->fs = &init_fs; | ||
1852 | + kill = !--fs->users; | ||
1853 | + write_unlock(&fs->lock); | ||
1854 | + | ||
1855 | + task_unlock(current); | ||
1856 | + if (kill) | ||
1857 | + free_fs_struct(fs); | ||
1858 | + } | ||
1859 | +} | ||
1860 | diff --git a/fs/internal.h b/fs/internal.h | ||
1861 | index 0d8ac49..b4dac4f 100644 | ||
1862 | --- a/fs/internal.h | ||
1863 | +++ b/fs/internal.h | ||
1864 | @@ -11,6 +11,7 @@ | ||
1865 | |||
1866 | struct super_block; | ||
1867 | struct linux_binprm; | ||
1868 | +struct path; | ||
1869 | |||
1870 | /* | ||
1871 | * block_dev.c | ||
1872 | @@ -43,7 +44,7 @@ extern void __init chrdev_init(void); | ||
1873 | /* | ||
1874 | * exec.c | ||
1875 | */ | ||
1876 | -extern void check_unsafe_exec(struct linux_binprm *, struct files_struct *); | ||
1877 | +extern int check_unsafe_exec(struct linux_binprm *); | ||
1878 | |||
1879 | /* | ||
1880 | * namespace.c | ||
1881 | @@ -60,3 +61,8 @@ extern void umount_tree(struct vfsmount *, int, struct list_head *); | ||
1882 | extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int); | ||
1883 | |||
1884 | extern void __init mnt_init(void); | ||
1885 | + | ||
1886 | +/* | ||
1887 | + * fs_struct.c | ||
1888 | + */ | ||
1889 | +extern void chroot_fs_refs(struct path *, struct path *); | ||
1890 | diff --git a/fs/namei.c b/fs/namei.c | ||
1891 | index bbc15c2..2389dda 100644 | ||
1892 | --- a/fs/namei.c | ||
1893 | +++ b/fs/namei.c | ||
1894 | @@ -2891,10 +2891,3 @@ EXPORT_SYMBOL(vfs_symlink); | ||
1895 | EXPORT_SYMBOL(vfs_unlink); | ||
1896 | EXPORT_SYMBOL(dentry_unhash); | ||
1897 | EXPORT_SYMBOL(generic_readlink); | ||
1898 | - | ||
1899 | -/* to be mentioned only in INIT_TASK */ | ||
1900 | -struct fs_struct init_fs = { | ||
1901 | - .count = ATOMIC_INIT(1), | ||
1902 | - .lock = __RW_LOCK_UNLOCKED(init_fs.lock), | ||
1903 | - .umask = 0022, | ||
1904 | -}; | ||
1905 | diff --git a/fs/namespace.c b/fs/namespace.c | ||
1906 | index 06f8e63..685e354 100644 | ||
1907 | --- a/fs/namespace.c | ||
1908 | +++ b/fs/namespace.c | ||
1909 | @@ -2089,66 +2089,6 @@ out1: | ||
1910 | } | ||
1911 | |||
1912 | /* | ||
1913 | - * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. | ||
1914 | - * It can block. Requires the big lock held. | ||
1915 | - */ | ||
1916 | -void set_fs_root(struct fs_struct *fs, struct path *path) | ||
1917 | -{ | ||
1918 | - struct path old_root; | ||
1919 | - | ||
1920 | - write_lock(&fs->lock); | ||
1921 | - old_root = fs->root; | ||
1922 | - fs->root = *path; | ||
1923 | - path_get(path); | ||
1924 | - write_unlock(&fs->lock); | ||
1925 | - if (old_root.dentry) | ||
1926 | - path_put(&old_root); | ||
1927 | -} | ||
1928 | - | ||
1929 | -/* | ||
1930 | - * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values. | ||
1931 | - * It can block. Requires the big lock held. | ||
1932 | - */ | ||
1933 | -void set_fs_pwd(struct fs_struct *fs, struct path *path) | ||
1934 | -{ | ||
1935 | - struct path old_pwd; | ||
1936 | - | ||
1937 | - write_lock(&fs->lock); | ||
1938 | - old_pwd = fs->pwd; | ||
1939 | - fs->pwd = *path; | ||
1940 | - path_get(path); | ||
1941 | - write_unlock(&fs->lock); | ||
1942 | - | ||
1943 | - if (old_pwd.dentry) | ||
1944 | - path_put(&old_pwd); | ||
1945 | -} | ||
1946 | - | ||
1947 | -static void chroot_fs_refs(struct path *old_root, struct path *new_root) | ||
1948 | -{ | ||
1949 | - struct task_struct *g, *p; | ||
1950 | - struct fs_struct *fs; | ||
1951 | - | ||
1952 | - read_lock(&tasklist_lock); | ||
1953 | - do_each_thread(g, p) { | ||
1954 | - task_lock(p); | ||
1955 | - fs = p->fs; | ||
1956 | - if (fs) { | ||
1957 | - atomic_inc(&fs->count); | ||
1958 | - task_unlock(p); | ||
1959 | - if (fs->root.dentry == old_root->dentry | ||
1960 | - && fs->root.mnt == old_root->mnt) | ||
1961 | - set_fs_root(fs, new_root); | ||
1962 | - if (fs->pwd.dentry == old_root->dentry | ||
1963 | - && fs->pwd.mnt == old_root->mnt) | ||
1964 | - set_fs_pwd(fs, new_root); | ||
1965 | - put_fs_struct(fs); | ||
1966 | - } else | ||
1967 | - task_unlock(p); | ||
1968 | - } while_each_thread(g, p); | ||
1969 | - read_unlock(&tasklist_lock); | ||
1970 | -} | ||
1971 | - | ||
1972 | -/* | ||
1973 | * pivot_root Semantics: | ||
1974 | * Moves the root file system of the current process to the directory put_old, | ||
1975 | * makes new_root as the new root file system of the current process, and sets | ||
1976 | diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c | ||
1977 | index 07e4f5d..144d699 100644 | ||
1978 | --- a/fs/nfsd/nfssvc.c | ||
1979 | +++ b/fs/nfsd/nfssvc.c | ||
1980 | @@ -404,7 +404,6 @@ static int | ||
1981 | nfsd(void *vrqstp) | ||
1982 | { | ||
1983 | struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp; | ||
1984 | - struct fs_struct *fsp; | ||
1985 | int err, preverr = 0; | ||
1986 | |||
1987 | /* Lock module and set up kernel thread */ | ||
1988 | @@ -413,13 +412,11 @@ nfsd(void *vrqstp) | ||
1989 | /* At this point, the thread shares current->fs | ||
1990 | * with the init process. We need to create files with a | ||
1991 | * umask of 0 instead of init's umask. */ | ||
1992 | - fsp = copy_fs_struct(current->fs); | ||
1993 | - if (!fsp) { | ||
1994 | + if (unshare_fs_struct() < 0) { | ||
1995 | printk("Unable to start nfsd thread: out of memory\n"); | ||
1996 | goto out; | ||
1997 | } | ||
1998 | - exit_fs(current); | ||
1999 | - current->fs = fsp; | ||
2000 | + | ||
2001 | current->fs->umask = 0; | ||
2002 | |||
2003 | /* | ||
2004 | diff --git a/fs/proc/array.c b/fs/proc/array.c | ||
2005 | index 7e4877d..725a650 100644 | ||
2006 | --- a/fs/proc/array.c | ||
2007 | +++ b/fs/proc/array.c | ||
2008 | @@ -80,6 +80,7 @@ | ||
2009 | #include <linux/delayacct.h> | ||
2010 | #include <linux/seq_file.h> | ||
2011 | #include <linux/pid_namespace.h> | ||
2012 | +#include <linux/ptrace.h> | ||
2013 | #include <linux/tracehook.h> | ||
2014 | |||
2015 | #include <asm/pgtable.h> | ||
2016 | @@ -352,6 +353,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | ||
2017 | char state; | ||
2018 | pid_t ppid = 0, pgid = -1, sid = -1; | ||
2019 | int num_threads = 0; | ||
2020 | + int permitted; | ||
2021 | struct mm_struct *mm; | ||
2022 | unsigned long long start_time; | ||
2023 | unsigned long cmin_flt = 0, cmaj_flt = 0; | ||
2024 | @@ -364,11 +366,14 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | ||
2025 | |||
2026 | state = *get_task_state(task); | ||
2027 | vsize = eip = esp = 0; | ||
2028 | + permitted = ptrace_may_access(task, PTRACE_MODE_READ); | ||
2029 | mm = get_task_mm(task); | ||
2030 | if (mm) { | ||
2031 | vsize = task_vsize(mm); | ||
2032 | - eip = KSTK_EIP(task); | ||
2033 | - esp = KSTK_ESP(task); | ||
2034 | + if (permitted) { | ||
2035 | + eip = KSTK_EIP(task); | ||
2036 | + esp = KSTK_ESP(task); | ||
2037 | + } | ||
2038 | } | ||
2039 | |||
2040 | get_task_comm(tcomm, task); | ||
2041 | @@ -424,7 +429,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | ||
2042 | unlock_task_sighand(task, &flags); | ||
2043 | } | ||
2044 | |||
2045 | - if (!whole || num_threads < 2) | ||
2046 | + if (permitted && (!whole || num_threads < 2)) | ||
2047 | wchan = get_wchan(task); | ||
2048 | if (!whole) { | ||
2049 | min_flt = task->min_flt; | ||
2050 | @@ -476,7 +481,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, | ||
2051 | rsslim, | ||
2052 | mm ? mm->start_code : 0, | ||
2053 | mm ? mm->end_code : 0, | ||
2054 | - mm ? mm->start_stack : 0, | ||
2055 | + (permitted && mm) ? mm->start_stack : 0, | ||
2056 | esp, | ||
2057 | eip, | ||
2058 | /* The signal information here is obsolete. | ||
2059 | diff --git a/fs/proc/base.c b/fs/proc/base.c | ||
2060 | index beaa0ce..74e83e7 100644 | ||
2061 | --- a/fs/proc/base.c | ||
2062 | +++ b/fs/proc/base.c | ||
2063 | @@ -146,15 +146,22 @@ static unsigned int pid_entry_count_dirs(const struct pid_entry *entries, | ||
2064 | return count; | ||
2065 | } | ||
2066 | |||
2067 | -static struct fs_struct *get_fs_struct(struct task_struct *task) | ||
2068 | +static int get_fs_path(struct task_struct *task, struct path *path, bool root) | ||
2069 | { | ||
2070 | struct fs_struct *fs; | ||
2071 | + int result = -ENOENT; | ||
2072 | + | ||
2073 | task_lock(task); | ||
2074 | fs = task->fs; | ||
2075 | - if(fs) | ||
2076 | - atomic_inc(&fs->count); | ||
2077 | + if (fs) { | ||
2078 | + read_lock(&fs->lock); | ||
2079 | + *path = root ? fs->root : fs->pwd; | ||
2080 | + path_get(path); | ||
2081 | + read_unlock(&fs->lock); | ||
2082 | + result = 0; | ||
2083 | + } | ||
2084 | task_unlock(task); | ||
2085 | - return fs; | ||
2086 | + return result; | ||
2087 | } | ||
2088 | |||
2089 | static int get_nr_threads(struct task_struct *tsk) | ||
2090 | @@ -172,42 +179,24 @@ static int get_nr_threads(struct task_struct *tsk) | ||
2091 | static int proc_cwd_link(struct inode *inode, struct path *path) | ||
2092 | { | ||
2093 | struct task_struct *task = get_proc_task(inode); | ||
2094 | - struct fs_struct *fs = NULL; | ||
2095 | int result = -ENOENT; | ||
2096 | |||
2097 | if (task) { | ||
2098 | - fs = get_fs_struct(task); | ||
2099 | + result = get_fs_path(task, path, 0); | ||
2100 | put_task_struct(task); | ||
2101 | } | ||
2102 | - if (fs) { | ||
2103 | - read_lock(&fs->lock); | ||
2104 | - *path = fs->pwd; | ||
2105 | - path_get(&fs->pwd); | ||
2106 | - read_unlock(&fs->lock); | ||
2107 | - result = 0; | ||
2108 | - put_fs_struct(fs); | ||
2109 | - } | ||
2110 | return result; | ||
2111 | } | ||
2112 | |||
2113 | static int proc_root_link(struct inode *inode, struct path *path) | ||
2114 | { | ||
2115 | struct task_struct *task = get_proc_task(inode); | ||
2116 | - struct fs_struct *fs = NULL; | ||
2117 | int result = -ENOENT; | ||
2118 | |||
2119 | if (task) { | ||
2120 | - fs = get_fs_struct(task); | ||
2121 | + result = get_fs_path(task, path, 1); | ||
2122 | put_task_struct(task); | ||
2123 | } | ||
2124 | - if (fs) { | ||
2125 | - read_lock(&fs->lock); | ||
2126 | - *path = fs->root; | ||
2127 | - path_get(&fs->root); | ||
2128 | - read_unlock(&fs->lock); | ||
2129 | - result = 0; | ||
2130 | - put_fs_struct(fs); | ||
2131 | - } | ||
2132 | return result; | ||
2133 | } | ||
2134 | |||
2135 | @@ -332,7 +321,10 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer) | ||
2136 | wchan = get_wchan(task); | ||
2137 | |||
2138 | if (lookup_symbol_name(wchan, symname) < 0) | ||
2139 | - return sprintf(buffer, "%lu", wchan); | ||
2140 | + if (!ptrace_may_access(task, PTRACE_MODE_READ)) | ||
2141 | + return 0; | ||
2142 | + else | ||
2143 | + return sprintf(buffer, "%lu", wchan); | ||
2144 | else | ||
2145 | return sprintf(buffer, "%s", symname); | ||
2146 | } | ||
2147 | @@ -596,7 +588,6 @@ static int mounts_open_common(struct inode *inode, struct file *file, | ||
2148 | struct task_struct *task = get_proc_task(inode); | ||
2149 | struct nsproxy *nsp; | ||
2150 | struct mnt_namespace *ns = NULL; | ||
2151 | - struct fs_struct *fs = NULL; | ||
2152 | struct path root; | ||
2153 | struct proc_mounts *p; | ||
2154 | int ret = -EINVAL; | ||
2155 | @@ -610,22 +601,16 @@ static int mounts_open_common(struct inode *inode, struct file *file, | ||
2156 | get_mnt_ns(ns); | ||
2157 | } | ||
2158 | rcu_read_unlock(); | ||
2159 | - if (ns) | ||
2160 | - fs = get_fs_struct(task); | ||
2161 | + if (ns && get_fs_path(task, &root, 1) == 0) | ||
2162 | + ret = 0; | ||
2163 | put_task_struct(task); | ||
2164 | } | ||
2165 | |||
2166 | if (!ns) | ||
2167 | goto err; | ||
2168 | - if (!fs) | ||
2169 | + if (ret) | ||
2170 | goto err_put_ns; | ||
2171 | |||
2172 | - read_lock(&fs->lock); | ||
2173 | - root = fs->root; | ||
2174 | - path_get(&root); | ||
2175 | - read_unlock(&fs->lock); | ||
2176 | - put_fs_struct(fs); | ||
2177 | - | ||
2178 | ret = -ENOMEM; | ||
2179 | p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL); | ||
2180 | if (!p) | ||
2181 | diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c | ||
2182 | index 43d2394..52981cd 100644 | ||
2183 | --- a/fs/proc/meminfo.c | ||
2184 | +++ b/fs/proc/meminfo.c | ||
2185 | @@ -35,7 +35,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) | ||
2186 | #define K(x) ((x) << (PAGE_SHIFT - 10)) | ||
2187 | si_meminfo(&i); | ||
2188 | si_swapinfo(&i); | ||
2189 | - committed = atomic_long_read(&vm_committed_space); | ||
2190 | + committed = percpu_counter_read_positive(&vm_committed_as); | ||
2191 | allowed = ((totalram_pages - hugetlb_total_pages()) | ||
2192 | * sysctl_overcommit_ratio / 100) + total_swap_pages; | ||
2193 | |||
2194 | diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c | ||
2195 | index 9406384..c93ed2d 100644 | ||
2196 | --- a/fs/proc/task_mmu.c | ||
2197 | +++ b/fs/proc/task_mmu.c | ||
2198 | @@ -663,6 +663,10 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, | ||
2199 | goto out_task; | ||
2200 | |||
2201 | ret = 0; | ||
2202 | + | ||
2203 | + if (!count) | ||
2204 | + goto out_task; | ||
2205 | + | ||
2206 | mm = get_task_mm(task); | ||
2207 | if (!mm) | ||
2208 | goto out_task; | ||
2209 | diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c | ||
2210 | index 343ea12..6ca0105 100644 | ||
2211 | --- a/fs/proc/task_nommu.c | ||
2212 | +++ b/fs/proc/task_nommu.c | ||
2213 | @@ -49,7 +49,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) | ||
2214 | else | ||
2215 | bytes += kobjsize(mm); | ||
2216 | |||
2217 | - if (current->fs && atomic_read(¤t->fs->count) > 1) | ||
2218 | + if (current->fs && current->fs->users > 1) | ||
2219 | sbytes += kobjsize(current->fs); | ||
2220 | else | ||
2221 | bytes += kobjsize(current->fs); | ||
2222 | diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h | ||
2223 | index 5165f24..671fab3 100644 | ||
2224 | --- a/include/drm/drm_pciids.h | ||
2225 | +++ b/include/drm/drm_pciids.h | ||
2226 | @@ -418,4 +418,5 @@ | ||
2227 | {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ | ||
2228 | {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ | ||
2229 | {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ | ||
2230 | + {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ | ||
2231 | {0, 0, 0} | ||
2232 | diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h | ||
2233 | index a97c053..78a05bf 100644 | ||
2234 | --- a/include/linux/fs_struct.h | ||
2235 | +++ b/include/linux/fs_struct.h | ||
2236 | @@ -4,9 +4,10 @@ | ||
2237 | #include <linux/path.h> | ||
2238 | |||
2239 | struct fs_struct { | ||
2240 | - atomic_t count; | ||
2241 | + int users; | ||
2242 | rwlock_t lock; | ||
2243 | int umask; | ||
2244 | + int in_exec; | ||
2245 | struct path root, pwd; | ||
2246 | }; | ||
2247 | |||
2248 | @@ -16,6 +17,8 @@ extern void exit_fs(struct task_struct *); | ||
2249 | extern void set_fs_root(struct fs_struct *, struct path *); | ||
2250 | extern void set_fs_pwd(struct fs_struct *, struct path *); | ||
2251 | extern struct fs_struct *copy_fs_struct(struct fs_struct *); | ||
2252 | -extern void put_fs_struct(struct fs_struct *); | ||
2253 | +extern void free_fs_struct(struct fs_struct *); | ||
2254 | +extern void daemonize_fs_struct(void); | ||
2255 | +extern int unshare_fs_struct(void); | ||
2256 | |||
2257 | #endif /* _LINUX_FS_STRUCT_H */ | ||
2258 | diff --git a/include/linux/genhd.h b/include/linux/genhd.h | ||
2259 | index 16948ea..102d9e9 100644 | ||
2260 | --- a/include/linux/genhd.h | ||
2261 | +++ b/include/linux/genhd.h | ||
2262 | @@ -214,6 +214,7 @@ static inline void disk_put_part(struct hd_struct *part) | ||
2263 | #define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */ | ||
2264 | #define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */ | ||
2265 | #define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */ | ||
2266 | +#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */ | ||
2267 | |||
2268 | struct disk_part_iter { | ||
2269 | struct gendisk *disk; | ||
2270 | diff --git a/include/linux/kvm.h b/include/linux/kvm.h | ||
2271 | index 0424326..c344599 100644 | ||
2272 | --- a/include/linux/kvm.h | ||
2273 | +++ b/include/linux/kvm.h | ||
2274 | @@ -396,6 +396,8 @@ struct kvm_trace_rec { | ||
2275 | #ifdef __KVM_HAVE_USER_NMI | ||
2276 | #define KVM_CAP_USER_NMI 22 | ||
2277 | #endif | ||
2278 | +/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ | ||
2279 | +#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 | ||
2280 | |||
2281 | /* | ||
2282 | * ioctls for VM fds | ||
2283 | diff --git a/include/linux/mman.h b/include/linux/mman.h | ||
2284 | index 30d1073..9872d6c 100644 | ||
2285 | --- a/include/linux/mman.h | ||
2286 | +++ b/include/linux/mman.h | ||
2287 | @@ -12,21 +12,18 @@ | ||
2288 | |||
2289 | #ifdef __KERNEL__ | ||
2290 | #include <linux/mm.h> | ||
2291 | +#include <linux/percpu_counter.h> | ||
2292 | |||
2293 | #include <asm/atomic.h> | ||
2294 | |||
2295 | extern int sysctl_overcommit_memory; | ||
2296 | extern int sysctl_overcommit_ratio; | ||
2297 | -extern atomic_long_t vm_committed_space; | ||
2298 | +extern struct percpu_counter vm_committed_as; | ||
2299 | |||
2300 | -#ifdef CONFIG_SMP | ||
2301 | -extern void vm_acct_memory(long pages); | ||
2302 | -#else | ||
2303 | static inline void vm_acct_memory(long pages) | ||
2304 | { | ||
2305 | - atomic_long_add(pages, &vm_committed_space); | ||
2306 | + percpu_counter_add(&vm_committed_as, pages); | ||
2307 | } | ||
2308 | -#endif | ||
2309 | |||
2310 | static inline void vm_unacct_memory(long pages) | ||
2311 | { | ||
2312 | diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h | ||
2313 | index 027815b..b647a4d 100644 | ||
2314 | --- a/include/linux/pci_regs.h | ||
2315 | +++ b/include/linux/pci_regs.h | ||
2316 | @@ -235,7 +235,7 @@ | ||
2317 | #define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */ | ||
2318 | #define PCI_PM_CTRL 4 /* PM control and status register */ | ||
2319 | #define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ | ||
2320 | -#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */ | ||
2321 | +#define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */ | ||
2322 | #define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */ | ||
2323 | #define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */ | ||
2324 | #define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */ | ||
2325 | diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c | ||
2326 | index 667c841..cb8e962 100644 | ||
2327 | --- a/kernel/exec_domain.c | ||
2328 | +++ b/kernel/exec_domain.c | ||
2329 | @@ -145,28 +145,6 @@ __set_personality(u_long personality) | ||
2330 | return 0; | ||
2331 | } | ||
2332 | |||
2333 | - if (atomic_read(¤t->fs->count) != 1) { | ||
2334 | - struct fs_struct *fsp, *ofsp; | ||
2335 | - | ||
2336 | - fsp = copy_fs_struct(current->fs); | ||
2337 | - if (fsp == NULL) { | ||
2338 | - module_put(ep->module); | ||
2339 | - return -ENOMEM; | ||
2340 | - } | ||
2341 | - | ||
2342 | - task_lock(current); | ||
2343 | - ofsp = current->fs; | ||
2344 | - current->fs = fsp; | ||
2345 | - task_unlock(current); | ||
2346 | - | ||
2347 | - put_fs_struct(ofsp); | ||
2348 | - } | ||
2349 | - | ||
2350 | - /* | ||
2351 | - * At that point we are guaranteed to be the sole owner of | ||
2352 | - * current->fs. | ||
2353 | - */ | ||
2354 | - | ||
2355 | current->personality = personality; | ||
2356 | oep = current_thread_info()->exec_domain; | ||
2357 | current_thread_info()->exec_domain = ep; | ||
2358 | diff --git a/kernel/exit.c b/kernel/exit.c | ||
2359 | index efd30cc..467ffcd 100644 | ||
2360 | --- a/kernel/exit.c | ||
2361 | +++ b/kernel/exit.c | ||
2362 | @@ -429,7 +429,6 @@ EXPORT_SYMBOL(disallow_signal); | ||
2363 | void daemonize(const char *name, ...) | ||
2364 | { | ||
2365 | va_list args; | ||
2366 | - struct fs_struct *fs; | ||
2367 | sigset_t blocked; | ||
2368 | |||
2369 | va_start(args, name); | ||
2370 | @@ -462,11 +461,7 @@ void daemonize(const char *name, ...) | ||
2371 | |||
2372 | /* Become as one with the init task */ | ||
2373 | |||
2374 | - exit_fs(current); /* current->fs->count--; */ | ||
2375 | - fs = init_task.fs; | ||
2376 | - current->fs = fs; | ||
2377 | - atomic_inc(&fs->count); | ||
2378 | - | ||
2379 | + daemonize_fs_struct(); | ||
2380 | exit_files(current); | ||
2381 | current->files = init_task.files; | ||
2382 | atomic_inc(¤t->files->count); | ||
2383 | @@ -565,30 +560,6 @@ void exit_files(struct task_struct *tsk) | ||
2384 | } | ||
2385 | } | ||
2386 | |||
2387 | -void put_fs_struct(struct fs_struct *fs) | ||
2388 | -{ | ||
2389 | - /* No need to hold fs->lock if we are killing it */ | ||
2390 | - if (atomic_dec_and_test(&fs->count)) { | ||
2391 | - path_put(&fs->root); | ||
2392 | - path_put(&fs->pwd); | ||
2393 | - kmem_cache_free(fs_cachep, fs); | ||
2394 | - } | ||
2395 | -} | ||
2396 | - | ||
2397 | -void exit_fs(struct task_struct *tsk) | ||
2398 | -{ | ||
2399 | - struct fs_struct * fs = tsk->fs; | ||
2400 | - | ||
2401 | - if (fs) { | ||
2402 | - task_lock(tsk); | ||
2403 | - tsk->fs = NULL; | ||
2404 | - task_unlock(tsk); | ||
2405 | - put_fs_struct(fs); | ||
2406 | - } | ||
2407 | -} | ||
2408 | - | ||
2409 | -EXPORT_SYMBOL_GPL(exit_fs); | ||
2410 | - | ||
2411 | #ifdef CONFIG_MM_OWNER | ||
2412 | /* | ||
2413 | * Task p is exiting and it owned mm, lets find a new owner for it | ||
2414 | @@ -950,8 +921,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead) | ||
2415 | */ | ||
2416 | if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) && | ||
2417 | (tsk->parent_exec_id != tsk->real_parent->self_exec_id || | ||
2418 | - tsk->self_exec_id != tsk->parent_exec_id) && | ||
2419 | - !capable(CAP_KILL)) | ||
2420 | + tsk->self_exec_id != tsk->parent_exec_id)) | ||
2421 | tsk->exit_signal = SIGCHLD; | ||
2422 | |||
2423 | signal = tracehook_notify_death(tsk, &cookie, group_dead); | ||
2424 | diff --git a/kernel/fork.c b/kernel/fork.c | ||
2425 | index 9b51a1b..8727a5a 100644 | ||
2426 | --- a/kernel/fork.c | ||
2427 | +++ b/kernel/fork.c | ||
2428 | @@ -676,38 +676,21 @@ fail_nomem: | ||
2429 | return retval; | ||
2430 | } | ||
2431 | |||
2432 | -static struct fs_struct *__copy_fs_struct(struct fs_struct *old) | ||
2433 | -{ | ||
2434 | - struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL); | ||
2435 | - /* We don't need to lock fs - think why ;-) */ | ||
2436 | - if (fs) { | ||
2437 | - atomic_set(&fs->count, 1); | ||
2438 | - rwlock_init(&fs->lock); | ||
2439 | - fs->umask = old->umask; | ||
2440 | - read_lock(&old->lock); | ||
2441 | - fs->root = old->root; | ||
2442 | - path_get(&old->root); | ||
2443 | - fs->pwd = old->pwd; | ||
2444 | - path_get(&old->pwd); | ||
2445 | - read_unlock(&old->lock); | ||
2446 | - } | ||
2447 | - return fs; | ||
2448 | -} | ||
2449 | - | ||
2450 | -struct fs_struct *copy_fs_struct(struct fs_struct *old) | ||
2451 | -{ | ||
2452 | - return __copy_fs_struct(old); | ||
2453 | -} | ||
2454 | - | ||
2455 | -EXPORT_SYMBOL_GPL(copy_fs_struct); | ||
2456 | - | ||
2457 | static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) | ||
2458 | { | ||
2459 | + struct fs_struct *fs = current->fs; | ||
2460 | if (clone_flags & CLONE_FS) { | ||
2461 | - atomic_inc(¤t->fs->count); | ||
2462 | + /* tsk->fs is already what we want */ | ||
2463 | + write_lock(&fs->lock); | ||
2464 | + if (fs->in_exec) { | ||
2465 | + write_unlock(&fs->lock); | ||
2466 | + return -EAGAIN; | ||
2467 | + } | ||
2468 | + fs->users++; | ||
2469 | + write_unlock(&fs->lock); | ||
2470 | return 0; | ||
2471 | } | ||
2472 | - tsk->fs = __copy_fs_struct(current->fs); | ||
2473 | + tsk->fs = copy_fs_struct(fs); | ||
2474 | if (!tsk->fs) | ||
2475 | return -ENOMEM; | ||
2476 | return 0; | ||
2477 | @@ -1543,12 +1526,16 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) | ||
2478 | { | ||
2479 | struct fs_struct *fs = current->fs; | ||
2480 | |||
2481 | - if ((unshare_flags & CLONE_FS) && | ||
2482 | - (fs && atomic_read(&fs->count) > 1)) { | ||
2483 | - *new_fsp = __copy_fs_struct(current->fs); | ||
2484 | - if (!*new_fsp) | ||
2485 | - return -ENOMEM; | ||
2486 | - } | ||
2487 | + if (!(unshare_flags & CLONE_FS) || !fs) | ||
2488 | + return 0; | ||
2489 | + | ||
2490 | + /* don't need lock here; in the worst case we'll do useless copy */ | ||
2491 | + if (fs->users == 1) | ||
2492 | + return 0; | ||
2493 | + | ||
2494 | + *new_fsp = copy_fs_struct(fs); | ||
2495 | + if (!*new_fsp) | ||
2496 | + return -ENOMEM; | ||
2497 | |||
2498 | return 0; | ||
2499 | } | ||
2500 | @@ -1664,8 +1651,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) | ||
2501 | |||
2502 | if (new_fs) { | ||
2503 | fs = current->fs; | ||
2504 | + write_lock(&fs->lock); | ||
2505 | current->fs = new_fs; | ||
2506 | - new_fs = fs; | ||
2507 | + if (--fs->users) | ||
2508 | + new_fs = NULL; | ||
2509 | + else | ||
2510 | + new_fs = fs; | ||
2511 | + write_unlock(&fs->lock); | ||
2512 | } | ||
2513 | |||
2514 | if (new_mm) { | ||
2515 | @@ -1704,7 +1696,7 @@ bad_unshare_cleanup_sigh: | ||
2516 | |||
2517 | bad_unshare_cleanup_fs: | ||
2518 | if (new_fs) | ||
2519 | - put_fs_struct(new_fs); | ||
2520 | + free_fs_struct(new_fs); | ||
2521 | |||
2522 | bad_unshare_cleanup_thread: | ||
2523 | bad_unshare_out: | ||
2524 | diff --git a/kernel/ptrace.c b/kernel/ptrace.c | ||
2525 | index c9cf48b..dc3b98e 100644 | ||
2526 | --- a/kernel/ptrace.c | ||
2527 | +++ b/kernel/ptrace.c | ||
2528 | @@ -186,7 +186,7 @@ int ptrace_attach(struct task_struct *task) | ||
2529 | /* Protect exec's credential calculations against our interference; | ||
2530 | * SUID, SGID and LSM creds get determined differently under ptrace. | ||
2531 | */ | ||
2532 | - retval = mutex_lock_interruptible(¤t->cred_exec_mutex); | ||
2533 | + retval = mutex_lock_interruptible(&task->cred_exec_mutex); | ||
2534 | if (retval < 0) | ||
2535 | goto out; | ||
2536 | |||
2537 | @@ -230,7 +230,7 @@ repeat: | ||
2538 | bad: | ||
2539 | write_unlock_irqrestore(&tasklist_lock, flags); | ||
2540 | task_unlock(task); | ||
2541 | - mutex_unlock(¤t->cred_exec_mutex); | ||
2542 | + mutex_unlock(&task->cred_exec_mutex); | ||
2543 | out: | ||
2544 | return retval; | ||
2545 | } | ||
2546 | diff --git a/kernel/sched.c b/kernel/sched.c | ||
2547 | index 5e80629..7d13deb 100644 | ||
2548 | --- a/kernel/sched.c | ||
2549 | +++ b/kernel/sched.c | ||
2550 | @@ -4347,7 +4347,7 @@ void account_process_tick(struct task_struct *p, int user_tick) | ||
2551 | |||
2552 | if (user_tick) | ||
2553 | account_user_time(p, one_jiffy, one_jiffy_scaled); | ||
2554 | - else if (p != rq->idle) | ||
2555 | + else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) | ||
2556 | account_system_time(p, HARDIRQ_OFFSET, one_jiffy, | ||
2557 | one_jiffy_scaled); | ||
2558 | else | ||
2559 | diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c | ||
2560 | index 21a5ca8..83c4417 100644 | ||
2561 | --- a/kernel/time/tick-common.c | ||
2562 | +++ b/kernel/time/tick-common.c | ||
2563 | @@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_event_device *dev) | ||
2564 | for (;;) { | ||
2565 | if (!clockevents_program_event(dev, next, ktime_get())) | ||
2566 | return; | ||
2567 | - tick_periodic(cpu); | ||
2568 | + /* | ||
2569 | + * Have to be careful here. If we're in oneshot mode, | ||
2570 | + * before we call tick_periodic() in a loop, we need | ||
2571 | + * to be sure we're using a real hardware clocksource. | ||
2572 | + * Otherwise we could get trapped in an infinite | ||
2573 | + * loop, as the tick_periodic() increments jiffies, | ||
2574 | + * which then will increment time, possibly causing | ||
2575 | + * the loop to trigger again and again. | ||
2576 | + */ | ||
2577 | + if (timekeeping_valid_for_hres()) | ||
2578 | + tick_periodic(cpu); | ||
2579 | next = ktime_add(next, tick_period); | ||
2580 | } | ||
2581 | } | ||
2582 | diff --git a/mm/madvise.c b/mm/madvise.c | ||
2583 | index b9ce574..36d6ea2 100644 | ||
2584 | --- a/mm/madvise.c | ||
2585 | +++ b/mm/madvise.c | ||
2586 | @@ -112,6 +112,14 @@ static long madvise_willneed(struct vm_area_struct * vma, | ||
2587 | if (!file) | ||
2588 | return -EBADF; | ||
2589 | |||
2590 | + /* | ||
2591 | + * Page cache readahead assumes page cache pages are order-0 which | ||
2592 | + * is not the case for hugetlbfs. Do not give a bad return value | ||
2593 | + * but ignore the advice. | ||
2594 | + */ | ||
2595 | + if (vma->vm_flags & VM_HUGETLB) | ||
2596 | + return 0; | ||
2597 | + | ||
2598 | if (file->f_mapping->a_ops->get_xip_mem) { | ||
2599 | /* no bad return value, but ignore advice */ | ||
2600 | return 0; | ||
2601 | diff --git a/mm/mmap.c b/mm/mmap.c | ||
2602 | index f1aa6f9..efff81b 100644 | ||
2603 | --- a/mm/mmap.c | ||
2604 | +++ b/mm/mmap.c | ||
2605 | @@ -84,7 +84,7 @@ EXPORT_SYMBOL(vm_get_page_prot); | ||
2606 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ | ||
2607 | int sysctl_overcommit_ratio = 50; /* default is 50% */ | ||
2608 | int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; | ||
2609 | -atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0); | ||
2610 | +struct percpu_counter vm_committed_as; | ||
2611 | |||
2612 | /* | ||
2613 | * Check that a process has enough memory to allocate a new virtual | ||
2614 | @@ -178,11 +178,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) | ||
2615 | if (mm) | ||
2616 | allowed -= mm->total_vm / 32; | ||
2617 | |||
2618 | - /* | ||
2619 | - * cast `allowed' as a signed long because vm_committed_space | ||
2620 | - * sometimes has a negative value | ||
2621 | - */ | ||
2622 | - if (atomic_long_read(&vm_committed_space) < (long)allowed) | ||
2623 | + if (percpu_counter_read_positive(&vm_committed_as) < allowed) | ||
2624 | return 0; | ||
2625 | error: | ||
2626 | vm_unacct_memory(pages); | ||
2627 | @@ -2477,6 +2473,10 @@ void mm_drop_all_locks(struct mm_struct *mm) | ||
2628 | */ | ||
2629 | void __init mmap_init(void) | ||
2630 | { | ||
2631 | + int ret; | ||
2632 | + | ||
2633 | + ret = percpu_counter_init(&vm_committed_as, 0); | ||
2634 | + VM_BUG_ON(ret); | ||
2635 | vm_area_cachep = kmem_cache_create("vm_area_struct", | ||
2636 | sizeof(struct vm_area_struct), 0, | ||
2637 | SLAB_PANIC, NULL); | ||
2638 | diff --git a/mm/nommu.c b/mm/nommu.c | ||
2639 | index 2fcf47d..ee955bc 100644 | ||
2640 | --- a/mm/nommu.c | ||
2641 | +++ b/mm/nommu.c | ||
2642 | @@ -62,7 +62,7 @@ void *high_memory; | ||
2643 | struct page *mem_map; | ||
2644 | unsigned long max_mapnr; | ||
2645 | unsigned long num_physpages; | ||
2646 | -atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0); | ||
2647 | +struct percpu_counter vm_committed_as; | ||
2648 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ | ||
2649 | int sysctl_overcommit_ratio = 50; /* default is 50% */ | ||
2650 | int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; | ||
2651 | @@ -463,6 +463,10 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) | ||
2652 | */ | ||
2653 | void __init mmap_init(void) | ||
2654 | { | ||
2655 | + int ret; | ||
2656 | + | ||
2657 | + ret = percpu_counter_init(&vm_committed_as, 0); | ||
2658 | + VM_BUG_ON(ret); | ||
2659 | vm_region_jar = kmem_cache_create("vm_region_jar", | ||
2660 | sizeof(struct vm_region), 0, | ||
2661 | SLAB_PANIC, NULL); | ||
2662 | @@ -1849,12 +1853,9 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) | ||
2663 | if (mm) | ||
2664 | allowed -= mm->total_vm / 32; | ||
2665 | |||
2666 | - /* | ||
2667 | - * cast `allowed' as a signed long because vm_committed_space | ||
2668 | - * sometimes has a negative value | ||
2669 | - */ | ||
2670 | - if (atomic_long_read(&vm_committed_space) < (long)allowed) | ||
2671 | + if (percpu_counter_read_positive(&vm_committed_as) < allowed) | ||
2672 | return 0; | ||
2673 | + | ||
2674 | error: | ||
2675 | vm_unacct_memory(pages); | ||
2676 | |||
2677 | diff --git a/mm/swap.c b/mm/swap.c | ||
2678 | index 8adb9fe..2460f7d 100644 | ||
2679 | --- a/mm/swap.c | ||
2680 | +++ b/mm/swap.c | ||
2681 | @@ -514,49 +514,6 @@ unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, | ||
2682 | |||
2683 | EXPORT_SYMBOL(pagevec_lookup_tag); | ||
2684 | |||
2685 | -#ifdef CONFIG_SMP | ||
2686 | -/* | ||
2687 | - * We tolerate a little inaccuracy to avoid ping-ponging the counter between | ||
2688 | - * CPUs | ||
2689 | - */ | ||
2690 | -#define ACCT_THRESHOLD max(16, NR_CPUS * 2) | ||
2691 | - | ||
2692 | -static DEFINE_PER_CPU(long, committed_space); | ||
2693 | - | ||
2694 | -void vm_acct_memory(long pages) | ||
2695 | -{ | ||
2696 | - long *local; | ||
2697 | - | ||
2698 | - preempt_disable(); | ||
2699 | - local = &__get_cpu_var(committed_space); | ||
2700 | - *local += pages; | ||
2701 | - if (*local > ACCT_THRESHOLD || *local < -ACCT_THRESHOLD) { | ||
2702 | - atomic_long_add(*local, &vm_committed_space); | ||
2703 | - *local = 0; | ||
2704 | - } | ||
2705 | - preempt_enable(); | ||
2706 | -} | ||
2707 | - | ||
2708 | -#ifdef CONFIG_HOTPLUG_CPU | ||
2709 | - | ||
2710 | -/* Drop the CPU's cached committed space back into the central pool. */ | ||
2711 | -static int cpu_swap_callback(struct notifier_block *nfb, | ||
2712 | - unsigned long action, | ||
2713 | - void *hcpu) | ||
2714 | -{ | ||
2715 | - long *committed; | ||
2716 | - | ||
2717 | - committed = &per_cpu(committed_space, (long)hcpu); | ||
2718 | - if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { | ||
2719 | - atomic_long_add(*committed, &vm_committed_space); | ||
2720 | - *committed = 0; | ||
2721 | - drain_cpu_pagevecs((long)hcpu); | ||
2722 | - } | ||
2723 | - return NOTIFY_OK; | ||
2724 | -} | ||
2725 | -#endif /* CONFIG_HOTPLUG_CPU */ | ||
2726 | -#endif /* CONFIG_SMP */ | ||
2727 | - | ||
2728 | /* | ||
2729 | * Perform any setup for the swap system | ||
2730 | */ | ||
2731 | @@ -577,7 +534,4 @@ void __init swap_setup(void) | ||
2732 | * Right now other parts of the system means that we | ||
2733 | * _really_ don't want to cluster much more | ||
2734 | */ | ||
2735 | -#ifdef CONFIG_HOTPLUG_CPU | ||
2736 | - hotcpu_notifier(cpu_swap_callback, 0); | ||
2737 | -#endif | ||
2738 | } | ||
2739 | diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c | ||
2740 | index 2b890af..4a78c17 100644 | ||
2741 | --- a/net/mac80211/mlme.c | ||
2742 | +++ b/net/mac80211/mlme.c | ||
2743 | @@ -1342,7 +1342,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, | ||
2744 | |||
2745 | for (i = 0; i < elems.ext_supp_rates_len; i++) { | ||
2746 | int rate = (elems.ext_supp_rates[i] & 0x7f) * 5; | ||
2747 | - bool is_basic = !!(elems.supp_rates[i] & 0x80); | ||
2748 | + bool is_basic = !!(elems.ext_supp_rates[i] & 0x80); | ||
2749 | |||
2750 | if (rate > 110) | ||
2751 | have_higher_than_11mbit = true; | ||
2752 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c | ||
2753 | index 7175ae8..75837ca 100644 | ||
2754 | --- a/net/mac80211/rx.c | ||
2755 | +++ b/net/mac80211/rx.c | ||
2756 | @@ -29,6 +29,7 @@ | ||
2757 | static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | ||
2758 | struct tid_ampdu_rx *tid_agg_rx, | ||
2759 | struct sk_buff *skb, | ||
2760 | + struct ieee80211_rx_status *status, | ||
2761 | u16 mpdu_seq_num, | ||
2762 | int bar_req); | ||
2763 | /* | ||
2764 | @@ -1538,7 +1539,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) | ||
2765 | /* manage reordering buffer according to requested */ | ||
2766 | /* sequence number */ | ||
2767 | rcu_read_lock(); | ||
2768 | - ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, | ||
2769 | + ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL, | ||
2770 | start_seq_num, 1); | ||
2771 | rcu_read_unlock(); | ||
2772 | return RX_DROP_UNUSABLE; | ||
2773 | @@ -2034,6 +2035,7 @@ static inline u16 seq_sub(u16 sq1, u16 sq2) | ||
2774 | static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | ||
2775 | struct tid_ampdu_rx *tid_agg_rx, | ||
2776 | struct sk_buff *skb, | ||
2777 | + struct ieee80211_rx_status *rxstatus, | ||
2778 | u16 mpdu_seq_num, | ||
2779 | int bar_req) | ||
2780 | { | ||
2781 | @@ -2115,6 +2117,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | ||
2782 | |||
2783 | /* put the frame in the reordering buffer */ | ||
2784 | tid_agg_rx->reorder_buf[index] = skb; | ||
2785 | + memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus, | ||
2786 | + sizeof(*rxstatus)); | ||
2787 | tid_agg_rx->stored_mpdu_num++; | ||
2788 | /* release the buffer until next missing frame */ | ||
2789 | index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) | ||
2790 | @@ -2140,7 +2144,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, | ||
2791 | } | ||
2792 | |||
2793 | static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, | ||
2794 | - struct sk_buff *skb) | ||
2795 | + struct sk_buff *skb, | ||
2796 | + struct ieee80211_rx_status *status) | ||
2797 | { | ||
2798 | struct ieee80211_hw *hw = &local->hw; | ||
2799 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
2800 | @@ -2191,7 +2196,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local, | ||
2801 | |||
2802 | /* according to mpdu sequence number deal with reordering buffer */ | ||
2803 | mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; | ||
2804 | - ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, | ||
2805 | + ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status, | ||
2806 | mpdu_seq_num, 0); | ||
2807 | end_reorder: | ||
2808 | return ret; | ||
2809 | @@ -2255,7 +2260,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, | ||
2810 | return; | ||
2811 | } | ||
2812 | |||
2813 | - if (!ieee80211_rx_reorder_ampdu(local, skb)) | ||
2814 | + if (!ieee80211_rx_reorder_ampdu(local, skb, status)) | ||
2815 | __ieee80211_rx_handle_packet(hw, skb, status, rate); | ||
2816 | |||
2817 | rcu_read_unlock(); | ||
2818 | diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c | ||
2819 | index 8892161..723b647 100644 | ||
2820 | --- a/scripts/mod/modpost.c | ||
2821 | +++ b/scripts/mod/modpost.c | ||
2822 | @@ -2005,6 +2005,7 @@ static void read_markers(const char *fname) | ||
2823 | if (!mod->skip) | ||
2824 | add_marker(mod, marker, fmt); | ||
2825 | } | ||
2826 | + release_file(file, size); | ||
2827 | return; | ||
2828 | fail: | ||
2829 | fatal("parse error in markers list file\n"); | ||
2830 | diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c | ||
2831 | index 0081597..e210b21 100644 | ||
2832 | --- a/security/selinux/hooks.c | ||
2833 | +++ b/security/selinux/hooks.c | ||
2834 | @@ -4661,6 +4661,7 @@ static int selinux_ip_postroute_iptables_compat(struct sock *sk, | ||
2835 | if (err) | ||
2836 | return err; | ||
2837 | err = avc_has_perm(sk_sid, if_sid, SECCLASS_NETIF, netif_perm, ad); | ||
2838 | + if (err) | ||
2839 | return err; | ||
2840 | |||
2841 | err = sel_netnode_sid(addrp, family, &node_sid); | ||
2842 | diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c | ||
2843 | index d004e58..f3d15d5 100644 | ||
2844 | --- a/sound/soc/codecs/wm8580.c | ||
2845 | +++ b/sound/soc/codecs/wm8580.c | ||
2846 | @@ -533,7 +533,7 @@ static int wm8580_set_dai_pll(struct snd_soc_dai *codec_dai, | ||
2847 | reg = wm8580_read(codec, WM8580_PLLA4 + offset); | ||
2848 | reg &= ~0x3f; | ||
2849 | reg |= pll_div.prescale | pll_div.postscale << 1 | | ||
2850 | - pll_div.freqmode << 4; | ||
2851 | + pll_div.freqmode << 3; | ||
2852 | |||
2853 | wm8580_write(codec, WM8580_PLLA4 + offset, reg); | ||
2854 | |||
2855 | diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c | ||
2856 | index 73e59f4..9ce1c59 100644 | ||
2857 | --- a/sound/usb/usx2y/us122l.c | ||
2858 | +++ b/sound/usb/usx2y/us122l.c | ||
2859 | @@ -478,6 +478,14 @@ static bool us122l_create_card(struct snd_card *card) | ||
2860 | return true; | ||
2861 | } | ||
2862 | |||
2863 | +static void snd_us122l_free(struct snd_card *card) | ||
2864 | +{ | ||
2865 | + struct us122l *us122l = US122L(card); | ||
2866 | + int index = us122l->chip.index; | ||
2867 | + if (index >= 0 && index < SNDRV_CARDS) | ||
2868 | + snd_us122l_card_used[index] = 0; | ||
2869 | +} | ||
2870 | + | ||
2871 | static struct snd_card *usx2y_create_card(struct usb_device *device) | ||
2872 | { | ||
2873 | int dev; | ||
2874 | @@ -492,7 +500,7 @@ static struct snd_card *usx2y_create_card(struct usb_device *device) | ||
2875 | if (!card) | ||
2876 | return NULL; | ||
2877 | snd_us122l_card_used[US122L(card)->chip.index = dev] = 1; | ||
2878 | - | ||
2879 | + card->private_free = snd_us122l_free; | ||
2880 | US122L(card)->chip.dev = device; | ||
2881 | US122L(card)->chip.card = card; | ||
2882 | mutex_init(&US122L(card)->mutex); | ||
2883 | @@ -575,7 +583,7 @@ static void snd_us122l_disconnect(struct usb_interface *intf) | ||
2884 | } | ||
2885 | |||
2886 | usb_put_intf(intf); | ||
2887 | - usb_put_dev(US122L(card)->chip.dev); | ||
2888 | + usb_put_dev(us122l->chip.dev); | ||
2889 | |||
2890 | while (atomic_read(&us122l->mmap_count)) | ||
2891 | msleep(500); | ||
2892 | diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c | ||
2893 | index 6723411..d85642e 100644 | ||
2894 | --- a/virt/kvm/kvm_main.c | ||
2895 | +++ b/virt/kvm/kvm_main.c | ||
2896 | @@ -964,6 +964,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | ||
2897 | int r; | ||
2898 | gfn_t base_gfn; | ||
2899 | unsigned long npages; | ||
2900 | + int largepages; | ||
2901 | unsigned long i; | ||
2902 | struct kvm_memory_slot *memslot; | ||
2903 | struct kvm_memory_slot old, new; | ||
2904 | @@ -1004,7 +1005,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | ||
2905 | for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { | ||
2906 | struct kvm_memory_slot *s = &kvm->memslots[i]; | ||
2907 | |||
2908 | - if (s == memslot) | ||
2909 | + if (s == memslot || !s->npages) | ||
2910 | continue; | ||
2911 | if (!((base_gfn + npages <= s->base_gfn) || | ||
2912 | (base_gfn >= s->base_gfn + s->npages))) | ||
2913 | @@ -1039,11 +1040,8 @@ int __kvm_set_memory_region(struct kvm *kvm, | ||
2914 | new.userspace_addr = 0; | ||
2915 | } | ||
2916 | if (npages && !new.lpage_info) { | ||
2917 | - int largepages = npages / KVM_PAGES_PER_HPAGE; | ||
2918 | - if (npages % KVM_PAGES_PER_HPAGE) | ||
2919 | - largepages++; | ||
2920 | - if (base_gfn % KVM_PAGES_PER_HPAGE) | ||
2921 | - largepages++; | ||
2922 | + largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE; | ||
2923 | + largepages -= base_gfn / KVM_PAGES_PER_HPAGE; | ||
2924 | |||
2925 | new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info)); | ||
2926 | |||
2927 | @@ -1999,6 +1997,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg) | ||
2928 | switch (arg) { | ||
2929 | case KVM_CAP_USER_MEMORY: | ||
2930 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: | ||
2931 | + case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS: | ||
2932 | return 1; | ||
2933 | default: | ||
2934 | break; |