Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.27-r3/0121-2.6.27.22-all-fixes.patch

Parent Directory | Revision Log


Revision 1176 - (show annotations) (download)
Thu Oct 14 15:11:06 2010 UTC (13 years, 6 months ago) by niro
File size: 77360 byte(s)
-2.6.27-alx-r3: new magellan 0.5.2 kernel
1 diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
2 index 6d406c5..9696cc3 100644
3 --- a/arch/powerpc/include/asm/futex.h
4 +++ b/arch/powerpc/include/asm/futex.h
5 @@ -27,7 +27,7 @@
6 PPC_LONG "1b,4b,2b,4b\n" \
7 ".previous" \
8 : "=&r" (oldval), "=&r" (ret) \
9 - : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
10 + : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
11 : "cr0", "memory")
12
13 static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
14 @@ -47,19 +47,19 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
15
16 switch (op) {
17 case FUTEX_OP_SET:
18 - __futex_atomic_op("", ret, oldval, uaddr, oparg);
19 + __futex_atomic_op("mr %1,%4\n", ret, oldval, uaddr, oparg);
20 break;
21 case FUTEX_OP_ADD:
22 - __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg);
23 + __futex_atomic_op("add %1,%0,%4\n", ret, oldval, uaddr, oparg);
24 break;
25 case FUTEX_OP_OR:
26 - __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg);
27 + __futex_atomic_op("or %1,%0,%4\n", ret, oldval, uaddr, oparg);
28 break;
29 case FUTEX_OP_ANDN:
30 - __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg);
31 + __futex_atomic_op("andc %1,%0,%4\n", ret, oldval, uaddr, oparg);
32 break;
33 case FUTEX_OP_XOR:
34 - __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg);
35 + __futex_atomic_op("xor %1,%0,%4\n", ret, oldval, uaddr, oparg);
36 break;
37 default:
38 ret = -ENOSYS;
39 diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
40 index 101ed87..ae1c5b5 100644
41 --- a/arch/powerpc/include/asm/processor.h
42 +++ b/arch/powerpc/include/asm/processor.h
43 @@ -309,6 +309,25 @@ static inline void prefetchw(const void *x)
44 #define HAVE_ARCH_PICK_MMAP_LAYOUT
45 #endif
46
47 +#ifdef CONFIG_PPC64
48 +static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
49 +{
50 + unsigned long sp;
51 +
52 + if (is_32)
53 + sp = regs->gpr[1] & 0x0ffffffffUL;
54 + else
55 + sp = regs->gpr[1];
56 +
57 + return sp;
58 +}
59 +#else
60 +static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
61 +{
62 + return regs->gpr[1];
63 +}
64 +#endif
65 +
66 #endif /* __KERNEL__ */
67 #endif /* __ASSEMBLY__ */
68 #endif /* _ASM_POWERPC_PROCESSOR_H */
69 diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
70 index a54405e..00b5078 100644
71 --- a/arch/powerpc/kernel/signal.c
72 +++ b/arch/powerpc/kernel/signal.c
73 @@ -26,12 +26,12 @@ int show_unhandled_signals = 0;
74 * Allocate space for the signal frame
75 */
76 void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
77 - size_t frame_size)
78 + size_t frame_size, int is_32)
79 {
80 unsigned long oldsp, newsp;
81
82 /* Default to using normal stack */
83 - oldsp = regs->gpr[1];
84 + oldsp = get_clean_sp(regs, is_32);
85
86 /* Check for alt stack */
87 if ((ka->sa.sa_flags & SA_ONSTACK) &&
88 diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
89 index 28f4b9f..f77d502 100644
90 --- a/arch/powerpc/kernel/signal.h
91 +++ b/arch/powerpc/kernel/signal.h
92 @@ -13,7 +13,7 @@
93 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
94
95 extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
96 - size_t frame_size);
97 + size_t frame_size, int is_32);
98 extern void restore_sigmask(sigset_t *set);
99
100 extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
101 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
102 index a6a4310..9084a27 100644
103 --- a/arch/powerpc/kernel/signal_32.c
104 +++ b/arch/powerpc/kernel/signal_32.c
105 @@ -836,7 +836,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
106
107 /* Set up Signal Frame */
108 /* Put a Real Time Context onto stack */
109 - rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf));
110 + rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
111 addr = rt_sf;
112 if (unlikely(rt_sf == NULL))
113 goto badframe;
114 @@ -1170,7 +1170,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
115 unsigned long newsp = 0;
116
117 /* Set up Signal Frame */
118 - frame = get_sigframe(ka, regs, sizeof(*frame));
119 + frame = get_sigframe(ka, regs, sizeof(*frame), 1);
120 if (unlikely(frame == NULL))
121 goto badframe;
122 sc = (struct sigcontext __user *) &frame->sctx;
123 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
124 index e4acdbd..3de15b0 100644
125 --- a/arch/powerpc/kernel/signal_64.c
126 +++ b/arch/powerpc/kernel/signal_64.c
127 @@ -404,7 +404,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
128 unsigned long newsp = 0;
129 long err = 0;
130
131 - frame = get_sigframe(ka, regs, sizeof(*frame));
132 + frame = get_sigframe(ka, regs, sizeof(*frame), 0);
133 if (unlikely(frame == NULL))
134 goto badframe;
135
136 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
137 index 8c3c25f..a99dbbe 100644
138 --- a/arch/x86/boot/memory.c
139 +++ b/arch/x86/boot/memory.c
140 @@ -27,13 +27,14 @@ static int detect_memory_e820(void)
141 do {
142 size = sizeof(struct e820entry);
143
144 - /* Important: %edx is clobbered by some BIOSes,
145 - so it must be either used for the error output
146 + /* Important: %edx and %esi are clobbered by some BIOSes,
147 + so they must be either used for the error output
148 or explicitly marked clobbered. */
149 asm("int $0x15; setc %0"
150 : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
151 "=m" (*desc)
152 - : "D" (desc), "d" (SMAP), "a" (0xe820));
153 + : "D" (desc), "d" (SMAP), "a" (0xe820)
154 + : "esi");
155
156 /* BIOSes which terminate the chain with CF = 1 as opposed
157 to %ebx = 0 don't always report the SMAP signature on
158 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
159 index cb7d3b6..26baabd 100644
160 --- a/arch/x86/kernel/cpu/mtrr/generic.c
161 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
162 @@ -45,6 +45,32 @@ u64 mtrr_tom2;
163 static int mtrr_show;
164 module_param_named(show, mtrr_show, bool, 0);
165
166 +/**
167 + * BIOS is expected to clear MtrrFixDramModEn bit, see for example
168 + * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
169 + * Opteron Processors" (26094 Rev. 3.30 February 2006), section
170 + * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
171 + * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
172 + * 0 for operation."
173 + */
174 +static inline void k8_check_syscfg_dram_mod_en(void)
175 +{
176 + u32 lo, hi;
177 +
178 + if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
179 + (boot_cpu_data.x86 >= 0x0f)))
180 + return;
181 +
182 + rdmsr(MSR_K8_SYSCFG, lo, hi);
183 + if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
184 + printk(KERN_ERR "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
185 + " not cleared by BIOS, clearing this bit\n",
186 + smp_processor_id());
187 + lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
188 + mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
189 + }
190 +}
191 +
192 /*
193 * Returns the effective MTRR type for the region
194 * Error returns:
195 @@ -178,6 +204,8 @@ get_fixed_ranges(mtrr_type * frs)
196 unsigned int *p = (unsigned int *) frs;
197 int i;
198
199 + k8_check_syscfg_dram_mod_en();
200 +
201 rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
202
203 for (i = 0; i < 2; i++)
204 @@ -312,27 +340,10 @@ void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
205 }
206
207 /**
208 - * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
209 - * see AMD publication no. 24593, chapter 3.2.1 for more information
210 - */
211 -static inline void k8_enable_fixed_iorrs(void)
212 -{
213 - unsigned lo, hi;
214 -
215 - rdmsr(MSR_K8_SYSCFG, lo, hi);
216 - mtrr_wrmsr(MSR_K8_SYSCFG, lo
217 - | K8_MTRRFIXRANGE_DRAM_ENABLE
218 - | K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
219 -}
220 -
221 -/**
222 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
223 * @msr: MSR address of the MTTR which should be checked and updated
224 * @changed: pointer which indicates whether the MTRR needed to be changed
225 * @msrwords: pointer to the MSR values which the MSR should have
226 - *
227 - * If K8 extentions are wanted, update the K8 SYSCFG MSR also.
228 - * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
229 */
230 static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
231 {
232 @@ -341,10 +352,6 @@ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
233 rdmsr(msr, lo, hi);
234
235 if (lo != msrwords[0] || hi != msrwords[1]) {
236 - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
237 - (boot_cpu_data.x86 >= 0x0f && boot_cpu_data.x86 <= 0x11) &&
238 - ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
239 - k8_enable_fixed_iorrs();
240 mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
241 *changed = true;
242 }
243 @@ -428,6 +435,8 @@ static int set_fixed_ranges(mtrr_type * frs)
244 bool changed = false;
245 int block=-1, range;
246
247 + k8_check_syscfg_dram_mod_en();
248 +
249 while (fixed_range_blocks[++block].ranges)
250 for (range=0; range < fixed_range_blocks[block].ranges; range++)
251 set_fixed_range(fixed_range_blocks[block].base_msr + range,
252 diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
253 index 8791fc5..b4b899a 100644
254 --- a/arch/x86/pci/i386.c
255 +++ b/arch/x86/pci/i386.c
256 @@ -326,6 +326,9 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
257 return -EINVAL;
258 }
259 flags = new_flags;
260 + vma->vm_page_prot = __pgprot(
261 + (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK) |
262 + flags);
263 }
264
265 if (((vma->vm_pgoff < max_low_pfn_mapped) ||
266 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
267 index 2694998..e618cbe 100644
268 --- a/drivers/acpi/ec.c
269 +++ b/drivers/acpi/ec.c
270 @@ -968,9 +968,9 @@ static const struct acpi_device_id ec_device_ids[] = {
271
272 int __init acpi_ec_ecdt_probe(void)
273 {
274 - int ret;
275 acpi_status status;
276 struct acpi_table_ecdt *ecdt_ptr;
277 + acpi_handle dummy;
278
279 boot_ec = make_acpi_ec();
280 if (!boot_ec)
281 @@ -996,30 +996,31 @@ int __init acpi_ec_ecdt_probe(void)
282 boot_ec->gpe = ecdt_ptr->gpe;
283 boot_ec->handle = ACPI_ROOT_OBJECT;
284 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
285 - } else {
286 - /* This workaround is needed only on some broken machines,
287 - * which require early EC, but fail to provide ECDT */
288 - acpi_handle x;
289 - printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
290 - status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
291 - boot_ec, NULL);
292 - /* Check that acpi_get_devices actually find something */
293 - if (ACPI_FAILURE(status) || !boot_ec->handle)
294 - goto error;
295 - /* We really need to limit this workaround, the only ASUS,
296 - * which needs it, has fake EC._INI method, so use it as flag.
297 - * Keep boot_ec struct as it will be needed soon.
298 - */
299 - if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &x)))
300 - return -ENODEV;
301 + /* Add some basic check against completely broken table */
302 + if (boot_ec->data_addr != boot_ec->command_addr)
303 + goto install;
304 + /* fall through */
305 }
306 -
307 - ret = ec_install_handlers(boot_ec);
308 - if (!ret) {
309 + /* This workaround is needed only on some broken machines,
310 + * which require early EC, but fail to provide ECDT */
311 + printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
312 + status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
313 + boot_ec, NULL);
314 + /* Check that acpi_get_devices actually finds something */
315 + if (ACPI_FAILURE(status) || !boot_ec->handle)
316 + goto error;
317 + /* We really need to limit this workaround, the only ASUS,
318 + * which needs it, has fake EC._INI method, so use it as flag.
319 + * Keep boot_ec struct as it will be needed soon.
320 + */
321 + if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &dummy)))
322 + return -ENODEV;
323 +install:
324 + if (!ec_install_handlers(boot_ec)) {
325 first_ec = boot_ec;
326 return 0;
327 }
328 - error:
329 +error:
330 kfree(boot_ec);
331 boot_ec = NULL;
332 return -ENODEV;
333 diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
334 index 4216399..233a5fd 100644
335 --- a/drivers/ata/pata_hpt37x.c
336 +++ b/drivers/ata/pata_hpt37x.c
337 @@ -8,7 +8,7 @@
338 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
339 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
340 * Portions Copyright (C) 2003 Red Hat Inc
341 - * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
342 + * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
343 *
344 * TODO
345 * Look into engine reset on timeout errors. Should not be required.
346 @@ -24,7 +24,7 @@
347 #include <linux/libata.h>
348
349 #define DRV_NAME "pata_hpt37x"
350 -#define DRV_VERSION "0.6.11"
351 +#define DRV_VERSION "0.6.12"
352
353 struct hpt_clock {
354 u8 xfer_speed;
355 @@ -445,23 +445,6 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
356 }
357
358 /**
359 - * hpt370_bmdma_start - DMA engine begin
360 - * @qc: ATA command
361 - *
362 - * The 370 and 370A want us to reset the DMA engine each time we
363 - * use it. The 372 and later are fine.
364 - */
365 -
366 -static void hpt370_bmdma_start(struct ata_queued_cmd *qc)
367 -{
368 - struct ata_port *ap = qc->ap;
369 - struct pci_dev *pdev = to_pci_dev(ap->host->dev);
370 - pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
371 - udelay(10);
372 - ata_bmdma_start(qc);
373 -}
374 -
375 -/**
376 * hpt370_bmdma_end - DMA engine stop
377 * @qc: ATA command
378 *
379 @@ -598,7 +581,6 @@ static struct scsi_host_template hpt37x_sht = {
380 static struct ata_port_operations hpt370_port_ops = {
381 .inherits = &ata_bmdma_port_ops,
382
383 - .bmdma_start = hpt370_bmdma_start,
384 .bmdma_stop = hpt370_bmdma_stop,
385
386 .mode_filter = hpt370_filter,
387 diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
388 index 118dbde..8cf3dca 100644
389 --- a/drivers/char/agp/generic.c
390 +++ b/drivers/char/agp/generic.c
391 @@ -1207,7 +1207,7 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
392 {
393 struct page * page;
394
395 - page = alloc_page(GFP_KERNEL | GFP_DMA32);
396 + page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
397 if (page == NULL)
398 return NULL;
399
400 diff --git a/drivers/char/raw.c b/drivers/char/raw.c
401 index 47b8cf2..92022aa 100644
402 --- a/drivers/char/raw.c
403 +++ b/drivers/char/raw.c
404 @@ -90,6 +90,7 @@ out1:
405 blkdev_put(bdev);
406 out:
407 mutex_unlock(&raw_mutex);
408 + unlock_kernel();
409 return err;
410 }
411
412 diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
413 index 2d637e0..fdcd0ab 100644
414 --- a/drivers/crypto/ixp4xx_crypto.c
415 +++ b/drivers/crypto/ixp4xx_crypto.c
416 @@ -101,6 +101,7 @@ struct buffer_desc {
417 u32 phys_addr;
418 u32 __reserved[4];
419 struct buffer_desc *next;
420 + enum dma_data_direction dir;
421 };
422
423 struct crypt_ctl {
424 @@ -132,14 +133,10 @@ struct crypt_ctl {
425 struct ablk_ctx {
426 struct buffer_desc *src;
427 struct buffer_desc *dst;
428 - unsigned src_nents;
429 - unsigned dst_nents;
430 };
431
432 struct aead_ctx {
433 struct buffer_desc *buffer;
434 - unsigned short assoc_nents;
435 - unsigned short src_nents;
436 struct scatterlist ivlist;
437 /* used when the hmac is not on one sg entry */
438 u8 *hmac_virt;
439 @@ -312,7 +309,7 @@ static struct crypt_ctl *get_crypt_desc_emerg(void)
440 }
441 }
442
443 -static void free_buf_chain(struct buffer_desc *buf, u32 phys)
444 +static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
445 {
446 while (buf) {
447 struct buffer_desc *buf1;
448 @@ -320,6 +317,7 @@ static void free_buf_chain(struct buffer_desc *buf, u32 phys)
449
450 buf1 = buf->next;
451 phys1 = buf->phys_next;
452 + dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
453 dma_pool_free(buffer_pool, buf, phys);
454 buf = buf1;
455 phys = phys1;
456 @@ -348,7 +346,6 @@ static void one_packet(dma_addr_t phys)
457 struct crypt_ctl *crypt;
458 struct ixp_ctx *ctx;
459 int failed;
460 - enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
461
462 failed = phys & 0x1 ? -EBADMSG : 0;
463 phys &= ~0x3;
464 @@ -358,13 +355,8 @@ static void one_packet(dma_addr_t phys)
465 case CTL_FLAG_PERFORM_AEAD: {
466 struct aead_request *req = crypt->data.aead_req;
467 struct aead_ctx *req_ctx = aead_request_ctx(req);
468 - dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
469 - DMA_TO_DEVICE);
470 - dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
471 - dma_unmap_sg(dev, req->src, req_ctx->src_nents,
472 - DMA_BIDIRECTIONAL);
473
474 - free_buf_chain(req_ctx->buffer, crypt->src_buf);
475 + free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
476 if (req_ctx->hmac_virt) {
477 finish_scattered_hmac(crypt);
478 }
479 @@ -374,16 +366,11 @@ static void one_packet(dma_addr_t phys)
480 case CTL_FLAG_PERFORM_ABLK: {
481 struct ablkcipher_request *req = crypt->data.ablk_req;
482 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
483 - int nents;
484 +
485 if (req_ctx->dst) {
486 - nents = req_ctx->dst_nents;
487 - dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
488 - free_buf_chain(req_ctx->dst, crypt->dst_buf);
489 - src_direction = DMA_TO_DEVICE;
490 + free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
491 }
492 - nents = req_ctx->src_nents;
493 - dma_unmap_sg(dev, req->src, nents, src_direction);
494 - free_buf_chain(req_ctx->src, crypt->src_buf);
495 + free_buf_chain(dev, req_ctx->src, crypt->src_buf);
496 req->base.complete(&req->base, failed);
497 break;
498 }
499 @@ -748,56 +735,35 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
500 return 0;
501 }
502
503 -static int count_sg(struct scatterlist *sg, int nbytes)
504 +static struct buffer_desc *chainup_buffers(struct device *dev,
505 + struct scatterlist *sg, unsigned nbytes,
506 + struct buffer_desc *buf, gfp_t flags,
507 + enum dma_data_direction dir)
508 {
509 - int i;
510 - for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
511 - nbytes -= sg->length;
512 - return i;
513 -}
514 -
515 -static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
516 - unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
517 -{
518 - int nents = 0;
519 -
520 - while (nbytes > 0) {
521 + for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
522 + unsigned len = min(nbytes, sg->length);
523 struct buffer_desc *next_buf;
524 u32 next_buf_phys;
525 - unsigned len = min(nbytes, sg_dma_len(sg));
526 + void *ptr;
527
528 - nents++;
529 nbytes -= len;
530 - if (!buf->phys_addr) {
531 - buf->phys_addr = sg_dma_address(sg);
532 - buf->buf_len = len;
533 - buf->next = NULL;
534 - buf->phys_next = 0;
535 - goto next;
536 - }
537 - /* Two consecutive chunks on one page may be handled by the old
538 - * buffer descriptor, increased by the length of the new one
539 - */
540 - if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
541 - buf->buf_len += len;
542 - goto next;
543 - }
544 + ptr = page_address(sg_page(sg)) + sg->offset;
545 next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
546 - if (!next_buf)
547 - return NULL;
548 + if (!next_buf) {
549 + buf = NULL;
550 + break;
551 + }
552 + sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
553 buf->next = next_buf;
554 buf->phys_next = next_buf_phys;
555 -
556 buf = next_buf;
557 - buf->next = NULL;
558 - buf->phys_next = 0;
559 +
560 buf->phys_addr = sg_dma_address(sg);
561 buf->buf_len = len;
562 -next:
563 - if (nbytes > 0) {
564 - sg = sg_next(sg);
565 - }
566 + buf->dir = dir;
567 }
568 + buf->next = NULL;
569 + buf->phys_next = 0;
570 return buf;
571 }
572
573 @@ -858,12 +824,12 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
574 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
575 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
576 unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
577 - int ret = -ENOMEM;
578 struct ix_sa_dir *dir;
579 struct crypt_ctl *crypt;
580 - unsigned int nbytes = req->nbytes, nents;
581 + unsigned int nbytes = req->nbytes;
582 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
583 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
584 + struct buffer_desc src_hook;
585 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
586 GFP_KERNEL : GFP_ATOMIC;
587
588 @@ -876,7 +842,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
589
590 crypt = get_crypt_desc();
591 if (!crypt)
592 - return ret;
593 + return -ENOMEM;
594
595 crypt->data.ablk_req = req;
596 crypt->crypto_ctx = dir->npe_ctx_phys;
597 @@ -889,53 +855,41 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
598 BUG_ON(ivsize && !req->info);
599 memcpy(crypt->iv, req->info, ivsize);
600 if (req->src != req->dst) {
601 + struct buffer_desc dst_hook;
602 crypt->mode |= NPE_OP_NOT_IN_PLACE;
603 - nents = count_sg(req->dst, nbytes);
604 /* This was never tested by Intel
605 * for more than one dst buffer, I think. */
606 - BUG_ON(nents != 1);
607 - req_ctx->dst_nents = nents;
608 - dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
609 - req_ctx->dst = dma_pool_alloc(buffer_pool, flags,&crypt->dst_buf);
610 - if (!req_ctx->dst)
611 - goto unmap_sg_dest;
612 - req_ctx->dst->phys_addr = 0;
613 - if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
614 + BUG_ON(req->dst->length < nbytes);
615 + req_ctx->dst = NULL;
616 + if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
617 + flags, DMA_FROM_DEVICE))
618 goto free_buf_dest;
619 src_direction = DMA_TO_DEVICE;
620 + req_ctx->dst = dst_hook.next;
621 + crypt->dst_buf = dst_hook.phys_next;
622 } else {
623 req_ctx->dst = NULL;
624 - req_ctx->dst_nents = 0;
625 }
626 - nents = count_sg(req->src, nbytes);
627 - req_ctx->src_nents = nents;
628 - dma_map_sg(dev, req->src, nents, src_direction);
629 -
630 - req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
631 - if (!req_ctx->src)
632 - goto unmap_sg_src;
633 - req_ctx->src->phys_addr = 0;
634 - if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
635 + req_ctx->src = NULL;
636 + if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
637 + flags, src_direction))
638 goto free_buf_src;
639
640 + req_ctx->src = src_hook.next;
641 + crypt->src_buf = src_hook.phys_next;
642 crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
643 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
644 BUG_ON(qmgr_stat_overflow(SEND_QID));
645 return -EINPROGRESS;
646
647 free_buf_src:
648 - free_buf_chain(req_ctx->src, crypt->src_buf);
649 -unmap_sg_src:
650 - dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
651 + free_buf_chain(dev, req_ctx->src, crypt->src_buf);
652 free_buf_dest:
653 if (req->src != req->dst) {
654 - free_buf_chain(req_ctx->dst, crypt->dst_buf);
655 -unmap_sg_dest:
656 - dma_unmap_sg(dev, req->src, req_ctx->dst_nents,
657 - DMA_FROM_DEVICE);
658 + free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
659 }
660 crypt->ctl_flags = CTL_FLAG_UNUSED;
661 - return ret;
662 + return -ENOMEM;
663 }
664
665 static int ablk_encrypt(struct ablkcipher_request *req)
666 @@ -983,7 +937,7 @@ static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
667 break;
668
669 offset += sg->length;
670 - sg = sg_next(sg);
671 + sg = scatterwalk_sg_next(sg);
672 }
673 return (start + nbytes > offset + sg->length);
674 }
675 @@ -995,11 +949,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
676 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
677 unsigned ivsize = crypto_aead_ivsize(tfm);
678 unsigned authsize = crypto_aead_authsize(tfm);
679 - int ret = -ENOMEM;
680 struct ix_sa_dir *dir;
681 struct crypt_ctl *crypt;
682 - unsigned int cryptlen, nents;
683 - struct buffer_desc *buf;
684 + unsigned int cryptlen;
685 + struct buffer_desc *buf, src_hook;
686 struct aead_ctx *req_ctx = aead_request_ctx(req);
687 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
688 GFP_KERNEL : GFP_ATOMIC;
689 @@ -1020,7 +973,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
690 }
691 crypt = get_crypt_desc();
692 if (!crypt)
693 - return ret;
694 + return -ENOMEM;
695
696 crypt->data.aead_req = req;
697 crypt->crypto_ctx = dir->npe_ctx_phys;
698 @@ -1039,31 +992,27 @@ static int aead_perform(struct aead_request *req, int encrypt,
699 BUG(); /* -ENOTSUP because of my lazyness */
700 }
701
702 - req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
703 - if (!req_ctx->buffer)
704 - goto out;
705 - req_ctx->buffer->phys_addr = 0;
706 /* ASSOC data */
707 - nents = count_sg(req->assoc, req->assoclen);
708 - req_ctx->assoc_nents = nents;
709 - dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
710 - buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer,flags);
711 + buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
712 + flags, DMA_TO_DEVICE);
713 + req_ctx->buffer = src_hook.next;
714 + crypt->src_buf = src_hook.phys_next;
715 if (!buf)
716 - goto unmap_sg_assoc;
717 + goto out;
718 /* IV */
719 sg_init_table(&req_ctx->ivlist, 1);
720 sg_set_buf(&req_ctx->ivlist, iv, ivsize);
721 - dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
722 - buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
723 + buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
724 + DMA_BIDIRECTIONAL);
725 if (!buf)
726 - goto unmap_sg_iv;
727 + goto free_chain;
728 if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
729 /* The 12 hmac bytes are scattered,
730 * we need to copy them into a safe buffer */
731 req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
732 &crypt->icv_rev_aes);
733 if (unlikely(!req_ctx->hmac_virt))
734 - goto unmap_sg_iv;
735 + goto free_chain;
736 if (!encrypt) {
737 scatterwalk_map_and_copy(req_ctx->hmac_virt,
738 req->src, cryptlen, authsize, 0);
739 @@ -1073,33 +1022,28 @@ static int aead_perform(struct aead_request *req, int encrypt,
740 req_ctx->hmac_virt = NULL;
741 }
742 /* Crypt */
743 - nents = count_sg(req->src, cryptlen + authsize);
744 - req_ctx->src_nents = nents;
745 - dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
746 - buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
747 + buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
748 + DMA_BIDIRECTIONAL);
749 if (!buf)
750 - goto unmap_sg_src;
751 + goto free_hmac_virt;
752 if (!req_ctx->hmac_virt) {
753 crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
754 }
755 +
756 crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
757 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
758 BUG_ON(qmgr_stat_overflow(SEND_QID));
759 return -EINPROGRESS;
760 -unmap_sg_src:
761 - dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
762 +free_hmac_virt:
763 if (req_ctx->hmac_virt) {
764 dma_pool_free(buffer_pool, req_ctx->hmac_virt,
765 crypt->icv_rev_aes);
766 }
767 -unmap_sg_iv:
768 - dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
769 -unmap_sg_assoc:
770 - dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
771 - free_buf_chain(req_ctx->buffer, crypt->src_buf);
772 +free_chain:
773 + free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
774 out:
775 crypt->ctl_flags = CTL_FLAG_UNUSED;
776 - return ret;
777 + return -ENOMEM;
778 }
779
780 static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
781 diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
782 index c37ab17..7e443a3 100644
783 --- a/drivers/ide/pci/hpt366.c
784 +++ b/drivers/ide/pci/hpt366.c
785 @@ -114,6 +114,8 @@
786 * the register setting lists into the table indexed by the clock selected
787 * - set the correct hwif->ultra_mask for each individual chip
788 * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards
789 + * - stop resetting HPT370's state machine before each DMA transfer as that has
790 + * caused more harm than good
791 * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
792 */
793
794 @@ -134,7 +136,7 @@
795 #define DRV_NAME "hpt366"
796
797 /* various tuning parameters */
798 -#define HPT_RESET_STATE_ENGINE
799 +#undef HPT_RESET_STATE_ENGINE
800 #undef HPT_DELAY_INTERRUPT
801 #define HPT_SERIALIZE_IO 0
802
803 diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
804 index 3f11910..fcec2df 100644
805 --- a/drivers/isdn/gigaset/bas-gigaset.c
806 +++ b/drivers/isdn/gigaset/bas-gigaset.c
807 @@ -46,6 +46,9 @@ MODULE_PARM_DESC(cidmode, "Call-ID mode");
808 /* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
809 #define IF_WRITEBUF 264
810
811 +/* interrupt pipe message size according to ibid. ch. 2.2 */
812 +#define IP_MSGSIZE 3
813 +
814 /* Values for the Gigaset 307x */
815 #define USB_GIGA_VENDOR_ID 0x0681
816 #define USB_3070_PRODUCT_ID 0x0001
817 @@ -110,7 +113,7 @@ struct bas_cardstate {
818 unsigned char *rcvbuf; /* AT reply receive buffer */
819
820 struct urb *urb_int_in; /* URB for interrupt pipe */
821 - unsigned char int_in_buf[3];
822 + unsigned char *int_in_buf;
823
824 spinlock_t lock; /* locks all following */
825 int basstate; /* bitmap (BS_*) */
826 @@ -657,7 +660,7 @@ static void read_int_callback(struct urb *urb)
827 }
828
829 /* drop incomplete packets even if the missing bytes wouldn't matter */
830 - if (unlikely(urb->actual_length < 3)) {
831 + if (unlikely(urb->actual_length < IP_MSGSIZE)) {
832 dev_warn(cs->dev, "incomplete interrupt packet (%d bytes)\n",
833 urb->actual_length);
834 goto resubmit;
835 @@ -2127,6 +2130,7 @@ static void gigaset_reinitbcshw(struct bc_state *bcs)
836 static void gigaset_freecshw(struct cardstate *cs)
837 {
838 /* timers, URBs and rcvbuf are disposed of in disconnect */
839 + kfree(cs->hw.bas->int_in_buf);
840 kfree(cs->hw.bas);
841 cs->hw.bas = NULL;
842 }
843 @@ -2232,6 +2236,12 @@ static int gigaset_probe(struct usb_interface *interface,
844 }
845 hostif = interface->cur_altsetting;
846 }
847 + ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);
848 + if (!ucs->int_in_buf) {
849 + kfree(ucs);
850 + pr_err("out of memory\n");
851 + return 0;
852 + }
853
854 /* Reject application specific interfaces
855 */
856 @@ -2290,7 +2300,7 @@ static int gigaset_probe(struct usb_interface *interface,
857 usb_fill_int_urb(ucs->urb_int_in, udev,
858 usb_rcvintpipe(udev,
859 (endpoint->bEndpointAddress) & 0x0f),
860 - ucs->int_in_buf, 3, read_int_callback, cs,
861 + ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
862 endpoint->bInterval);
863 if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) {
864 dev_err(cs->dev, "could not submit interrupt URB: %s\n",
865 diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
866 index 34bb0e4..1a551a8 100644
867 --- a/drivers/misc/thinkpad_acpi.c
868 +++ b/drivers/misc/thinkpad_acpi.c
869 @@ -282,11 +282,17 @@ static u32 dbg_level;
870
871 static struct workqueue_struct *tpacpi_wq;
872
873 +enum led_status_t {
874 + TPACPI_LED_OFF = 0,
875 + TPACPI_LED_ON,
876 + TPACPI_LED_BLINK,
877 +};
878 +
879 /* Special LED class that can defer work */
880 struct tpacpi_led_classdev {
881 struct led_classdev led_classdev;
882 struct work_struct work;
883 - enum led_brightness new_brightness;
884 + enum led_status_t new_state;
885 unsigned int led;
886 };
887
888 @@ -3478,7 +3484,7 @@ static void light_set_status_worker(struct work_struct *work)
889 container_of(work, struct tpacpi_led_classdev, work);
890
891 if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
892 - light_set_status((data->new_brightness != LED_OFF));
893 + light_set_status((data->new_state != TPACPI_LED_OFF));
894 }
895
896 static void light_sysfs_set(struct led_classdev *led_cdev,
897 @@ -3488,7 +3494,8 @@ static void light_sysfs_set(struct led_classdev *led_cdev,
898 container_of(led_cdev,
899 struct tpacpi_led_classdev,
900 led_classdev);
901 - data->new_brightness = brightness;
902 + data->new_state = (brightness != LED_OFF) ?
903 + TPACPI_LED_ON : TPACPI_LED_OFF;
904 queue_work(tpacpi_wq, &data->work);
905 }
906
907 @@ -3995,12 +4002,6 @@ enum { /* For TPACPI_LED_OLD */
908 TPACPI_LED_EC_HLMS = 0x0e, /* EC reg to select led to command */
909 };
910
911 -enum led_status_t {
912 - TPACPI_LED_OFF = 0,
913 - TPACPI_LED_ON,
914 - TPACPI_LED_BLINK,
915 -};
916 -
917 static enum led_access_mode led_supported;
918
919 TPACPI_HANDLE(led, ec, "SLED", /* 570 */
920 @@ -4094,23 +4095,13 @@ static int led_set_status(const unsigned int led,
921 return rc;
922 }
923
924 -static void led_sysfs_set_status(unsigned int led,
925 - enum led_brightness brightness)
926 -{
927 - led_set_status(led,
928 - (brightness == LED_OFF) ?
929 - TPACPI_LED_OFF :
930 - (tpacpi_led_state_cache[led] == TPACPI_LED_BLINK) ?
931 - TPACPI_LED_BLINK : TPACPI_LED_ON);
932 -}
933 -
934 static void led_set_status_worker(struct work_struct *work)
935 {
936 struct tpacpi_led_classdev *data =
937 container_of(work, struct tpacpi_led_classdev, work);
938
939 if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
940 - led_sysfs_set_status(data->led, data->new_brightness);
941 + led_set_status(data->led, data->new_state);
942 }
943
944 static void led_sysfs_set(struct led_classdev *led_cdev,
945 @@ -4119,7 +4110,13 @@ static void led_sysfs_set(struct led_classdev *led_cdev,
946 struct tpacpi_led_classdev *data = container_of(led_cdev,
947 struct tpacpi_led_classdev, led_classdev);
948
949 - data->new_brightness = brightness;
950 + if (brightness == LED_OFF)
951 + data->new_state = TPACPI_LED_OFF;
952 + else if (tpacpi_led_state_cache[data->led] != TPACPI_LED_BLINK)
953 + data->new_state = TPACPI_LED_ON;
954 + else
955 + data->new_state = TPACPI_LED_BLINK;
956 +
957 queue_work(tpacpi_wq, &data->work);
958 }
959
960 @@ -4137,7 +4134,7 @@ static int led_sysfs_blink_set(struct led_classdev *led_cdev,
961 } else if ((*delay_on != 500) || (*delay_off != 500))
962 return -EINVAL;
963
964 - data->new_brightness = TPACPI_LED_BLINK;
965 + data->new_state = TPACPI_LED_BLINK;
966 queue_work(tpacpi_wq, &data->work);
967
968 return 0;
969 diff --git a/drivers/net/b44.c b/drivers/net/b44.c
970 index c3bda5c..f1521c6 100644
971 --- a/drivers/net/b44.c
972 +++ b/drivers/net/b44.c
973 @@ -750,7 +750,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
974 dest_idx * sizeof(dest_desc),
975 DMA_BIDIRECTIONAL);
976
977 - ssb_dma_sync_single_for_device(bp->sdev, le32_to_cpu(src_desc->addr),
978 + ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
979 RX_PKT_BUF_SZ,
980 DMA_FROM_DEVICE);
981 }
982 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
983 index 1b9c4dc..f9f29c9 100644
984 --- a/drivers/net/bonding/bond_main.c
985 +++ b/drivers/net/bonding/bond_main.c
986 @@ -3516,11 +3516,26 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave
987 }
988 break;
989 case NETDEV_CHANGE:
990 - /*
991 - * TODO: is this what we get if somebody
992 - * sets up a hierarchical bond, then rmmod's
993 - * one of the slave bonding devices?
994 - */
995 + if (bond->params.mode == BOND_MODE_8023AD || bond_is_lb(bond)) {
996 + struct slave *slave;
997 +
998 + slave = bond_get_slave_by_dev(bond, slave_dev);
999 + if (slave) {
1000 + u16 old_speed = slave->speed;
1001 + u16 old_duplex = slave->duplex;
1002 +
1003 + bond_update_speed_duplex(slave);
1004 +
1005 + if (bond_is_lb(bond))
1006 + break;
1007 +
1008 + if (old_speed != slave->speed)
1009 + bond_3ad_adapter_speed_changed(slave);
1010 + if (old_duplex != slave->duplex)
1011 + bond_3ad_adapter_duplex_changed(slave);
1012 + }
1013 + }
1014 +
1015 break;
1016 case NETDEV_DOWN:
1017 /*
1018 diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
1019 index fb730ec..0f6de5f 100644
1020 --- a/drivers/net/bonding/bonding.h
1021 +++ b/drivers/net/bonding/bonding.h
1022 @@ -248,6 +248,12 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
1023 return (struct bonding *)slave->dev->master->priv;
1024 }
1025
1026 +static inline bool bond_is_lb(const struct bonding *bond)
1027 +{
1028 + return bond->params.mode == BOND_MODE_TLB
1029 + || bond->params.mode == BOND_MODE_ALB;
1030 +}
1031 +
1032 #define BOND_FOM_NONE 0
1033 #define BOND_FOM_ACTIVE 1
1034 #define BOND_FOM_FOLLOW 2
1035 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
1036 index 39c17bb..a98d31a 100644
1037 --- a/drivers/net/r8169.c
1038 +++ b/drivers/net/r8169.c
1039 @@ -375,6 +375,22 @@ enum features {
1040 RTL_FEATURE_GMII = (1 << 2),
1041 };
1042
1043 +struct rtl8169_counters {
1044 + __le64 tx_packets;
1045 + __le64 rx_packets;
1046 + __le64 tx_errors;
1047 + __le32 rx_errors;
1048 + __le16 rx_missed;
1049 + __le16 align_errors;
1050 + __le32 tx_one_collision;
1051 + __le32 tx_multi_collision;
1052 + __le64 rx_unicast;
1053 + __le64 rx_broadcast;
1054 + __le32 rx_multicast;
1055 + __le16 tx_aborted;
1056 + __le16 tx_underun;
1057 +};
1058 +
1059 struct rtl8169_private {
1060 void __iomem *mmio_addr; /* memory map physical address */
1061 struct pci_dev *pci_dev; /* Index of PCI device */
1062 @@ -416,6 +432,7 @@ struct rtl8169_private {
1063 unsigned features;
1064
1065 struct mii_if_info mii;
1066 + struct rtl8169_counters counters;
1067 };
1068
1069 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
1070 @@ -960,22 +977,6 @@ static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1071 "tx_underrun",
1072 };
1073
1074 -struct rtl8169_counters {
1075 - __le64 tx_packets;
1076 - __le64 rx_packets;
1077 - __le64 tx_errors;
1078 - __le32 rx_errors;
1079 - __le16 rx_missed;
1080 - __le16 align_errors;
1081 - __le32 tx_one_collision;
1082 - __le32 tx_multi_collision;
1083 - __le64 rx_unicast;
1084 - __le64 rx_broadcast;
1085 - __le32 rx_multicast;
1086 - __le16 tx_aborted;
1087 - __le16 tx_underun;
1088 -};
1089 -
1090 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1091 {
1092 switch (sset) {
1093 @@ -986,16 +987,21 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1094 }
1095 }
1096
1097 -static void rtl8169_get_ethtool_stats(struct net_device *dev,
1098 - struct ethtool_stats *stats, u64 *data)
1099 +static void rtl8169_update_counters(struct net_device *dev)
1100 {
1101 struct rtl8169_private *tp = netdev_priv(dev);
1102 void __iomem *ioaddr = tp->mmio_addr;
1103 struct rtl8169_counters *counters;
1104 dma_addr_t paddr;
1105 u32 cmd;
1106 + int wait = 1000;
1107
1108 - ASSERT_RTNL();
1109 + /*
1110 + * Some chips are unable to dump tally counters when the receiver
1111 + * is disabled.
1112 + */
1113 + if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1114 + return;
1115
1116 counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
1117 if (!counters)
1118 @@ -1006,31 +1012,45 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
1119 RTL_W32(CounterAddrLow, cmd);
1120 RTL_W32(CounterAddrLow, cmd | CounterDump);
1121
1122 - while (RTL_R32(CounterAddrLow) & CounterDump) {
1123 - if (msleep_interruptible(1))
1124 + while (wait--) {
1125 + if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
1126 + /* copy updated counters */
1127 + memcpy(&tp->counters, counters, sizeof(*counters));
1128 break;
1129 + }
1130 + udelay(10);
1131 }
1132
1133 RTL_W32(CounterAddrLow, 0);
1134 RTL_W32(CounterAddrHigh, 0);
1135
1136 - data[0] = le64_to_cpu(counters->tx_packets);
1137 - data[1] = le64_to_cpu(counters->rx_packets);
1138 - data[2] = le64_to_cpu(counters->tx_errors);
1139 - data[3] = le32_to_cpu(counters->rx_errors);
1140 - data[4] = le16_to_cpu(counters->rx_missed);
1141 - data[5] = le16_to_cpu(counters->align_errors);
1142 - data[6] = le32_to_cpu(counters->tx_one_collision);
1143 - data[7] = le32_to_cpu(counters->tx_multi_collision);
1144 - data[8] = le64_to_cpu(counters->rx_unicast);
1145 - data[9] = le64_to_cpu(counters->rx_broadcast);
1146 - data[10] = le32_to_cpu(counters->rx_multicast);
1147 - data[11] = le16_to_cpu(counters->tx_aborted);
1148 - data[12] = le16_to_cpu(counters->tx_underun);
1149 -
1150 pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
1151 }
1152
1153 +static void rtl8169_get_ethtool_stats(struct net_device *dev,
1154 + struct ethtool_stats *stats, u64 *data)
1155 +{
1156 + struct rtl8169_private *tp = netdev_priv(dev);
1157 +
1158 + ASSERT_RTNL();
1159 +
1160 + rtl8169_update_counters(dev);
1161 +
1162 + data[0] = le64_to_cpu(tp->counters.tx_packets);
1163 + data[1] = le64_to_cpu(tp->counters.rx_packets);
1164 + data[2] = le64_to_cpu(tp->counters.tx_errors);
1165 + data[3] = le32_to_cpu(tp->counters.rx_errors);
1166 + data[4] = le16_to_cpu(tp->counters.rx_missed);
1167 + data[5] = le16_to_cpu(tp->counters.align_errors);
1168 + data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1169 + data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1170 + data[8] = le64_to_cpu(tp->counters.rx_unicast);
1171 + data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1172 + data[10] = le32_to_cpu(tp->counters.rx_multicast);
1173 + data[11] = le16_to_cpu(tp->counters.tx_aborted);
1174 + data[12] = le16_to_cpu(tp->counters.tx_underun);
1175 +}
1176 +
1177 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1178 {
1179 switch(stringset) {
1180 @@ -1667,8 +1687,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1181 goto err_out_free_res_4;
1182 }
1183
1184 - /* Unneeded ? Don't mess with Mrs. Murphy. */
1185 - rtl8169_irq_mask_and_ack(ioaddr);
1186 + RTL_W16(IntrMask, 0x0000);
1187
1188 /* Soft reset the chip. */
1189 RTL_W8(ChipCmd, CmdReset);
1190 @@ -1680,6 +1699,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1191 msleep_interruptible(1);
1192 }
1193
1194 + RTL_W16(IntrStatus, 0xffff);
1195 +
1196 /* Identify chip attached to board */
1197 rtl8169_get_mac_version(tp, ioaddr);
1198
1199 @@ -2529,13 +2550,6 @@ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
1200 opts1 |= FirstFrag;
1201 } else {
1202 len = skb->len;
1203 -
1204 - if (unlikely(len < ETH_ZLEN)) {
1205 - if (skb_padto(skb, ETH_ZLEN))
1206 - goto err_update_stats;
1207 - len = ETH_ZLEN;
1208 - }
1209 -
1210 opts1 |= FirstFrag | LastFrag;
1211 tp->tx_skb[entry].skb = skb;
1212 }
1213 @@ -2573,7 +2587,6 @@ out:
1214 err_stop:
1215 netif_stop_queue(dev);
1216 ret = NETDEV_TX_BUSY;
1217 -err_update_stats:
1218 dev->stats.tx_dropped++;
1219 goto out;
1220 }
1221 @@ -2979,6 +2992,9 @@ static int rtl8169_close(struct net_device *dev)
1222 struct rtl8169_private *tp = netdev_priv(dev);
1223 struct pci_dev *pdev = tp->pci_dev;
1224
1225 + /* update counters before going down */
1226 + rtl8169_update_counters(dev);
1227 +
1228 rtl8169_down(dev);
1229
1230 free_irq(dev->irq, dev);
1231 diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
1232 index d1b0fba..8ccf374 100644
1233 --- a/drivers/net/wireless/ath9k/ath9k.h
1234 +++ b/drivers/net/wireless/ath9k/ath9k.h
1235 @@ -591,8 +591,8 @@ struct ath9k_country_entry {
1236 u8 iso[3];
1237 };
1238
1239 -#define REG_WRITE(_ah, _reg, _val) iowrite32(_val, _ah->ah_sh + _reg)
1240 -#define REG_READ(_ah, _reg) ioread32(_ah->ah_sh + _reg)
1241 +#define REG_WRITE(_ah, _reg, _val) ath9k_iowrite32((_ah), (_reg), (_val))
1242 +#define REG_READ(_ah, _reg) ath9k_ioread32((_ah), (_reg))
1243
1244 #define SM(_v, _f) (((_v) << _f##_S) & _f)
1245 #define MS(_v, _f) (((_v) & _f) >> _f##_S)
1246 diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
1247 index 87e37bc..e50ba6d 100644
1248 --- a/drivers/net/wireless/ath9k/core.c
1249 +++ b/drivers/net/wireless/ath9k/core.c
1250 @@ -1120,6 +1120,7 @@ int ath_init(u16 devid, struct ath_softc *sc)
1251 sc->sc_cachelsz = csz << 2; /* convert to bytes */
1252
1253 spin_lock_init(&sc->sc_resetlock);
1254 + spin_lock_init(&sc->sc_serial_rw);
1255
1256 ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
1257 if (ah == NULL) {
1258 diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
1259 index 88f4cc3..51ef315 100644
1260 --- a/drivers/net/wireless/ath9k/core.h
1261 +++ b/drivers/net/wireless/ath9k/core.h
1262 @@ -1022,6 +1022,7 @@ struct ath_softc {
1263 spinlock_t sc_rxbuflock;
1264 spinlock_t sc_txbuflock;
1265 spinlock_t sc_resetlock;
1266 + spinlock_t sc_serial_rw;
1267 spinlock_t node_lock;
1268 };
1269
1270 @@ -1069,4 +1070,36 @@ void ath_skb_unmap_single(struct ath_softc *sc,
1271 void ath_mcast_merge(struct ath_softc *sc, u32 mfilt[2]);
1272 enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc);
1273
1274 +/*
1275 + * Read and write, they both share the same lock. We do this to serialize
1276 + * reads and writes on Atheros 802.11n PCI devices only. This is required
1277 + * as the FIFO on these devices can only accept sanely 2 requests. After
1278 + * that the device goes bananas. Serializing the reads/writes prevents this
1279 + * from happening.
1280 + */
1281 +
1282 +static inline void ath9k_iowrite32(struct ath_hal *ah, u32 reg_offset, u32 val)
1283 +{
1284 + if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
1285 + unsigned long flags;
1286 + spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
1287 + iowrite32(val, ah->ah_sc->mem + reg_offset);
1288 + spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
1289 + } else
1290 + iowrite32(val, ah->ah_sc->mem + reg_offset);
1291 +}
1292 +
1293 +static inline unsigned int ath9k_ioread32(struct ath_hal *ah, u32 reg_offset)
1294 +{
1295 + u32 val;
1296 + if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
1297 + unsigned long flags;
1298 + spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
1299 + val = ioread32(ah->ah_sc->mem + reg_offset);
1300 + spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
1301 + } else
1302 + val = ioread32(ah->ah_sc->mem + reg_offset);
1303 + return val;
1304 +}
1305 +
1306 #endif /* CORE_H */
1307 diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
1308 index 69120b5..c40b677 100644
1309 --- a/drivers/net/wireless/ath9k/hw.c
1310 +++ b/drivers/net/wireless/ath9k/hw.c
1311 @@ -369,6 +369,25 @@ static void ath9k_hw_set_defaults(struct ath_hal *ah)
1312 }
1313
1314 ah->ah_config.intr_mitigation = 0;
1315 +
1316 + /*
1317 + * We need this for PCI devices only (Cardbus, PCI, miniPCI)
1318 + * _and_ if on non-uniprocessor systems (Multiprocessor/HT).
1319 + * This means we use it for all AR5416 devices, and the few
1320 + * minor PCI AR9280 devices out there.
1321 + *
1322 + * Serialization is required because these devices do not handle
1323 + * well the case of two concurrent reads/writes due to the latency
1324 + * involved. During one read/write another read/write can be issued
1325 + * on another CPU while the previous read/write may still be working
1326 + * on our hardware, if we hit this case the hardware poops in a loop.
1327 + * We prevent this by serializing reads and writes.
1328 + *
1329 + * This issue is not present on PCI-Express devices or pre-AR5416
1330 + * devices (legacy, 802.11abg).
1331 + */
1332 + if (num_possible_cpus() > 1)
1333 + ah->ah_config.serialize_regmode = SER_REG_MODE_AUTO;
1334 }
1335
1336 static inline void ath9k_hw_override_ini(struct ath_hal *ah,
1337 @@ -3294,7 +3313,8 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid,
1338 }
1339
1340 if (ah->ah_config.serialize_regmode == SER_REG_MODE_AUTO) {
1341 - if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) {
1342 + if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI ||
1343 + (AR_SREV_9280(ah) && !ah->ah_isPciExpress)) {
1344 ah->ah_config.serialize_regmode =
1345 SER_REG_MODE_ON;
1346 } else {
1347 diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
1348 index 9dda816..4102aaa 100644
1349 --- a/drivers/net/wireless/b43/xmit.c
1350 +++ b/drivers/net/wireless/b43/xmit.c
1351 @@ -51,7 +51,7 @@ static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
1352 }
1353
1354 /* Extract the bitrate index out of an OFDM PLCP header. */
1355 -static u8 b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
1356 +static int b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
1357 {
1358 int base = aphy ? 0 : 4;
1359
1360 diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
1361 index 55ac5c3..cb009f3 100644
1362 --- a/drivers/scsi/libiscsi.c
1363 +++ b/drivers/scsi/libiscsi.c
1364 @@ -1807,12 +1807,14 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
1365 num_arrays++;
1366 q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
1367 if (q->pool == NULL)
1368 - goto enomem;
1369 + return -ENOMEM;
1370
1371 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
1372 GFP_KERNEL, NULL);
1373 - if (q->queue == ERR_PTR(-ENOMEM))
1374 + if (IS_ERR(q->queue)) {
1375 + q->queue = NULL;
1376 goto enomem;
1377 + }
1378
1379 for (i = 0; i < max; i++) {
1380 q->pool[i] = kzalloc(item_size, GFP_KERNEL);
1381 @@ -1842,8 +1844,7 @@ void iscsi_pool_free(struct iscsi_pool *q)
1382
1383 for (i = 0; i < q->max; i++)
1384 kfree(q->pool[i]);
1385 - if (q->pool)
1386 - kfree(q->pool);
1387 + kfree(q->pool);
1388 kfree(q->queue);
1389 }
1390 EXPORT_SYMBOL_GPL(iscsi_pool_free);
1391 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
1392 index 023a4e9..abbdb31 100644
1393 --- a/drivers/usb/class/cdc-wdm.c
1394 +++ b/drivers/usb/class/cdc-wdm.c
1395 @@ -641,7 +641,7 @@ next_desc:
1396
1397 iface = &intf->altsetting[0];
1398 ep = &iface->endpoint[0].desc;
1399 - if (!usb_endpoint_is_int_in(ep)) {
1400 + if (!ep || !usb_endpoint_is_int_in(ep)) {
1401 rv = -EINVAL;
1402 goto err;
1403 }
1404 diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
1405 index 3a8bb53..fd7b356 100644
1406 --- a/drivers/usb/gadget/f_rndis.c
1407 +++ b/drivers/usb/gadget/f_rndis.c
1408 @@ -437,7 +437,7 @@ invalid:
1409 DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
1410 ctrl->bRequestType, ctrl->bRequest,
1411 w_value, w_index, w_length);
1412 - req->zero = 0;
1413 + req->zero = (value < w_length);
1414 req->length = value;
1415 value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
1416 if (value < 0)
1417 diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
1418 index 38a55af..bb3d7c3 100644
1419 --- a/drivers/usb/gadget/u_ether.c
1420 +++ b/drivers/usb/gadget/u_ether.c
1421 @@ -175,12 +175,6 @@ static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
1422 strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
1423 }
1424
1425 -static u32 eth_get_link(struct net_device *net)
1426 -{
1427 - struct eth_dev *dev = netdev_priv(net);
1428 - return dev->gadget->speed != USB_SPEED_UNKNOWN;
1429 -}
1430 -
1431 /* REVISIT can also support:
1432 * - WOL (by tracking suspends and issuing remote wakeup)
1433 * - msglevel (implies updated messaging)
1434 @@ -189,7 +183,7 @@ static u32 eth_get_link(struct net_device *net)
1435
1436 static struct ethtool_ops ops = {
1437 .get_drvinfo = eth_get_drvinfo,
1438 - .get_link = eth_get_link
1439 + .get_link = ethtool_op_get_link,
1440 };
1441
1442 static void defer_kevent(struct eth_dev *dev, int flag)
1443 diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
1444 index ecc9b66..01132ac 100644
1445 --- a/drivers/usb/host/ehci-q.c
1446 +++ b/drivers/usb/host/ehci-q.c
1447 @@ -333,12 +333,40 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
1448 token = hc32_to_cpu(ehci, qtd->hw_token);
1449
1450 /* always clean up qtds the hc de-activated */
1451 + retry_xacterr:
1452 if ((token & QTD_STS_ACTIVE) == 0) {
1453
1454 /* on STALL, error, and short reads this urb must
1455 * complete and all its qtds must be recycled.
1456 */
1457 if ((token & QTD_STS_HALT) != 0) {
1458 +
1459 + /* retry transaction errors until we
1460 + * reach the software xacterr limit
1461 + */
1462 + if ((token & QTD_STS_XACT) &&
1463 + QTD_CERR(token) == 0 &&
1464 + --qh->xacterrs > 0 &&
1465 + !urb->unlinked) {
1466 + ehci_dbg(ehci,
1467 + "detected XactErr len %d/%d retry %d\n",
1468 + qtd->length - QTD_LENGTH(token), qtd->length,
1469 + QH_XACTERR_MAX - qh->xacterrs);
1470 +
1471 + /* reset the token in the qtd and the
1472 + * qh overlay (which still contains
1473 + * the qtd) so that we pick up from
1474 + * where we left off
1475 + */
1476 + token &= ~QTD_STS_HALT;
1477 + token |= QTD_STS_ACTIVE |
1478 + (EHCI_TUNE_CERR << 10);
1479 + qtd->hw_token = cpu_to_hc32(ehci,
1480 + token);
1481 + wmb();
1482 + qh->hw_token = cpu_to_hc32(ehci, token);
1483 + goto retry_xacterr;
1484 + }
1485 stopped = 1;
1486
1487 /* magic dummy for some short reads; qh won't advance.
1488 @@ -421,6 +449,9 @@ halt:
1489 /* remove qtd; it's recycled after possible urb completion */
1490 list_del (&qtd->qtd_list);
1491 last = qtd;
1492 +
1493 + /* reinit the xacterr counter for the next qtd */
1494 + qh->xacterrs = QH_XACTERR_MAX;
1495 }
1496
1497 /* last urb's completion might still need calling */
1498 @@ -862,6 +893,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1499 head->qh_next.qh = qh;
1500 head->hw_next = dma;
1501
1502 + qh->xacterrs = QH_XACTERR_MAX;
1503 qh->qh_state = QH_STATE_LINKED;
1504 /* qtd completions reported later by interrupt */
1505 }
1506 diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
1507 index 33459d7..c165fe4 100644
1508 --- a/drivers/usb/host/ehci.h
1509 +++ b/drivers/usb/host/ehci.h
1510 @@ -500,6 +500,9 @@ struct ehci_qh {
1511 #define QH_STATE_UNLINK_WAIT 4 /* LINKED and on reclaim q */
1512 #define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
1513
1514 + u8 xacterrs; /* XactErr retry counter */
1515 +#define QH_XACTERR_MAX 32 /* XactErr retry limit */
1516 +
1517 /* periodic schedule info */
1518 u8 usecs; /* intr bandwidth */
1519 u8 gap_uf; /* uframes split/csplit gap */
1520 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1521 index 2d78712..13b34bc 100644
1522 --- a/drivers/usb/serial/ftdi_sio.c
1523 +++ b/drivers/usb/serial/ftdi_sio.c
1524 @@ -662,6 +662,7 @@ static struct usb_device_id id_table_combined [] = {
1525 { USB_DEVICE(DE_VID, WHT_PID) },
1526 { USB_DEVICE(ADI_VID, ADI_GNICE_PID),
1527 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1528 + { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
1529 { }, /* Optional parameter entry */
1530 { } /* Terminating entry */
1531 };
1532 diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
1533 index 80fa76e..3425122 100644
1534 --- a/drivers/usb/serial/ftdi_sio.h
1535 +++ b/drivers/usb/serial/ftdi_sio.h
1536 @@ -890,6 +890,13 @@
1537 #define ADI_GNICE_PID 0xF000
1538
1539 /*
1540 + * JETI SPECTROMETER SPECBOS 1201
1541 + * http://www.jeti.com/products/sys/scb/scb1201.php
1542 + */
1543 +#define JETI_VID 0x0c6c
1544 +#define JETI_SPC1201_PID 0x04b2
1545 +
1546 +/*
1547 * BmRequestType: 1100 0000b
1548 * bRequest: FTDI_E2_READ
1549 * wValue: 0
1550 diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
1551 index 898e67d..9466a99 100644
1552 --- a/drivers/usb/storage/cypress_atacb.c
1553 +++ b/drivers/usb/storage/cypress_atacb.c
1554 @@ -133,19 +133,18 @@ void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
1555
1556 /* build the command for
1557 * reading the ATA registers */
1558 - scsi_eh_prep_cmnd(srb, &ses, NULL, 0, 0);
1559 - srb->sdb.length = sizeof(regs);
1560 - sg_init_one(&ses.sense_sgl, regs, srb->sdb.length);
1561 - srb->sdb.table.sgl = &ses.sense_sgl;
1562 - srb->sc_data_direction = DMA_FROM_DEVICE;
1563 - srb->sdb.table.nents = 1;
1564 + scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sizeof(regs));
1565 +
1566 /* we use the same command as before, but we set
1567 * the read taskfile bit, for not executing atacb command,
1568 * but reading register selected in srb->cmnd[4]
1569 */
1570 + srb->cmd_len = 16;
1571 + srb->cmnd = ses.cmnd;
1572 srb->cmnd[2] = 1;
1573
1574 usb_stor_transparent_scsi_command(srb, us);
1575 + memcpy(regs, srb->sense_buffer, sizeof(regs));
1576 tmp_result = srb->result;
1577 scsi_eh_restore_cmnd(srb, &ses);
1578 /* we fail to get registers, report invalid command */
1579 @@ -162,8 +161,8 @@ void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
1580
1581 /* XXX we should generate sk, asc, ascq from status and error
1582 * regs
1583 - * (see 11.1 Error translation ­ ATA device error to SCSI error map)
1584 - * and ata_to_sense_error from libata.
1585 + * (see 11.1 Error translation ATA device error to SCSI error
1586 + * map, and ata_to_sense_error from libata.)
1587 */
1588
1589 /* Sense data is current and format is descriptor. */
1590 diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
1591 index 8daaace..5f484ae 100644
1592 --- a/drivers/usb/storage/scsiglue.c
1593 +++ b/drivers/usb/storage/scsiglue.c
1594 @@ -135,6 +135,12 @@ static int slave_configure(struct scsi_device *sdev)
1595 if (sdev->request_queue->max_sectors > max_sectors)
1596 blk_queue_max_sectors(sdev->request_queue,
1597 max_sectors);
1598 + } else if (sdev->type == TYPE_TAPE) {
1599 + /* Tapes need much higher max_sector limits, so just
1600 + * raise it to the maximum possible (4 GB / 512) and
1601 + * let the queue segment size sort out the real limit.
1602 + */
1603 + blk_queue_max_sectors(sdev->request_queue, 0x7FFFFF);
1604 }
1605
1606 /* We can't put these settings in slave_alloc() because that gets
1607 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1608 index d21995a..4cf1617 100644
1609 --- a/drivers/usb/storage/unusual_devs.h
1610 +++ b/drivers/usb/storage/unusual_devs.h
1611 @@ -1241,12 +1241,14 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
1612 US_SC_DEVICE, US_PR_DEVICE, NULL,
1613 US_FL_FIX_INQUIRY ),
1614
1615 -/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
1616 +/* Reported by Rauch Wolke <rauchwolke@gmx.net>
1617 + * and augmented by binbin <binbinsh@gmail.com> (Bugzilla #12882)
1618 + */
1619 UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
1620 "Simple Tech/Datafab",
1621 "CF+SM Reader",
1622 US_SC_DEVICE, US_PR_DEVICE, NULL,
1623 - US_FL_IGNORE_RESIDUE ),
1624 + US_FL_IGNORE_RESIDUE | US_FL_MAX_SECTORS_64 ),
1625
1626 /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
1627 * to the USB storage specification in two ways:
1628 diff --git a/fs/buffer.c b/fs/buffer.c
1629 index a542f97..a5d806d 100644
1630 --- a/fs/buffer.c
1631 +++ b/fs/buffer.c
1632 @@ -3042,7 +3042,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
1633 if (test_clear_buffer_dirty(bh)) {
1634 get_bh(bh);
1635 bh->b_end_io = end_buffer_write_sync;
1636 - ret = submit_bh(WRITE_SYNC, bh);
1637 + ret = submit_bh(WRITE, bh);
1638 wait_on_buffer(bh);
1639 if (buffer_eopnotsupp(bh)) {
1640 clear_buffer_eopnotsupp(bh);
1641 diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
1642 index 088e9ae..455257e 100644
1643 --- a/fs/cifs/CHANGES
1644 +++ b/fs/cifs/CHANGES
1645 @@ -1,4 +1,7 @@
1646 Fix oops in cifs_dfs_ref.c when prefixpath is not reachable when using DFS.
1647 +Fix "redzone overwritten" bug in cifs_put_tcon (CIFSTcon may allocate too
1648 +little memory for the "nativeFileSystem" field returned by the server
1649 +during mount).
1650
1651 Version 1.54
1652 ------------
1653 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
1654 index 8f0f86d..9231e0a 100644
1655 --- a/fs/cifs/cifssmb.c
1656 +++ b/fs/cifs/cifssmb.c
1657 @@ -2348,8 +2348,10 @@ winCreateHardLinkRetry:
1658 PATH_MAX, nls_codepage, remap);
1659 name_len++; /* trailing null */
1660 name_len *= 2;
1661 - pSMB->OldFileName[name_len] = 0; /* pad */
1662 - pSMB->OldFileName[name_len + 1] = 0x04;
1663 +
1664 + /* protocol specifies ASCII buffer format (0x04) for unicode */
1665 + pSMB->OldFileName[name_len] = 0x04;
1666 + pSMB->OldFileName[name_len + 1] = 0x00; /* pad */
1667 name_len2 =
1668 cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
1669 toName, PATH_MAX, nls_codepage, remap);
1670 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
1671 index f254235..21a1abf 100644
1672 --- a/fs/cifs/connect.c
1673 +++ b/fs/cifs/connect.c
1674 @@ -3549,7 +3549,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
1675 BCC(smb_buffer_response)) {
1676 kfree(tcon->nativeFileSystem);
1677 tcon->nativeFileSystem =
1678 - kzalloc(length + 2, GFP_KERNEL);
1679 + kzalloc(2*(length + 1), GFP_KERNEL);
1680 if (tcon->nativeFileSystem)
1681 cifs_strfromUCS_le(
1682 tcon->nativeFileSystem,
1683 diff --git a/fs/compat.c b/fs/compat.c
1684 index a76455b..a53216d 100644
1685 --- a/fs/compat.c
1686 +++ b/fs/compat.c
1687 @@ -1353,12 +1353,17 @@ int compat_do_execve(char * filename,
1688 {
1689 struct linux_binprm *bprm;
1690 struct file *file;
1691 + struct files_struct *displaced;
1692 int retval;
1693
1694 + retval = unshare_files(&displaced);
1695 + if (retval)
1696 + goto out_ret;
1697 +
1698 retval = -ENOMEM;
1699 bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1700 if (!bprm)
1701 - goto out_ret;
1702 + goto out_files;
1703
1704 file = open_exec(filename);
1705 retval = PTR_ERR(file);
1706 @@ -1410,6 +1415,8 @@ int compat_do_execve(char * filename,
1707 security_bprm_free(bprm);
1708 acct_update_integrals(current);
1709 free_bprm(bprm);
1710 + if (displaced)
1711 + put_files_struct(displaced);
1712 return retval;
1713 }
1714
1715 @@ -1430,6 +1437,9 @@ out_file:
1716 out_kfree:
1717 free_bprm(bprm);
1718
1719 +out_files:
1720 + if (displaced)
1721 + reset_files_struct(displaced);
1722 out_ret:
1723 return retval;
1724 }
1725 diff --git a/fs/dquot.c b/fs/dquot.c
1726 index 8ec4d6c..150fc96 100644
1727 --- a/fs/dquot.c
1728 +++ b/fs/dquot.c
1729 @@ -726,7 +726,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
1730 continue;
1731 if (!dqinit_needed(inode, type))
1732 continue;
1733 - if (inode->i_state & (I_FREEING|I_WILL_FREE))
1734 + if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
1735 continue;
1736
1737 __iget(inode);
1738 diff --git a/fs/drop_caches.c b/fs/drop_caches.c
1739 index 3e5637f..f7e66c0 100644
1740 --- a/fs/drop_caches.c
1741 +++ b/fs/drop_caches.c
1742 @@ -18,7 +18,7 @@ static void drop_pagecache_sb(struct super_block *sb)
1743
1744 spin_lock(&inode_lock);
1745 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1746 - if (inode->i_state & (I_FREEING|I_WILL_FREE))
1747 + if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
1748 continue;
1749 if (inode->i_mapping->nrpages == 0)
1750 continue;
1751 diff --git a/fs/exec.c b/fs/exec.c
1752 index 5027d01..5ec0f56 100644
1753 --- a/fs/exec.c
1754 +++ b/fs/exec.c
1755 @@ -1089,9 +1089,7 @@ static int unsafe_exec(struct task_struct *p)
1756 {
1757 int unsafe = tracehook_unsafe_exec(p);
1758
1759 - if (atomic_read(&p->fs->count) > 1 ||
1760 - atomic_read(&p->files->count) > 1 ||
1761 - atomic_read(&p->sighand->count) > 1)
1762 + if (atomic_read(&p->fs->count) > 1)
1763 unsafe |= LSM_UNSAFE_SHARE;
1764
1765 return unsafe;
1766 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
1767 index 6caa023..6b60261 100644
1768 --- a/fs/fs-writeback.c
1769 +++ b/fs/fs-writeback.c
1770 @@ -538,7 +538,8 @@ void generic_sync_sb_inodes(struct super_block *sb,
1771 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1772 struct address_space *mapping;
1773
1774 - if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
1775 + if (inode->i_state &
1776 + (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
1777 continue;
1778 mapping = inode->i_mapping;
1779 if (mapping->nrpages == 0)
1780 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
1781 index 3f58923..a12a6b3 100644
1782 --- a/fs/hugetlbfs/inode.c
1783 +++ b/fs/hugetlbfs/inode.c
1784 @@ -26,7 +26,6 @@
1785 #include <linux/pagevec.h>
1786 #include <linux/parser.h>
1787 #include <linux/mman.h>
1788 -#include <linux/quotaops.h>
1789 #include <linux/slab.h>
1790 #include <linux/dnotify.h>
1791 #include <linux/statfs.h>
1792 @@ -838,7 +837,7 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
1793 bad_val:
1794 printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
1795 args[0].from, p);
1796 - return 1;
1797 + return -EINVAL;
1798 }
1799
1800 static int
1801 diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
1802 index ec2ed15..6f7ea0a 100644
1803 --- a/fs/ocfs2/file.c
1804 +++ b/fs/ocfs2/file.c
1805 @@ -2089,7 +2089,7 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1806 out->f_path.dentry->d_name.len,
1807 out->f_path.dentry->d_name.name);
1808
1809 - inode_double_lock(inode, pipe->inode);
1810 + mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
1811
1812 ret = ocfs2_rw_lock(inode, 1);
1813 if (ret < 0) {
1814 @@ -2104,12 +2104,16 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1815 goto out_unlock;
1816 }
1817
1818 + if (pipe->inode)
1819 + mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
1820 ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags);
1821 + if (pipe->inode)
1822 + mutex_unlock(&pipe->inode->i_mutex);
1823
1824 out_unlock:
1825 ocfs2_rw_unlock(inode, 1);
1826 out:
1827 - inode_double_unlock(inode, pipe->inode);
1828 + mutex_unlock(&inode->i_mutex);
1829
1830 mlog_exit(ret);
1831 return ret;
1832 diff --git a/fs/proc/base.c b/fs/proc/base.c
1833 index a28840b..0e910b3 100644
1834 --- a/fs/proc/base.c
1835 +++ b/fs/proc/base.c
1836 @@ -151,15 +151,22 @@ static unsigned int pid_entry_count_dirs(const struct pid_entry *entries,
1837 int maps_protect;
1838 EXPORT_SYMBOL(maps_protect);
1839
1840 -static struct fs_struct *get_fs_struct(struct task_struct *task)
1841 +static int get_fs_path(struct task_struct *task, struct path *path, bool root)
1842 {
1843 struct fs_struct *fs;
1844 + int result = -ENOENT;
1845 +
1846 task_lock(task);
1847 fs = task->fs;
1848 - if(fs)
1849 - atomic_inc(&fs->count);
1850 + if (fs) {
1851 + read_lock(&fs->lock);
1852 + *path = root ? fs->root : fs->pwd;
1853 + path_get(path);
1854 + read_unlock(&fs->lock);
1855 + result = 0;
1856 + }
1857 task_unlock(task);
1858 - return fs;
1859 + return result;
1860 }
1861
1862 static int get_nr_threads(struct task_struct *tsk)
1863 @@ -178,42 +185,24 @@ static int get_nr_threads(struct task_struct *tsk)
1864 static int proc_cwd_link(struct inode *inode, struct path *path)
1865 {
1866 struct task_struct *task = get_proc_task(inode);
1867 - struct fs_struct *fs = NULL;
1868 int result = -ENOENT;
1869
1870 if (task) {
1871 - fs = get_fs_struct(task);
1872 + result = get_fs_path(task, path, 0);
1873 put_task_struct(task);
1874 }
1875 - if (fs) {
1876 - read_lock(&fs->lock);
1877 - *path = fs->pwd;
1878 - path_get(&fs->pwd);
1879 - read_unlock(&fs->lock);
1880 - result = 0;
1881 - put_fs_struct(fs);
1882 - }
1883 return result;
1884 }
1885
1886 static int proc_root_link(struct inode *inode, struct path *path)
1887 {
1888 struct task_struct *task = get_proc_task(inode);
1889 - struct fs_struct *fs = NULL;
1890 int result = -ENOENT;
1891
1892 if (task) {
1893 - fs = get_fs_struct(task);
1894 + result = get_fs_path(task, path, 1);
1895 put_task_struct(task);
1896 }
1897 - if (fs) {
1898 - read_lock(&fs->lock);
1899 - *path = fs->root;
1900 - path_get(&fs->root);
1901 - read_unlock(&fs->lock);
1902 - result = 0;
1903 - put_fs_struct(fs);
1904 - }
1905 return result;
1906 }
1907
1908 @@ -575,7 +564,6 @@ static int mounts_open_common(struct inode *inode, struct file *file,
1909 struct task_struct *task = get_proc_task(inode);
1910 struct nsproxy *nsp;
1911 struct mnt_namespace *ns = NULL;
1912 - struct fs_struct *fs = NULL;
1913 struct path root;
1914 struct proc_mounts *p;
1915 int ret = -EINVAL;
1916 @@ -589,22 +577,16 @@ static int mounts_open_common(struct inode *inode, struct file *file,
1917 get_mnt_ns(ns);
1918 }
1919 rcu_read_unlock();
1920 - if (ns)
1921 - fs = get_fs_struct(task);
1922 + if (ns && get_fs_path(task, &root, 1) == 0)
1923 + ret = 0;
1924 put_task_struct(task);
1925 }
1926
1927 if (!ns)
1928 goto err;
1929 - if (!fs)
1930 + if (ret)
1931 goto err_put_ns;
1932
1933 - read_lock(&fs->lock);
1934 - root = fs->root;
1935 - path_get(&root);
1936 - read_unlock(&fs->lock);
1937 - put_fs_struct(fs);
1938 -
1939 ret = -ENOMEM;
1940 p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
1941 if (!p)
1942 diff --git a/fs/splice.c b/fs/splice.c
1943 index 6d310c7..aea1eb4 100644
1944 --- a/fs/splice.c
1945 +++ b/fs/splice.c
1946 @@ -735,10 +735,19 @@ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
1947 * ->commit_write. Most of the time, these expect i_mutex to
1948 * be held. Since this may result in an ABBA deadlock with
1949 * pipe->inode, we have to order lock acquiry here.
1950 + *
1951 + * Outer lock must be inode->i_mutex, as pipe_wait() will
1952 + * release and reacquire pipe->inode->i_mutex, AND inode must
1953 + * never be a pipe.
1954 */
1955 - inode_double_lock(inode, pipe->inode);
1956 + WARN_ON(S_ISFIFO(inode->i_mode));
1957 + mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
1958 + if (pipe->inode)
1959 + mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
1960 ret = __splice_from_pipe(pipe, &sd, actor);
1961 - inode_double_unlock(inode, pipe->inode);
1962 + if (pipe->inode)
1963 + mutex_unlock(&pipe->inode->i_mutex);
1964 + mutex_unlock(&inode->i_mutex);
1965
1966 return ret;
1967 }
1968 @@ -829,11 +838,17 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
1969 };
1970 ssize_t ret;
1971
1972 - inode_double_lock(inode, pipe->inode);
1973 + WARN_ON(S_ISFIFO(inode->i_mode));
1974 + mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
1975 ret = file_remove_suid(out);
1976 - if (likely(!ret))
1977 + if (likely(!ret)) {
1978 + if (pipe->inode)
1979 + mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
1980 ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
1981 - inode_double_unlock(inode, pipe->inode);
1982 + if (pipe->inode)
1983 + mutex_unlock(&pipe->inode->i_mutex);
1984 + }
1985 + mutex_unlock(&inode->i_mutex);
1986 if (ret > 0) {
1987 unsigned long nr_pages;
1988
1989 diff --git a/include/linux/capability.h b/include/linux/capability.h
1990 index 28863f4..dc09ff6 100644
1991 --- a/include/linux/capability.h
1992 +++ b/include/linux/capability.h
1993 @@ -366,7 +366,21 @@ typedef struct kernel_cap_struct {
1994 #define CAP_FOR_EACH_U32(__capi) \
1995 for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
1996
1997 +/*
1998 + * CAP_FS_MASK and CAP_NFSD_MASKS:
1999 + *
2000 + * The fs mask is all the privileges that fsuid==0 historically meant.
2001 + * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE.
2002 + *
2003 + * It has never meant setting security.* and trusted.* xattrs.
2004 + *
2005 + * We could also define fsmask as follows:
2006 + * 1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions
2007 + * 2. The security.* and trusted.* xattrs are fs-related MAC permissions
2008 + */
2009 +
2010 # define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \
2011 + | CAP_TO_MASK(CAP_MKNOD) \
2012 | CAP_TO_MASK(CAP_DAC_OVERRIDE) \
2013 | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \
2014 | CAP_TO_MASK(CAP_FOWNER) \
2015 @@ -381,11 +395,12 @@ typedef struct kernel_cap_struct {
2016 # define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
2017 # define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }})
2018 # define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }})
2019 -# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } })
2020 +# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
2021 + | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
2022 + CAP_FS_MASK_B1 } })
2023 # define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
2024 - | CAP_TO_MASK(CAP_SYS_RESOURCE) \
2025 - | CAP_TO_MASK(CAP_MKNOD), \
2026 - CAP_FS_MASK_B1 } })
2027 + | CAP_TO_MASK(CAP_SYS_RESOURCE), \
2028 + CAP_FS_MASK_B1 } })
2029
2030 #endif /* _KERNEL_CAPABILITY_U32S != 2 */
2031
2032 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
2033 index 450684f..f5cd050 100644
2034 --- a/include/linux/pci_regs.h
2035 +++ b/include/linux/pci_regs.h
2036 @@ -234,7 +234,7 @@
2037 #define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
2038 #define PCI_PM_CTRL 4 /* PM control and status register */
2039 #define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
2040 -#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */
2041 +#define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */
2042 #define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */
2043 #define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */
2044 #define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */
2045 diff --git a/include/linux/sched.h b/include/linux/sched.h
2046 index 03e0902..09c5851 100644
2047 --- a/include/linux/sched.h
2048 +++ b/include/linux/sched.h
2049 @@ -201,7 +201,8 @@ extern unsigned long long time_sync_thresh;
2050 #define task_is_stopped_or_traced(task) \
2051 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
2052 #define task_contributes_to_load(task) \
2053 - ((task->state & TASK_UNINTERRUPTIBLE) != 0)
2054 + ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
2055 + (task->flags & PF_FROZEN) == 0)
2056
2057 #define __set_task_state(tsk, state_value) \
2058 do { (tsk)->state = (state_value); } while (0)
2059 diff --git a/kernel/exit.c b/kernel/exit.c
2060 index 14096a1..8715136 100644
2061 --- a/kernel/exit.c
2062 +++ b/kernel/exit.c
2063 @@ -938,8 +938,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
2064 */
2065 if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
2066 (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
2067 - tsk->self_exec_id != tsk->parent_exec_id) &&
2068 - !capable(CAP_KILL))
2069 + tsk->self_exec_id != tsk->parent_exec_id))
2070 tsk->exit_signal = SIGCHLD;
2071
2072 signal = tracehook_notify_death(tsk, &cookie, group_dead);
2073 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
2074 index 75bc2cd..0920fa0 100644
2075 --- a/kernel/kprobes.c
2076 +++ b/kernel/kprobes.c
2077 @@ -890,10 +890,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
2078 ri->rp = rp;
2079 ri->task = current;
2080
2081 - if (rp->entry_handler && rp->entry_handler(ri, regs)) {
2082 - spin_unlock_irqrestore(&rp->lock, flags);
2083 + if (rp->entry_handler && rp->entry_handler(ri, regs))
2084 return 0;
2085 - }
2086
2087 arch_prepare_kretprobe(ri, regs);
2088
2089 diff --git a/kernel/signal.c b/kernel/signal.c
2090 index 3d161f0..7d0a222 100644
2091 --- a/kernel/signal.c
2092 +++ b/kernel/signal.c
2093 @@ -1549,7 +1549,15 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
2094 read_lock(&tasklist_lock);
2095 if (may_ptrace_stop()) {
2096 do_notify_parent_cldstop(current, CLD_TRAPPED);
2097 + /*
2098 + * Don't want to allow preemption here, because
2099 + * sys_ptrace() needs this task to be inactive.
2100 + *
2101 + * XXX: implement read_unlock_no_resched().
2102 + */
2103 + preempt_disable();
2104 read_unlock(&tasklist_lock);
2105 + preempt_enable_no_resched();
2106 schedule();
2107 } else {
2108 /*
2109 diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
2110 index b5167df..e8b2b18 100644
2111 --- a/mm/filemap_xip.c
2112 +++ b/mm/filemap_xip.c
2113 @@ -89,8 +89,8 @@ do_xip_mapping_read(struct address_space *mapping,
2114 }
2115 }
2116 nr = nr - offset;
2117 - if (nr > len)
2118 - nr = len;
2119 + if (nr > len - copied)
2120 + nr = len - copied;
2121
2122 error = mapping->a_ops->get_xip_mem(mapping, index, 0,
2123 &xip_mem, &xip_pfn);
2124 diff --git a/mm/mmap.c b/mm/mmap.c
2125 index ca12a93..2ae093e 100644
2126 --- a/mm/mmap.c
2127 +++ b/mm/mmap.c
2128 @@ -2068,6 +2068,9 @@ void exit_mmap(struct mm_struct *mm)
2129 arch_exit_mmap(mm);
2130 mmu_notifier_release(mm);
2131
2132 + if (!mm->mmap) /* Can happen if dup_mmap() received an OOM */
2133 + return;
2134 +
2135 lru_add_drain();
2136 flush_cache_mm(mm);
2137 tlb = tlb_gather_mmu(mm, 1);
2138 diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
2139 index 63c18aa..2043c8e 100644
2140 --- a/net/bridge/br_if.c
2141 +++ b/net/bridge/br_if.c
2142 @@ -419,7 +419,6 @@ err2:
2143 err1:
2144 kobject_del(&p->kobj);
2145 err0:
2146 - kobject_put(&p->kobj);
2147 dev_set_promiscuity(dev, -1);
2148 put_back:
2149 dev_put(dev);
2150 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
2151 index 03e83a6..6671485 100644
2152 --- a/net/ipv4/netfilter/arp_tables.c
2153 +++ b/net/ipv4/netfilter/arp_tables.c
2154 @@ -372,7 +372,9 @@ static int mark_source_chains(struct xt_table_info *newinfo,
2155 && unconditional(&e->arp)) || visited) {
2156 unsigned int oldpos, size;
2157
2158 - if (t->verdict < -NF_MAX_VERDICT - 1) {
2159 + if ((strcmp(t->target.u.user.name,
2160 + ARPT_STANDARD_TARGET) == 0) &&
2161 + t->verdict < -NF_MAX_VERDICT - 1) {
2162 duprintf("mark_source_chains: bad "
2163 "negative verdict (%i)\n",
2164 t->verdict);
2165 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
2166 index 4e7c719..b56a203 100644
2167 --- a/net/ipv4/netfilter/ip_tables.c
2168 +++ b/net/ipv4/netfilter/ip_tables.c
2169 @@ -502,7 +502,9 @@ mark_source_chains(struct xt_table_info *newinfo,
2170 && unconditional(&e->ip)) || visited) {
2171 unsigned int oldpos, size;
2172
2173 - if (t->verdict < -NF_MAX_VERDICT - 1) {
2174 + if ((strcmp(t->target.u.user.name,
2175 + IPT_STANDARD_TARGET) == 0) &&
2176 + t->verdict < -NF_MAX_VERDICT - 1) {
2177 duprintf("mark_source_chains: bad "
2178 "negative verdict (%i)\n",
2179 t->verdict);
2180 diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
2181 index 1646a56..4494cb6 100644
2182 --- a/net/ipv6/inet6_hashtables.c
2183 +++ b/net/ipv6/inet6_hashtables.c
2184 @@ -210,11 +210,11 @@ unique:
2185
2186 if (twp != NULL) {
2187 *twp = tw;
2188 - NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
2189 + NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
2190 } else if (tw != NULL) {
2191 /* Silly. Should hash-dance instead... */
2192 inet_twsk_deschedule(tw, death_row);
2193 - NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITRECYCLED);
2194 + NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
2195
2196 inet_twsk_put(tw);
2197 }
2198 diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
2199 index 7e14ccc..e55427d 100644
2200 --- a/net/ipv6/ip6_input.c
2201 +++ b/net/ipv6/ip6_input.c
2202 @@ -74,8 +74,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
2203 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
2204 !idev || unlikely(idev->cnf.disable_ipv6)) {
2205 IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS);
2206 - rcu_read_unlock();
2207 - goto out;
2208 + goto drop;
2209 }
2210
2211 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
2212 @@ -145,7 +144,6 @@ err:
2213 drop:
2214 rcu_read_unlock();
2215 kfree_skb(skb);
2216 -out:
2217 return 0;
2218 }
2219
2220 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
2221 index 0b4557e..81751c3 100644
2222 --- a/net/ipv6/netfilter/ip6_tables.c
2223 +++ b/net/ipv6/netfilter/ip6_tables.c
2224 @@ -529,7 +529,9 @@ mark_source_chains(struct xt_table_info *newinfo,
2225 && unconditional(&e->ipv6)) || visited) {
2226 unsigned int oldpos, size;
2227
2228 - if (t->verdict < -NF_MAX_VERDICT - 1) {
2229 + if ((strcmp(t->target.u.user.name,
2230 + IP6T_STANDARD_TARGET) == 0) &&
2231 + t->verdict < -NF_MAX_VERDICT - 1) {
2232 duprintf("mark_source_chains: bad "
2233 "negative verdict (%i)\n",
2234 t->verdict);
2235 diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
2236 index 6f61261..fcbc6b9 100644
2237 --- a/net/netfilter/nf_conntrack_proto_tcp.c
2238 +++ b/net/netfilter/nf_conntrack_proto_tcp.c
2239 @@ -15,6 +15,7 @@
2240 #include <linux/skbuff.h>
2241 #include <linux/ipv6.h>
2242 #include <net/ip6_checksum.h>
2243 +#include <asm/unaligned.h>
2244
2245 #include <net/tcp.h>
2246
2247 @@ -466,7 +467,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
2248 for (i = 0;
2249 i < (opsize - TCPOLEN_SACK_BASE);
2250 i += TCPOLEN_SACK_PERBLOCK) {
2251 - tmp = ntohl(*((__be32 *)(ptr+i)+1));
2252 + tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
2253
2254 if (after(tmp, *sack))
2255 *sack = tmp;
2256 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
2257 index 9f1ea4a..db9e263 100644
2258 --- a/net/netrom/af_netrom.c
2259 +++ b/net/netrom/af_netrom.c
2260 @@ -1082,7 +1082,13 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
2261
2262 SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n");
2263
2264 - /* Build a packet */
2265 + /* Build a packet - the conventional user limit is 236 bytes. We can
2266 + do ludicrously large NetROM frames but must not overflow */
2267 + if (len > 65536) {
2268 + err = -EMSGSIZE;
2269 + goto out;
2270 + }
2271 +
2272 SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
2273 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
2274
2275 diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
2276 index a7f1ce1..c062361 100644
2277 --- a/net/rose/af_rose.c
2278 +++ b/net/rose/af_rose.c
2279 @@ -1120,6 +1120,10 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
2280
2281 /* Build a packet */
2282 SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
2283 + /* Sanity check the packet size */
2284 + if (len > 65535)
2285 + return -EMSGSIZE;
2286 +
2287 size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
2288
2289 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
2290 diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
2291 index 4c8d9f4..905fda5 100644
2292 --- a/net/sctp/endpointola.c
2293 +++ b/net/sctp/endpointola.c
2294 @@ -111,7 +111,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
2295 if (sctp_addip_enable) {
2296 auth_chunks->chunks[0] = SCTP_CID_ASCONF;
2297 auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
2298 - auth_chunks->param_hdr.length += htons(2);
2299 + auth_chunks->param_hdr.length =
2300 + htons(sizeof(sctp_paramhdr_t) + 2);
2301 }
2302 }
2303
2304 diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
2305 index 9fc5b02..88d80f5 100644
2306 --- a/net/x25/af_x25.c
2307 +++ b/net/x25/af_x25.c
2308 @@ -1037,6 +1037,12 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock,
2309 sx25.sx25_addr = x25->dest_addr;
2310 }
2311
2312 + /* Sanity check the packet size */
2313 + if (len > 65535) {
2314 + rc = -EMSGSIZE;
2315 + goto out;
2316 + }
2317 +
2318 SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");
2319
2320 /* Build a packet */
2321 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
2322 index 03fc6a8..f028f70 100644
2323 --- a/security/selinux/hooks.c
2324 +++ b/security/selinux/hooks.c
2325 @@ -4467,6 +4467,7 @@ static int selinux_ip_postroute_iptables_compat(struct sock *sk,
2326 if (err)
2327 return err;
2328 err = avc_has_perm(sk_sid, if_sid, SECCLASS_NETIF, netif_perm, ad);
2329 + if (err)
2330 return err;
2331
2332 err = sel_netnode_sid(addrp, family, &node_sid);
2333 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
2334 index 87d7541..b802923 100644
2335 --- a/security/smack/smack_lsm.c
2336 +++ b/security/smack/smack_lsm.c
2337 @@ -604,6 +604,8 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
2338 strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) {
2339 if (!capable(CAP_MAC_ADMIN))
2340 rc = -EPERM;
2341 + if (size == 0)
2342 + rc = -EINVAL;
2343 } else
2344 rc = cap_inode_setxattr(dentry, name, value, size, flags);
2345
2346 @@ -1360,7 +1362,7 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
2347 struct socket *sock;
2348 int rc = 0;
2349
2350 - if (value == NULL || size > SMK_LABELLEN)
2351 + if (value == NULL || size > SMK_LABELLEN || size == 0)
2352 return -EACCES;
2353
2354 sp = smk_import(value, size);
2355 diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
2356 index 8c857d5..026f723 100644
2357 --- a/sound/pci/hda/patch_analog.c
2358 +++ b/sound/pci/hda/patch_analog.c
2359 @@ -3220,7 +3220,7 @@ static const char *ad1884_slave_vols[] = {
2360 "Mic Playback Volume",
2361 "CD Playback Volume",
2362 "Internal Mic Playback Volume",
2363 - "Docking Mic Playback Volume"
2364 + "Docking Mic Playback Volume",
2365 "Beep Playback Volume",
2366 "IEC958 Playback Volume",
2367 NULL