Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.10/0172-3.10.73-all-fixes.patch



Revision 2660
Tue Jul 21 16:20:23 2015 UTC by niro
File size: 59416 byte(s)
-linux-3.10.73
1 niro 2660 diff --git a/Makefile b/Makefile
2     index 211bb34102bf..b1129094ebfd 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 10
8     -SUBLEVEL = 72
9     +SUBLEVEL = 73
10     EXTRAVERSION =
11     NAME = TOSSUG Baby Fish
12    
13     diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
14     index 2f5908f0b8c5..d8af0755bddc 100644
15     --- a/arch/arm/mach-at91/pm.h
16     +++ b/arch/arm/mach-at91/pm.h
17     @@ -37,7 +37,7 @@ static inline void at91rm9200_standby(void)
18     " mcr p15, 0, %0, c7, c0, 4\n\t"
19     " str %5, [%1, %2]"
20     :
21     - : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR),
22     + : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR),
23     "r" (1), "r" (AT91RM9200_SDRAMC_SRR),
24     "r" (lpr));
25     }
26     diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
27     index ee7ac5e6e28a..c5c640779549 100644
28     --- a/arch/powerpc/kernel/smp.c
29     +++ b/arch/powerpc/kernel/smp.c
30     @@ -544,8 +544,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
31     if (smp_ops->give_timebase)
32     smp_ops->give_timebase();
33    
34     - /* Wait until cpu puts itself in the online map */
35     - while (!cpu_online(cpu))
36     + /* Wait until cpu puts itself in the online & active maps */
37     + while (!cpu_online(cpu) || !cpu_active(cpu))
38     cpu_relax();
39    
40     return 0;
41     diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
42     index b5c38faa4ead..d461b7ddf30e 100644
43     --- a/arch/sparc/kernel/perf_event.c
44     +++ b/arch/sparc/kernel/perf_event.c
45     @@ -960,6 +960,8 @@ out:
46     cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
47     }
48    
49     +static void sparc_pmu_start(struct perf_event *event, int flags);
50     +
51     /* On this PMU each PIC has it's own PCR control register. */
52     static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
53     {
54     @@ -972,20 +974,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
55     struct perf_event *cp = cpuc->event[i];
56     struct hw_perf_event *hwc = &cp->hw;
57     int idx = hwc->idx;
58     - u64 enc;
59    
60     if (cpuc->current_idx[i] != PIC_NO_INDEX)
61     continue;
62    
63     - sparc_perf_event_set_period(cp, hwc, idx);
64     cpuc->current_idx[i] = idx;
65    
66     - enc = perf_event_get_enc(cpuc->events[i]);
67     - cpuc->pcr[idx] &= ~mask_for_index(idx);
68     - if (hwc->state & PERF_HES_STOPPED)
69     - cpuc->pcr[idx] |= nop_for_index(idx);
70     - else
71     - cpuc->pcr[idx] |= event_encoding(enc, idx);
72     + sparc_pmu_start(cp, PERF_EF_RELOAD);
73     }
74     out:
75     for (i = 0; i < cpuc->n_events; i++) {
76     @@ -1101,7 +1096,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
77     int i;
78    
79     local_irq_save(flags);
80     - perf_pmu_disable(event->pmu);
81    
82     for (i = 0; i < cpuc->n_events; i++) {
83     if (event == cpuc->event[i]) {
84     @@ -1127,7 +1121,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
85     }
86     }
87    
88     - perf_pmu_enable(event->pmu);
89     local_irq_restore(flags);
90     }
91    
92     @@ -1361,7 +1354,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
93     unsigned long flags;
94    
95     local_irq_save(flags);
96     - perf_pmu_disable(event->pmu);
97    
98     n0 = cpuc->n_events;
99     if (n0 >= sparc_pmu->max_hw_events)
100     @@ -1394,7 +1386,6 @@ nocheck:
101    
102     ret = 0;
103     out:
104     - perf_pmu_enable(event->pmu);
105     local_irq_restore(flags);
106     return ret;
107     }
108     diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
109     index b9cc9763faf4..036e43cef6fb 100644
110     --- a/arch/sparc/kernel/process_64.c
111     +++ b/arch/sparc/kernel/process_64.c
112     @@ -280,6 +280,8 @@ void arch_trigger_all_cpu_backtrace(void)
113     printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
114     gp->tpc, gp->o7, gp->i7, gp->rpc);
115     }
116     +
117     + touch_nmi_watchdog();
118     }
119    
120     memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
121     @@ -352,6 +354,8 @@ static void pmu_snapshot_all_cpus(void)
122     (cpu == this_cpu ? '*' : ' '), cpu,
123     pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
124     pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
125     +
126     + touch_nmi_watchdog();
127     }
128    
129     memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
130     diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
131     index 2daaaa6eda23..be8db9bb7878 100644
132     --- a/arch/sparc/kernel/sys_sparc_64.c
133     +++ b/arch/sparc/kernel/sys_sparc_64.c
134     @@ -336,7 +336,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
135     long err;
136    
137     /* No need for backward compatibility. We can start fresh... */
138     - if (call <= SEMCTL) {
139     + if (call <= SEMTIMEDOP) {
140     switch (call) {
141     case SEMOP:
142     err = sys_semtimedop(first, ptr,
143     diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
144     index b7f6334e159f..857ad4f8905f 100644
145     --- a/arch/sparc/lib/memmove.S
146     +++ b/arch/sparc/lib/memmove.S
147     @@ -8,9 +8,11 @@
148    
149     .text
150     ENTRY(memmove) /* o0=dst o1=src o2=len */
151     - mov %o0, %g1
152     + brz,pn %o2, 99f
153     + mov %o0, %g1
154     +
155     cmp %o0, %o1
156     - bleu,pt %xcc, memcpy
157     + bleu,pt %xcc, 2f
158     add %o1, %o2, %g7
159     cmp %g7, %o0
160     bleu,pt %xcc, memcpy
161     @@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
162     stb %g7, [%o0]
163     bne,pt %icc, 1b
164     sub %o0, 1, %o0
165     -
166     +99:
167     retl
168     mov %g1, %o0
169     +
170     + /* We can't just call memcpy for these memmove cases. On some
171     + * chips the memcpy uses cache initializing stores and when dst
172     + * and src are close enough, those can clobber the source data
173     + * before we've loaded it in.
174     + */
175     +2: or %o0, %o1, %g7
176     + or %o2, %g7, %g7
177     + andcc %g7, 0x7, %g0
178     + bne,pn %xcc, 4f
179     + nop
180     +
181     +3: ldx [%o1], %g7
182     + add %o1, 8, %o1
183     + subcc %o2, 8, %o2
184     + add %o0, 8, %o0
185     + bne,pt %icc, 3b
186     + stx %g7, [%o0 - 0x8]
187     + ba,a,pt %xcc, 99b
188     +
189     +4: ldub [%o1], %g7
190     + add %o1, 1, %o1
191     + subcc %o2, 1, %o2
192     + add %o0, 1, %o0
193     + bne,pt %icc, 4b
194     + stb %g7, [%o0 - 0x1]
195     + ba,a,pt %xcc, 99b
196     ENDPROC(memmove)
197     diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
198     index 036c2797dece..f58cb540ff94 100644
199     --- a/arch/sparc/mm/srmmu.c
200     +++ b/arch/sparc/mm/srmmu.c
201     @@ -455,10 +455,12 @@ static void __init sparc_context_init(int numctx)
202     void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
203     struct task_struct *tsk)
204     {
205     + unsigned long flags;
206     +
207     if (mm->context == NO_CONTEXT) {
208     - spin_lock(&srmmu_context_spinlock);
209     + spin_lock_irqsave(&srmmu_context_spinlock, flags);
210     alloc_context(old_mm, mm);
211     - spin_unlock(&srmmu_context_spinlock);
212     + spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
213     srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
214     }
215    
216     @@ -983,14 +985,15 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
217    
218     void destroy_context(struct mm_struct *mm)
219     {
220     + unsigned long flags;
221    
222     if (mm->context != NO_CONTEXT) {
223     flush_cache_mm(mm);
224     srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
225     flush_tlb_mm(mm);
226     - spin_lock(&srmmu_context_spinlock);
227     + spin_lock_irqsave(&srmmu_context_spinlock, flags);
228     free_context(mm->context);
229     - spin_unlock(&srmmu_context_spinlock);
230     + spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
231     mm->context = NO_CONTEXT;
232     }
233     }
234     diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
235     index f89e7490d303..990c9699b662 100644
236     --- a/arch/x86/crypto/aesni-intel_glue.c
237     +++ b/arch/x86/crypto/aesni-intel_glue.c
238     @@ -989,7 +989,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
239     src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
240     if (!src)
241     return -ENOMEM;
242     - assoc = (src + req->cryptlen + auth_tag_len);
243     + assoc = (src + req->cryptlen);
244     scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
245     scatterwalk_map_and_copy(assoc, req->assoc, 0,
246     req->assoclen, 0);
247     @@ -1014,7 +1014,7 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
248     scatterwalk_done(&src_sg_walk, 0, 0);
249     scatterwalk_done(&assoc_sg_walk, 0, 0);
250     } else {
251     - scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
252     + scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
253     kfree(src);
254     }
255     return retval;
256     diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
257     index e72b2e41499e..1b2fc5cf1963 100644
258     --- a/arch/x86/include/asm/fpu-internal.h
259     +++ b/arch/x86/include/asm/fpu-internal.h
260     @@ -370,7 +370,7 @@ static inline void drop_fpu(struct task_struct *tsk)
261     preempt_disable();
262     tsk->fpu_counter = 0;
263     __drop_fpu(tsk);
264     - clear_used_math();
265     + clear_stopped_child_used_math(tsk);
266     preempt_enable();
267     }
268    
269     diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
270     index 1ee723298e90..92f37e7683c5 100644
271     --- a/arch/x86/kernel/xsave.c
272     +++ b/arch/x86/kernel/xsave.c
273     @@ -376,7 +376,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
274     * thread's fpu state, reconstruct fxstate from the fsave
275     * header. Sanitize the copied state etc.
276     */
277     - struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
278     + struct fpu *fpu = &tsk->thread.fpu;
279     struct user_i387_ia32_struct env;
280     int err = 0;
281    
282     @@ -390,14 +390,15 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
283     */
284     drop_fpu(tsk);
285    
286     - if (__copy_from_user(xsave, buf_fx, state_size) ||
287     + if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
288     __copy_from_user(&env, buf, sizeof(env))) {
289     + fpu_finit(fpu);
290     err = -1;
291     } else {
292     sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
293     - set_used_math();
294     }
295    
296     + set_used_math();
297     if (use_eager_fpu()) {
298     preempt_disable();
299     math_state_restore();
300     diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
301     index 31776d0efc8c..d7ec4e251c0a 100644
302     --- a/arch/x86/vdso/vdso32/sigreturn.S
303     +++ b/arch/x86/vdso/vdso32/sigreturn.S
304     @@ -17,6 +17,7 @@
305     .text
306     .globl __kernel_sigreturn
307     .type __kernel_sigreturn,@function
308     + nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
309     ALIGN
310     __kernel_sigreturn:
311     .LSTART_sigreturn:
312     diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
313     index 538856f3e68a..09df26f9621d 100644
314     --- a/drivers/char/tpm/tpm_ibmvtpm.c
315     +++ b/drivers/char/tpm/tpm_ibmvtpm.c
316     @@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
317     {
318     struct ibmvtpm_dev *ibmvtpm;
319     struct ibmvtpm_crq crq;
320     - u64 *word = (u64 *) &crq;
321     + __be64 *word = (__be64 *)&crq;
322     int rc;
323    
324     ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip);
325     @@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
326     memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
327     crq.valid = (u8)IBMVTPM_VALID_CMD;
328     crq.msg = (u8)VTPM_TPM_COMMAND;
329     - crq.len = (u16)count;
330     - crq.data = ibmvtpm->rtce_dma_handle;
331     + crq.len = cpu_to_be16(count);
332     + crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
333    
334     - rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
335     - cpu_to_be64(word[1]));
336     + rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
337     + be64_to_cpu(word[1]));
338     if (rc != H_SUCCESS) {
339     dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
340     rc = 0;
341     diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
342     index bd82a791f995..b2c231b1beec 100644
343     --- a/drivers/char/tpm/tpm_ibmvtpm.h
344     +++ b/drivers/char/tpm/tpm_ibmvtpm.h
345     @@ -22,9 +22,9 @@
346     struct ibmvtpm_crq {
347     u8 valid;
348     u8 msg;
349     - u16 len;
350     - u32 data;
351     - u64 reserved;
352     + __be16 len;
353     + __be32 data;
354     + __be64 reserved;
355     } __attribute__((packed, aligned(8)));
356    
357     struct ibmvtpm_crq_queue {
358     diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
359     index fc45567ad3ac..ec3bd62eeaf6 100644
360     --- a/drivers/char/virtio_console.c
361     +++ b/drivers/char/virtio_console.c
362     @@ -2023,12 +2023,13 @@ static int virtcons_probe(struct virtio_device *vdev)
363     spin_lock_init(&portdev->ports_lock);
364     INIT_LIST_HEAD(&portdev->ports);
365    
366     + INIT_WORK(&portdev->control_work, &control_work_handler);
367     +
368     if (multiport) {
369     unsigned int nr_added_bufs;
370    
371     spin_lock_init(&portdev->c_ivq_lock);
372     spin_lock_init(&portdev->c_ovq_lock);
373     - INIT_WORK(&portdev->control_work, &control_work_handler);
374    
375     nr_added_bufs = fill_queue(portdev->c_ivq,
376     &portdev->c_ivq_lock);
377     diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
378     index ead08a49bec0..59ea6547306b 100644
379     --- a/drivers/gpu/drm/radeon/evergreen.c
380     +++ b/drivers/gpu/drm/radeon/evergreen.c
381     @@ -4016,6 +4016,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
382     WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
383     WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
384    
385     + /* posting read */
386     + RREG32(SRBM_STATUS);
387     +
388     return 0;
389     }
390    
391     diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
392     index 46470dd7c710..f9f0e3680d76 100644
393     --- a/drivers/gpu/drm/radeon/r100.c
394     +++ b/drivers/gpu/drm/radeon/r100.c
395     @@ -743,6 +743,10 @@ int r100_irq_set(struct radeon_device *rdev)
396     tmp |= RADEON_FP2_DETECT_MASK;
397     }
398     WREG32(RADEON_GEN_INT_CNTL, tmp);
399     +
400     + /* read back to post the write */
401     + RREG32(RADEON_GEN_INT_CNTL);
402     +
403     return 0;
404     }
405    
406     diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
407     index 4cf21ec1abe3..90b007594e32 100644
408     --- a/drivers/gpu/drm/radeon/r600.c
409     +++ b/drivers/gpu/drm/radeon/r600.c
410     @@ -3459,6 +3459,9 @@ int r600_init(struct radeon_device *rdev)
411     rdev->accel_working = false;
412     }
413    
414     + /* posting read */
415     + RREG32(R_000E50_SRBM_STATUS);
416     +
417     return 0;
418     }
419    
420     diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
421     index 60af3cda587b..6627585da1e5 100644
422     --- a/drivers/gpu/drm/radeon/radeon_cs.c
423     +++ b/drivers/gpu/drm/radeon/radeon_cs.c
424     @@ -177,11 +177,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
425     u32 ring = RADEON_CS_RING_GFX;
426     s32 priority = 0;
427    
428     + INIT_LIST_HEAD(&p->validated);
429     +
430     if (!cs->num_chunks) {
431     return 0;
432     }
433     +
434     /* get chunks */
435     - INIT_LIST_HEAD(&p->validated);
436     p->idx = 0;
437     p->ib.sa_bo = NULL;
438     p->ib.semaphore = NULL;
439     diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
440     index ae813fef0818..971d55f73e0c 100644
441     --- a/drivers/gpu/drm/radeon/rs600.c
442     +++ b/drivers/gpu/drm/radeon/rs600.c
443     @@ -636,6 +636,10 @@ int rs600_irq_set(struct radeon_device *rdev)
444     WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
445     if (ASIC_IS_DCE2(rdev))
446     WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
447     +
448     + /* posting read */
449     + RREG32(R_000040_GEN_INT_CNTL);
450     +
451     return 0;
452     }
453    
454     diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
455     index 03add5d5542e..2410c38ff037 100644
456     --- a/drivers/gpu/drm/radeon/si.c
457     +++ b/drivers/gpu/drm/radeon/si.c
458     @@ -5704,8 +5704,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
459     WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
460    
461     if (!vclk || !dclk) {
462     - /* keep the Bypass mode, put PLL to sleep */
463     - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
464     + /* keep the Bypass mode */
465     return 0;
466     }
467    
468     @@ -5721,8 +5720,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
469     /* set VCO_MODE to 1 */
470     WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
471    
472     - /* toggle UPLL_SLEEP to 1 then back to 0 */
473     - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
474     + /* disable sleep mode */
475     WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
476    
477     /* deassert UPLL_RESET */
478     @@ -5778,5 +5776,8 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
479    
480     mdelay(100);
481    
482     + /* posting read */
483     + RREG32(SRBM_STATUS);
484     +
485     return 0;
486     }
487     diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
488     index a4694aa20a3e..f66aeb79abdf 100644
489     --- a/drivers/net/can/dev.c
490     +++ b/drivers/net/can/dev.c
491     @@ -503,6 +503,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
492     skb->pkt_type = PACKET_BROADCAST;
493     skb->ip_summed = CHECKSUM_UNNECESSARY;
494    
495     + skb_reset_mac_header(skb);
496     + skb_reset_network_header(skb);
497     + skb_reset_transport_header(skb);
498     +
503     can_skb_reserve(skb);
504     can_skb_prv(skb)->ifindex = dev->ifindex;
505    
506     diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
507     index 1e207f086b75..49ab45e17fe8 100644
508     --- a/drivers/net/usb/cx82310_eth.c
509     +++ b/drivers/net/usb/cx82310_eth.c
510     @@ -302,9 +302,18 @@ static const struct driver_info cx82310_info = {
511     .tx_fixup = cx82310_tx_fixup,
512     };
513    
514     +#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
515     + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
516     + USB_DEVICE_ID_MATCH_DEV_INFO, \
517     + .idVendor = (vend), \
518     + .idProduct = (prod), \
519     + .bDeviceClass = (cl), \
520     + .bDeviceSubClass = (sc), \
521     + .bDeviceProtocol = (pr)
522     +
523     static const struct usb_device_id products[] = {
524     {
525     - USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0),
526     + USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
527     .driver_info = (unsigned long) &cx82310_info
528     },
529     { },
530     diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
531     index e873e8f0070d..283212aa103c 100644
532     --- a/drivers/regulator/core.c
533     +++ b/drivers/regulator/core.c
534     @@ -1596,10 +1596,12 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
535     trace_regulator_enable(rdev_get_name(rdev));
536    
537     if (rdev->ena_pin) {
538     - ret = regulator_ena_gpio_ctrl(rdev, true);
539     - if (ret < 0)
540     - return ret;
541     - rdev->ena_gpio_state = 1;
542     + if (!rdev->ena_gpio_state) {
543     + ret = regulator_ena_gpio_ctrl(rdev, true);
544     + if (ret < 0)
545     + return ret;
546     + rdev->ena_gpio_state = 1;
547     + }
548     } else if (rdev->desc->ops->enable) {
549     ret = rdev->desc->ops->enable(rdev);
550     if (ret < 0)
551     @@ -1701,10 +1703,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
552     trace_regulator_disable(rdev_get_name(rdev));
553    
554     if (rdev->ena_pin) {
555     - ret = regulator_ena_gpio_ctrl(rdev, false);
556     - if (ret < 0)
557     - return ret;
558     - rdev->ena_gpio_state = 0;
559     + if (rdev->ena_gpio_state) {
560     + ret = regulator_ena_gpio_ctrl(rdev, false);
561     + if (ret < 0)
562     + return ret;
563     + rdev->ena_gpio_state = 0;
564     + }
565    
566     } else if (rdev->desc->ops->disable) {
567     ret = rdev->desc->ops->disable(rdev);
568     @@ -3614,12 +3618,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
569     config->ena_gpio, ret);
570     goto wash;
571     }
572     -
573     - if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
574     - rdev->ena_gpio_state = 1;
575     -
576     - if (config->ena_gpio_invert)
577     - rdev->ena_gpio_state = !rdev->ena_gpio_state;
578     }
579    
580     /* set regulator constraints */
581     @@ -3788,9 +3786,11 @@ int regulator_suspend_finish(void)
582     list_for_each_entry(rdev, &regulator_list, list) {
583     mutex_lock(&rdev->mutex);
584     if (rdev->use_count > 0 || rdev->constraints->always_on) {
585     - error = _regulator_do_enable(rdev);
586     - if (error)
587     - ret = error;
588     + if (!_regulator_is_enabled(rdev)) {
589     + error = _regulator_do_enable(rdev);
590     + if (error)
591     + ret = error;
592     + }
593     } else {
594     if (!has_full_constraints)
595     goto unlock;
596     diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
597     index 62b58d38ce2e..60de66252fa2 100644
598     --- a/drivers/scsi/libsas/sas_discover.c
599     +++ b/drivers/scsi/libsas/sas_discover.c
600     @@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work)
601     struct sas_discovery_event *ev = to_sas_discovery_event(work);
602     struct asd_sas_port *port = ev->port;
603     struct sas_ha_struct *ha = port->ha;
604     + struct domain_device *ddev = port->port_dev;
605    
606     /* prevent revalidation from finding sata links in recovery */
607     mutex_lock(&ha->disco_mutex);
608     @@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work)
609     SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
610     task_pid_nr(current));
611    
612     - if (port->port_dev)
613     - res = sas_ex_revalidate_domain(port->port_dev);
614     + if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
615     + ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE))
616     + res = sas_ex_revalidate_domain(ddev);
617    
618     SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
619     port->id, task_pid_nr(current), res);
620     diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
621     index 5266c89fc989..a6f0878d9bf1 100644
622     --- a/drivers/spi/spi-pl022.c
623     +++ b/drivers/spi/spi-pl022.c
624     @@ -508,12 +508,12 @@ static void giveback(struct pl022 *pl022)
625     pl022->cur_msg = NULL;
626     pl022->cur_transfer = NULL;
627     pl022->cur_chip = NULL;
628     - spi_finalize_current_message(pl022->master);
629    
630     /* disable the SPI/SSP operation */
631     writew((readw(SSP_CR1(pl022->virtbase)) &
632     (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
633    
634     + spi_finalize_current_message(pl022->master);
635     }
636    
637     /**
638     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
639     index 651b5768862f..9559ea749d83 100644
640     --- a/drivers/target/iscsi/iscsi_target.c
641     +++ b/drivers/target/iscsi/iscsi_target.c
642     @@ -4136,11 +4136,17 @@ int iscsit_close_connection(
643     pr_debug("Closing iSCSI connection CID %hu on SID:"
644     " %u\n", conn->cid, sess->sid);
645     /*
646     - * Always up conn_logout_comp just in case the RX Thread is sleeping
647     - * and the logout response never got sent because the connection
648     - * failed.
649     + * Always up conn_logout_comp for the traditional TCP case just in case
650     + * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
651     + * response never got sent because the connection failed.
652     + *
653     + * However for iser-target, isert_wait4logout() is using conn_logout_comp
654     + * to signal logout response TX interrupt completion. Go ahead and skip
655     + * this for iser since isert_rx_opcode() does not wait on logout failure,
656     + * and to avoid iscsi_conn pointer dereference in iser-target code.
657     */
658     - complete(&conn->conn_logout_comp);
659     + if (conn->conn_transport->transport_type == ISCSI_TCP)
660     + complete(&conn->conn_logout_comp);
661    
662     iscsi_release_thread_set(conn);
663    
664     diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
665     index 27ec6e4d1c7c..7f85f4a6d73a 100644
666     --- a/drivers/target/target_core_pr.c
667     +++ b/drivers/target/target_core_pr.c
668     @@ -518,6 +518,18 @@ static int core_scsi3_pr_seq_non_holder(
669    
670     return 0;
671     }
672     + } else if (we && registered_nexus) {
673     + /*
674     + * Reads are allowed for Write Exclusive locks
675     + * from all registrants.
676     + */
677     + if (cmd->data_direction == DMA_FROM_DEVICE) {
678     + pr_debug("Allowing READ CDB: 0x%02x for %s"
679     + " reservation\n", cdb[0],
680     + core_scsi3_pr_dump_type(pr_reg_type));
681     +
682     + return 0;
683     + }
684     }
685     pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
686     " for %s reservation\n", transport_dump_cmd_direction(cmd),
687     @@ -2397,6 +2409,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
688     spin_lock(&dev->dev_reservation_lock);
689     pr_res_holder = dev->dev_pr_res_holder;
690     if (pr_res_holder) {
691     + int pr_res_type = pr_res_holder->pr_res_type;
692     /*
693     * From spc4r17 Section 5.7.9: Reserving:
694     *
695     @@ -2407,7 +2420,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
696     * the logical unit, then the command shall be completed with
697     * RESERVATION CONFLICT status.
698     */
699     - if (pr_res_holder != pr_reg) {
700     + if ((pr_res_holder != pr_reg) &&
701     + (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
702     + (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
703     struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
704     pr_err("SPC-3 PR: Attempted RESERVE from"
705     " [%s]: %s while reservation already held by"
706     @@ -4012,7 +4027,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
707     unsigned char *buf;
708     u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
709     u32 off = 8; /* off into first Full Status descriptor */
710     - int format_code = 0;
711     + int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
712     + bool all_reg = false;
713    
714     if (cmd->data_length < 8) {
715     pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
716     @@ -4029,6 +4045,19 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
717     buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
718     buf[3] = (dev->t10_pr.pr_generation & 0xff);
719    
720     + spin_lock(&dev->dev_reservation_lock);
721     + if (dev->dev_pr_res_holder) {
722     + struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;
723     +
724     + if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
725     + pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
726     + all_reg = true;
727     + pr_res_type = pr_holder->pr_res_type;
728     + pr_res_scope = pr_holder->pr_res_scope;
729     + }
730     + }
731     + spin_unlock(&dev->dev_reservation_lock);
732     +
733     spin_lock(&pr_tmpl->registration_lock);
734     list_for_each_entry_safe(pr_reg, pr_reg_tmp,
735     &pr_tmpl->registration_list, pr_reg_list) {
736     @@ -4078,14 +4107,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
737     * reservation holder for PR_HOLDER bit.
738     *
739     * Also, if this registration is the reservation
740     - * holder, fill in SCOPE and TYPE in the next byte.
741     + * holder or there is an All Registrants reservation
742     + * active, fill in SCOPE and TYPE in the next byte.
743     */
744     if (pr_reg->pr_res_holder) {
745     buf[off++] |= 0x01;
746     buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
747     (pr_reg->pr_res_type & 0x0f);
748     - } else
749     + } else if (all_reg) {
750     + buf[off++] |= 0x01;
751     + buf[off++] = (pr_res_scope & 0xf0) |
752     + (pr_res_type & 0x0f);
753     + } else {
754     off += 2;
755     + }
756    
757     off += 4; /* Skip over reserved area */
758     /*
759     diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
760     index 3250ba2594e0..b1e77ff9a636 100644
761     --- a/drivers/target/target_core_pscsi.c
762     +++ b/drivers/target/target_core_pscsi.c
763     @@ -1112,7 +1112,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
764     struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
765     struct scsi_device *sd = pdv->pdv_sd;
766    
767     - return sd->type;
768     + return (sd) ? sd->type : TYPE_NO_LUN;
769     }
770    
771     static sector_t pscsi_get_blocks(struct se_device *dev)
772     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
773     index dcc5daa0ff1c..daf0f6357bb3 100644
774     --- a/drivers/target/target_core_transport.c
775     +++ b/drivers/target/target_core_transport.c
776     @@ -2222,6 +2222,10 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
777    
778     out:
779     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
780     +
781     + if (ret && ack_kref)
782     + target_put_sess_cmd(se_sess, se_cmd);
783     +
784     return ret;
785     }
786     EXPORT_SYMBOL(target_get_sess_cmd);
787     diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
788     index 8d3c0b5e2878..98b8423793fd 100644
789     --- a/drivers/tty/serial/8250/8250_pci.c
790     +++ b/drivers/tty/serial/8250/8250_pci.c
791     @@ -68,7 +68,7 @@ static void moan_device(const char *str, struct pci_dev *dev)
792     "Please send the output of lspci -vv, this\n"
793     "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n"
794     "manufacturer and name of serial board or\n"
795     - "modem board to rmk+serial@arm.linux.org.uk.\n",
796     + "modem board to <linux-serial@vger.kernel.org>.\n",
797     pci_name(dev), str, dev->vendor, dev->device,
798     dev->subsystem_vendor, dev->subsystem_device);
799     }
800     diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
801     index 46ae0f9f02ad..75fe3d466515 100644
802     --- a/drivers/xen/xen-pciback/conf_space.c
803     +++ b/drivers/xen/xen-pciback/conf_space.c
804     @@ -16,7 +16,7 @@
805     #include "conf_space.h"
806     #include "conf_space_quirks.h"
807    
808     -static bool permissive;
809     +bool permissive;
810     module_param(permissive, bool, 0644);
811    
812     /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
813     diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
814     index e56c934ad137..2e1d73d1d5d0 100644
815     --- a/drivers/xen/xen-pciback/conf_space.h
816     +++ b/drivers/xen/xen-pciback/conf_space.h
817     @@ -64,6 +64,8 @@ struct config_field_entry {
818     void *data;
819     };
820    
821     +extern bool permissive;
822     +
823     #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
824    
825     /* Add fields to a device - the add_fields macro expects to get a pointer to
826     diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
827     index 3daf862d739d..a5bb81a600f7 100644
828     --- a/drivers/xen/xen-pciback/conf_space_header.c
829     +++ b/drivers/xen/xen-pciback/conf_space_header.c
830     @@ -9,6 +9,10 @@
831     #include "pciback.h"
832     #include "conf_space.h"
833    
834     +struct pci_cmd_info {
835     + u16 val;
836     +};
837     +
838     struct pci_bar_info {
839     u32 val;
840     u32 len_val;
841     @@ -18,22 +22,36 @@ struct pci_bar_info {
842     #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
843     #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
844    
845     -static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
846     +/* Bits guests are allowed to control in permissive mode. */
847     +#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \
848     + PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \
849     + PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK)
850     +
851     +static void *command_init(struct pci_dev *dev, int offset)
852     {
853     - int i;
854     - int ret;
855     -
856     - ret = xen_pcibk_read_config_word(dev, offset, value, data);
857     - if (!pci_is_enabled(dev))
858     - return ret;
859     -
860     - for (i = 0; i < PCI_ROM_RESOURCE; i++) {
861     - if (dev->resource[i].flags & IORESOURCE_IO)
862     - *value |= PCI_COMMAND_IO;
863     - if (dev->resource[i].flags & IORESOURCE_MEM)
864     - *value |= PCI_COMMAND_MEMORY;
865     + struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
866     + int err;
867     +
868     + if (!cmd)
869     + return ERR_PTR(-ENOMEM);
870     +
871     + err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val);
872     + if (err) {
873     + kfree(cmd);
874     + return ERR_PTR(err);
875     }
876    
877     + return cmd;
878     +}
879     +
880     +static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
881     +{
882     + int ret = pci_read_config_word(dev, offset, value);
883     + const struct pci_cmd_info *cmd = data;
884     +
885     + *value &= PCI_COMMAND_GUEST;
886     + *value |= cmd->val & ~PCI_COMMAND_GUEST;
887     +
888     return ret;
889     }
890    
891     @@ -41,6 +59,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
892     {
893     struct xen_pcibk_dev_data *dev_data;
894     int err;
895     + u16 val;
896     + struct pci_cmd_info *cmd = data;
897    
898     dev_data = pci_get_drvdata(dev);
899     if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
900     @@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
901     }
902     }
903    
904     + cmd->val = value;
905     +
906     + if (!permissive && (!dev_data || !dev_data->permissive))
907     + return 0;
908     +
909     + /* Only allow the guest to control certain bits. */
910     + err = pci_read_config_word(dev, offset, &val);
911     + if (err || val == value)
912     + return err;
913     +
914     + value &= PCI_COMMAND_GUEST;
915     + value |= val & ~PCI_COMMAND_GUEST;
916     +
917     return pci_write_config_word(dev, offset, value);
918     }
919    
920     @@ -282,6 +315,8 @@ static const struct config_field header_common[] = {
921     {
922     .offset = PCI_COMMAND,
923     .size = 2,
924     + .init = command_init,
925     + .release = bar_release,
926     .u.w.read = command_read,
927     .u.w.write = command_write,
928     },
929     diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
930     index 23bf1a52a5da..b535008b6c4c 100644
931     --- a/fs/fuse/dev.c
932     +++ b/fs/fuse/dev.c
933     @@ -819,8 +819,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
934    
935     newpage = buf->page;
936    
937     - if (WARN_ON(!PageUptodate(newpage)))
938     - return -EIO;
939     + if (!PageUptodate(newpage))
940     + SetPageUptodate(newpage);
941    
942     ClearPageMappedToDisk(newpage);
943    
944     @@ -1725,6 +1725,9 @@ copy_finish:
945     static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
946     unsigned int size, struct fuse_copy_state *cs)
947     {
948     + /* Don't try to move pages (yet) */
949     + cs->move_pages = 0;
950     +
951     switch (code) {
952     case FUSE_NOTIFY_POLL:
953     return fuse_notify_poll(fc, size, cs);
954     diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
955     index b3c95c1a4700..99294a286e66 100644
956     --- a/fs/nilfs2/segment.c
957     +++ b/fs/nilfs2/segment.c
958     @@ -1906,6 +1906,7 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
959     struct the_nilfs *nilfs)
960     {
961     struct nilfs_inode_info *ii, *n;
962     + int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
963     int defer_iput = false;
964    
965     spin_lock(&nilfs->ns_inode_lock);
966     @@ -1918,10 +1919,10 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
967     brelse(ii->i_bh);
968     ii->i_bh = NULL;
969     list_del_init(&ii->i_dirty);
970     - if (!ii->vfs_inode.i_nlink) {
971     + if (!ii->vfs_inode.i_nlink || during_mount) {
972     /*
973     - * Defer calling iput() to avoid a deadlock
974     - * over I_SYNC flag for inodes with i_nlink == 0
975     + * Defer calling iput() to avoid deadlocks if
976     + * i_nlink == 0 or mount is not yet finished.
977     */
978     list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
979     defer_iput = true;
980     diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
981     index ff28cf578d01..120dd354849d 100644
982     --- a/include/linux/workqueue.h
983     +++ b/include/linux/workqueue.h
984     @@ -71,7 +71,8 @@ enum {
985     /* data contains off-queue information when !WORK_STRUCT_PWQ */
986     WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
987    
988     - WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
989     + __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
990     + WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),
991    
992     /*
993     * When a work item is off queue, its high bits point to the last
994     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
995     index 16730a9c8cac..fe7c4b91d2e7 100644
996     --- a/kernel/workqueue.c
997     +++ b/kernel/workqueue.c
998     @@ -2861,19 +2861,57 @@ bool flush_work(struct work_struct *work)
999     }
1000     EXPORT_SYMBOL_GPL(flush_work);
1001    
1002     +struct cwt_wait {
1003     + wait_queue_t wait;
1004     + struct work_struct *work;
1005     +};
1006     +
1007     +static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
1008     +{
1009     + struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
1010     +
1011     + if (cwait->work != key)
1012     + return 0;
1013     + return autoremove_wake_function(wait, mode, sync, key);
1014     +}
1015     +
1016     static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
1017     {
1018     + static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
1019     unsigned long flags;
1020     int ret;
1021    
1022     do {
1023     ret = try_to_grab_pending(work, is_dwork, &flags);
1024     /*
1025     - * If someone else is canceling, wait for the same event it
1026     - * would be waiting for before retrying.
1027     + * If someone else is already canceling, wait for it to
1028     + * finish. flush_work() doesn't work for PREEMPT_NONE
1029     + * because we may get scheduled between @work's completion
1030     + * and the other canceling task resuming and clearing
1031     + * CANCELING - flush_work() will return false immediately
1032     + * as @work is no longer busy, try_to_grab_pending() will
1033     + * return -ENOENT as @work is still being canceled and the
1034     + * other canceling task won't be able to clear CANCELING as
1035     + * we're hogging the CPU.
1036     + *
1037     + * Let's wait for completion using a waitqueue. As this
1038     + * may lead to the thundering herd problem, use a custom
1039     + * wake function which matches @work along with exclusive
1040     + * wait and wakeup.
1041     */
1042     - if (unlikely(ret == -ENOENT))
1043     - flush_work(work);
1044     + if (unlikely(ret == -ENOENT)) {
1045     + struct cwt_wait cwait;
1046     +
1047     + init_wait(&cwait.wait);
1048     + cwait.wait.func = cwt_wakefn;
1049     + cwait.work = work;
1050     +
1051     + prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
1052     + TASK_UNINTERRUPTIBLE);
1053     + if (work_is_canceling(work))
1054     + schedule();
1055     + finish_wait(&cancel_waitq, &cwait.wait);
1056     + }
1057     } while (unlikely(ret < 0));
1058    
1059     /* tell other tasks trying to grab @work to back off */
1060     @@ -2882,6 +2920,16 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
1061    
1062     flush_work(work);
1063     clear_work_data(work);
1064     +
1065     + /*
1066     + * Paired with prepare_to_wait() above so that either
1067     + * waitqueue_active() is visible here or !work_is_canceling() is
1068     + * visible there.
1069     + */
1070     + smp_mb();
1071     + if (waitqueue_active(&cancel_waitq))
1072     + __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
1073     +
1074     return ret;
1075     }
1076    
1077     diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
1078     index d6be3edb7a43..526bf56f4d31 100644
1079     --- a/net/caif/caif_socket.c
1080     +++ b/net/caif/caif_socket.c
1081     @@ -283,7 +283,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
1082     int copylen;
1083    
1084     ret = -EOPNOTSUPP;
1085     - if (m->msg_flags&MSG_OOB)
1086     + if (flags & MSG_OOB)
1087     goto read_error;
1088    
1089     skb = skb_recv_datagram(sk, flags, 0 , &ret);
1090     diff --git a/net/can/af_can.c b/net/can/af_can.c
1091     index f59859a3f562..d3668c55b088 100644
1092     --- a/net/can/af_can.c
1093     +++ b/net/can/af_can.c
1094     @@ -262,6 +262,9 @@ int can_send(struct sk_buff *skb, int loop)
1095     goto inval_skb;
1096     }
1097    
1098     + skb->ip_summed = CHECKSUM_UNNECESSARY;
1099     +
1100     + skb_reset_mac_header(skb);
1101     skb_reset_network_header(skb);
1102     skb_reset_transport_header(skb);
1103    
1104     diff --git a/net/compat.c b/net/compat.c
1105     index 275af79c131b..d12529050b29 100644
1106     --- a/net/compat.c
1107     +++ b/net/compat.c
1108     @@ -71,6 +71,13 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
1109     __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
1110     __get_user(kmsg->msg_flags, &umsg->msg_flags))
1111     return -EFAULT;
1112     +
1113     + if (!tmp1)
1114     + kmsg->msg_namelen = 0;
1115     +
1116     + if (kmsg->msg_namelen < 0)
1117     + return -EINVAL;
1118     +
1119     if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
1120     kmsg->msg_namelen = sizeof(struct sockaddr_storage);
1121     kmsg->msg_name = compat_ptr(tmp1);
1122     diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
1123     index 2ff093b7c45e..0a327b66a344 100644
1124     --- a/net/core/sysctl_net_core.c
1125     +++ b/net/core/sysctl_net_core.c
1126     @@ -23,6 +23,8 @@
1127     static int zero = 0;
1128     static int one = 1;
1129     static int ushort_max = USHRT_MAX;
1130     +static int min_sndbuf = SOCK_MIN_SNDBUF;
1131     +static int min_rcvbuf = SOCK_MIN_RCVBUF;
1132    
1133     #ifdef CONFIG_RPS
1134     static int rps_sock_flow_sysctl(ctl_table *table, int write,
1135     @@ -97,7 +99,7 @@ static struct ctl_table net_core_table[] = {
1136     .maxlen = sizeof(int),
1137     .mode = 0644,
1138     .proc_handler = proc_dointvec_minmax,
1139     - .extra1 = &one,
1140     + .extra1 = &min_sndbuf,
1141     },
1142     {
1143     .procname = "rmem_max",
1144     @@ -105,7 +107,7 @@ static struct ctl_table net_core_table[] = {
1145     .maxlen = sizeof(int),
1146     .mode = 0644,
1147     .proc_handler = proc_dointvec_minmax,
1148     - .extra1 = &one,
1149     + .extra1 = &min_rcvbuf,
1150     },
1151     {
1152     .procname = "wmem_default",
1153     @@ -113,7 +115,7 @@ static struct ctl_table net_core_table[] = {
1154     .maxlen = sizeof(int),
1155     .mode = 0644,
1156     .proc_handler = proc_dointvec_minmax,
1157     - .extra1 = &one,
1158     + .extra1 = &min_sndbuf,
1159     },
1160     {
1161     .procname = "rmem_default",
1162     @@ -121,7 +123,7 @@ static struct ctl_table net_core_table[] = {
1163     .maxlen = sizeof(int),
1164     .mode = 0644,
1165     .proc_handler = proc_dointvec_minmax,
1166     - .extra1 = &one,
1167     + .extra1 = &min_rcvbuf,
1168     },
1169     {
1170     .procname = "dev_weight",
1171     diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
1172     index 45dbdab915e2..14a1ed611b05 100644
1173     --- a/net/ipv4/inet_diag.c
1174     +++ b/net/ipv4/inet_diag.c
1175     @@ -71,6 +71,20 @@ static inline void inet_diag_unlock_handler(
1176     mutex_unlock(&inet_diag_table_mutex);
1177     }
1178    
1179     +static size_t inet_sk_attr_size(void)
1180     +{
1181     + return nla_total_size(sizeof(struct tcp_info))
1182     + + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
1183     + + nla_total_size(1) /* INET_DIAG_TOS */
1184     + + nla_total_size(1) /* INET_DIAG_TCLASS */
1185     + + nla_total_size(sizeof(struct inet_diag_meminfo))
1186     + + nla_total_size(sizeof(struct inet_diag_msg))
1187     + + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
1188     + + nla_total_size(TCP_CA_NAME_MAX)
1189     + + nla_total_size(sizeof(struct tcpvegas_info))
1190     + + 64;
1191     +}
1192     +
1193     int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
1194     struct sk_buff *skb, struct inet_diag_req_v2 *req,
1195     struct user_namespace *user_ns,
1196     @@ -326,9 +340,7 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
1197     if (err)
1198     goto out;
1199    
1200     - rep = nlmsg_new(sizeof(struct inet_diag_msg) +
1201     - sizeof(struct inet_diag_meminfo) +
1202     - sizeof(struct tcp_info) + 64, GFP_KERNEL);
1203     + rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
1204     if (!rep) {
1205     err = -ENOMEM;
1206     goto out;
1207     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1208     index 923146c4f007..913dc4f49b10 100644
1209     --- a/net/ipv4/tcp_output.c
1210     +++ b/net/ipv4/tcp_output.c
1211     @@ -2593,15 +2593,11 @@ void tcp_send_fin(struct sock *sk)
1212     } else {
1213     /* Socket is locked, keep trying until memory is available. */
1214     for (;;) {
1215     - skb = alloc_skb_fclone(MAX_TCP_HEADER,
1216     - sk->sk_allocation);
1217     + skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
1218     if (skb)
1219     break;
1220     yield();
1221     }
1222     -
1223     - /* Reserve space for headers and prepare control bits. */
1224     - skb_reserve(skb, MAX_TCP_HEADER);
1225     /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
1226     tcp_init_nondata_skb(skb, tp->write_seq,
1227     TCPHDR_ACK | TCPHDR_FIN);
1228     @@ -2875,9 +2871,9 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
1229     {
1230     struct tcp_sock *tp = tcp_sk(sk);
1231     struct tcp_fastopen_request *fo = tp->fastopen_req;
1232     - int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
1233     - struct sk_buff *syn_data = NULL, *data;
1234     + int syn_loss = 0, space, err = 0;
1235     unsigned long last_syn_loss = 0;
1236     + struct sk_buff *syn_data;
1237    
1238     tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
1239     tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
1240     @@ -2908,42 +2904,38 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
1241     /* limit to order-0 allocations */
1242     space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
1243    
1244     - syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
1245     - sk->sk_allocation);
1246     - if (syn_data == NULL)
1247     + syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation);
1248     + if (!syn_data)
1249     goto fallback;
1250     + syn_data->ip_summed = CHECKSUM_PARTIAL;
1251     + memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
1252     + if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space),
1253     + fo->data->msg_iov, 0, space))) {
1254     + kfree_skb(syn_data);
1255     + goto fallback;
1256     + }
1257    
1258     - for (i = 0; i < iovlen && syn_data->len < space; ++i) {
1259     - struct iovec *iov = &fo->data->msg_iov[i];
1260     - unsigned char __user *from = iov->iov_base;
1261     - int len = iov->iov_len;
1262     -
1263     - if (syn_data->len + len > space)
1264     - len = space - syn_data->len;
1265     - else if (i + 1 == iovlen)
1266     - /* No more data pending in inet_wait_for_connect() */
1267     - fo->data = NULL;
1268     + /* No more data pending in inet_wait_for_connect() */
1269     + if (space == fo->size)
1270     + fo->data = NULL;
1271     + fo->copied = space;
1272    
1273     - if (skb_add_data(syn_data, from, len))
1274     - goto fallback;
1275     - }
1276     + tcp_connect_queue_skb(sk, syn_data);
1277    
1278     - /* Queue a data-only packet after the regular SYN for retransmission */
1279     - data = pskb_copy(syn_data, sk->sk_allocation);
1280     - if (data == NULL)
1281     - goto fallback;
1282     - TCP_SKB_CB(data)->seq++;
1283     - TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
1284     - TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
1285     - tcp_connect_queue_skb(sk, data);
1286     - fo->copied = data->len;
1287     + err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
1288    
1289     - if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
1290     + /* Now full SYN+DATA was cloned and sent (or not),
1291     + * remove the SYN from the original skb (syn_data)
1292     + * we keep in write queue in case of a retransmit, as we
1293     + * also have the SYN packet (with no data) in the same queue.
1294     + */
1295     + TCP_SKB_CB(syn_data)->seq++;
1296     + TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
1297     + if (!err) {
1298     tp->syn_data = (fo->copied > 0);
1299     NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
1300     goto done;
1301     }
1302     - syn_data = NULL;
1303    
1304     fallback:
1305     /* Send a regular SYN with Fast Open cookie request option */
1306     @@ -2952,7 +2944,6 @@ fallback:
1307     err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
1308     if (err)
1309     tp->syn_fastopen = 0;
1310     - kfree_skb(syn_data);
1311     done:
1312     fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */
1313     return err;
1314     @@ -2972,13 +2963,10 @@ int tcp_connect(struct sock *sk)
1315     return 0;
1316     }
1317    
1318     - buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
1319     - if (unlikely(buff == NULL))
1320     + buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
1321     + if (unlikely(!buff))
1322     return -ENOBUFS;
1323    
1324     - /* Reserve space for headers. */
1325     - skb_reserve(buff, MAX_TCP_HEADER);
1326     -
1327     tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
1328     tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
1329     tcp_connect_queue_skb(sk, buff);
1330     diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
1331     index 26b9a986a87f..1c6a71c41e62 100644
1332     --- a/net/netfilter/ipvs/ip_vs_core.c
1333     +++ b/net/netfilter/ipvs/ip_vs_core.c
1334     @@ -650,16 +650,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
1335     return err;
1336     }
1337    
1338     -static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
1339     +static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
1340     + unsigned int hooknum)
1341     {
1342     + if (!sysctl_snat_reroute(skb))
1343     + return 0;
1344     + /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
1345     + if (NF_INET_LOCAL_IN == hooknum)
1346     + return 0;
1347     #ifdef CONFIG_IP_VS_IPV6
1348     if (af == AF_INET6) {
1349     - if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
1350     + struct dst_entry *dst = skb_dst(skb);
1351     +
1352     + if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
1353     + ip6_route_me_harder(skb) != 0)
1354     return 1;
1355     } else
1356     #endif
1357     - if ((sysctl_snat_reroute(skb) ||
1358     - skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
1359     + if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
1360     ip_route_me_harder(skb, RTN_LOCAL) != 0)
1361     return 1;
1362    
1363     @@ -782,7 +790,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
1364     union nf_inet_addr *snet,
1365     __u8 protocol, struct ip_vs_conn *cp,
1366     struct ip_vs_protocol *pp,
1367     - unsigned int offset, unsigned int ihl)
1368     + unsigned int offset, unsigned int ihl,
1369     + unsigned int hooknum)
1370     {
1371     unsigned int verdict = NF_DROP;
1372    
1373     @@ -812,7 +821,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
1374     #endif
1375     ip_vs_nat_icmp(skb, pp, cp, 1);
1376    
1377     - if (ip_vs_route_me_harder(af, skb))
1378     + if (ip_vs_route_me_harder(af, skb, hooknum))
1379     goto out;
1380    
1381     /* do the statistics and put it back */
1382     @@ -907,7 +916,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
1383    
1384     snet.ip = iph->saddr;
1385     return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
1386     - pp, ciph.len, ihl);
1387     + pp, ciph.len, ihl, hooknum);
1388     }
1389    
1390     #ifdef CONFIG_IP_VS_IPV6
1391     @@ -972,7 +981,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
1392     snet.in6 = ciph.saddr.in6;
1393     writable = ciph.len;
1394     return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
1395     - pp, writable, sizeof(struct ipv6hdr));
1396     + pp, writable, sizeof(struct ipv6hdr),
1397     + hooknum);
1398     }
1399     #endif
1400    
1401     @@ -1031,7 +1041,8 @@ static inline bool is_new_conn(const struct sk_buff *skb,
1402     */
1403     static unsigned int
1404     handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1405     - struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
1406     + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
1407     + unsigned int hooknum)
1408     {
1409     struct ip_vs_protocol *pp = pd->pp;
1410    
1411     @@ -1069,7 +1080,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
1412     * if it came from this machine itself. So re-compute
1413     * the routing information.
1414     */
1415     - if (ip_vs_route_me_harder(af, skb))
1416     + if (ip_vs_route_me_harder(af, skb, hooknum))
1417     goto drop;
1418    
1419     IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
1420     @@ -1172,7 +1183,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1421     cp = pp->conn_out_get(af, skb, &iph, 0);
1422    
1423     if (likely(cp))
1424     - return handle_response(af, skb, pd, cp, &iph);
1425     + return handle_response(af, skb, pd, cp, &iph, hooknum);
1426     if (sysctl_nat_icmp_send(net) &&
1427     (pp->protocol == IPPROTO_TCP ||
1428     pp->protocol == IPPROTO_UDP ||
1429     diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
1430     index f6046d9af8d3..e476cc7dc801 100644
1431     --- a/net/netfilter/ipvs/ip_vs_sync.c
1432     +++ b/net/netfilter/ipvs/ip_vs_sync.c
1433     @@ -878,6 +878,8 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
1434     IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
1435     return;
1436     }
1437     + if (!(flags & IP_VS_CONN_F_TEMPLATE))
1438     + kfree(param->pe_data);
1439     }
1440    
1441     if (opt)
1442     @@ -1151,6 +1153,7 @@ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
1443     (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
1444     );
1445     #endif
1446     + ip_vs_pe_put(param.pe);
1447     return 0;
1448     /* Error exit */
1449     out:
1450     diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
1451     index a817705ce2d0..dba8d0864f18 100644
1452     --- a/net/rds/iw_rdma.c
1453     +++ b/net/rds/iw_rdma.c
1454     @@ -88,7 +88,9 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
1455     int *unpinned);
1456     static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
1457    
1458     -static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id)
1459     +static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
1460     + struct rds_iw_device **rds_iwdev,
1461     + struct rdma_cm_id **cm_id)
1462     {
1463     struct rds_iw_device *iwdev;
1464     struct rds_iw_cm_id *i_cm_id;
1465     @@ -112,15 +114,15 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
1466     src_addr->sin_port,
1467     dst_addr->sin_addr.s_addr,
1468     dst_addr->sin_port,
1469     - rs->rs_bound_addr,
1470     - rs->rs_bound_port,
1471     - rs->rs_conn_addr,
1472     - rs->rs_conn_port);
1473     + src->sin_addr.s_addr,
1474     + src->sin_port,
1475     + dst->sin_addr.s_addr,
1476     + dst->sin_port);
1477     #ifdef WORKING_TUPLE_DETECTION
1478     - if (src_addr->sin_addr.s_addr == rs->rs_bound_addr &&
1479     - src_addr->sin_port == rs->rs_bound_port &&
1480     - dst_addr->sin_addr.s_addr == rs->rs_conn_addr &&
1481     - dst_addr->sin_port == rs->rs_conn_port) {
1482     + if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
1483     + src_addr->sin_port == src->sin_port &&
1484     + dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
1485     + dst_addr->sin_port == dst->sin_port) {
1486     #else
1487     /* FIXME - needs to compare the local and remote
1488     * ipaddr/port tuple, but the ipaddr is the only
1489     @@ -128,7 +130,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
1490     * zero'ed. It doesn't appear to be properly populated
1491     * during connection setup...
1492     */
1493     - if (src_addr->sin_addr.s_addr == rs->rs_bound_addr) {
1494     + if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
1495     #endif
1496     spin_unlock_irq(&iwdev->spinlock);
1497     *rds_iwdev = iwdev;
1498     @@ -180,19 +182,13 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
1499     {
1500     struct sockaddr_in *src_addr, *dst_addr;
1501     struct rds_iw_device *rds_iwdev_old;
1502     - struct rds_sock rs;
1503     struct rdma_cm_id *pcm_id;
1504     int rc;
1505    
1506     src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
1507     dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
1508    
1509     - rs.rs_bound_addr = src_addr->sin_addr.s_addr;
1510     - rs.rs_bound_port = src_addr->sin_port;
1511     - rs.rs_conn_addr = dst_addr->sin_addr.s_addr;
1512     - rs.rs_conn_port = dst_addr->sin_port;
1513     -
1514     - rc = rds_iw_get_device(&rs, &rds_iwdev_old, &pcm_id);
1515     + rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
1516     if (rc)
1517     rds_iw_remove_cm_id(rds_iwdev, cm_id);
1518    
1519     @@ -598,9 +594,17 @@ void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
1520     struct rds_iw_device *rds_iwdev;
1521     struct rds_iw_mr *ibmr = NULL;
1522     struct rdma_cm_id *cm_id;
1523     + struct sockaddr_in src = {
1524     + .sin_addr.s_addr = rs->rs_bound_addr,
1525     + .sin_port = rs->rs_bound_port,
1526     + };
1527     + struct sockaddr_in dst = {
1528     + .sin_addr.s_addr = rs->rs_conn_addr,
1529     + .sin_port = rs->rs_conn_port,
1530     + };
1531     int ret;
1532    
1533     - ret = rds_iw_get_device(rs, &rds_iwdev, &cm_id);
1534     + ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
1535     if (ret || !cm_id) {
1536     ret = -ENODEV;
1537     goto out;
1538     diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
1539     index 898492a8d61b..5cc2da5d295d 100644
1540     --- a/net/rxrpc/ar-recvmsg.c
1541     +++ b/net/rxrpc/ar-recvmsg.c
1542     @@ -87,7 +87,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
1543     if (!skb) {
1544     /* nothing remains on the queue */
1545     if (copied &&
1546     - (msg->msg_flags & MSG_PEEK || timeo == 0))
1547     + (flags & MSG_PEEK || timeo == 0))
1548     goto out;
1549    
1550     /* wait for a message to turn up */
1551     diff --git a/sound/core/control.c b/sound/core/control.c
1552     index 98a29b26c5f4..f2082a35b890 100644
1553     --- a/sound/core/control.c
1554     +++ b/sound/core/control.c
1555     @@ -1168,6 +1168,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
1556    
1557     if (info->count < 1)
1558     return -EINVAL;
1559     + if (!*info->id.name)
1560     + return -EINVAL;
1561     + if (strnlen(info->id.name, sizeof(info->id.name)) >= sizeof(info->id.name))
1562     + return -EINVAL;
1563     access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
1564     (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
1565     SNDRV_CTL_ELEM_ACCESS_INACTIVE|
1566     diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
1567     index cb4d3700f330..db67e5b596d3 100644
1568     --- a/sound/pci/hda/hda_generic.c
1569     +++ b/sound/pci/hda/hda_generic.c
1570     @@ -642,12 +642,45 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
1571     return val;
1572     }
1573    
1574     +/* is this a stereo widget or a stereo-to-mono mix? */
1575     +static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid, int dir)
1576     +{
1577     + unsigned int wcaps = get_wcaps(codec, nid);
1578     + hda_nid_t conn;
1579     +
1580     + if (wcaps & AC_WCAP_STEREO)
1581     + return true;
1582     + if (dir != HDA_INPUT || get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
1583     + return false;
1584     + if (snd_hda_get_num_conns(codec, nid) != 1)
1585     + return false;
1586     + if (snd_hda_get_connections(codec, nid, &conn, 1) < 0)
1587     + return false;
1588     + return !!(get_wcaps(codec, conn) & AC_WCAP_STEREO);
1589     +}
1590     +
1591     /* initialize the amp value (only at the first time) */
1592     static void init_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx)
1593     {
1594     unsigned int caps = query_amp_caps(codec, nid, dir);
1595     int val = get_amp_val_to_activate(codec, nid, dir, caps, false);
1596     - snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
1597     +
1598     + if (is_stereo_amps(codec, nid, dir))
1599     + snd_hda_codec_amp_init_stereo(codec, nid, dir, idx, 0xff, val);
1600     + else
1601     + snd_hda_codec_amp_init(codec, nid, 0, dir, idx, 0xff, val);
1602     +}
1603     +
1604     +/* update the amp, doing in stereo or mono depending on NID */
1605     +static int update_amp(struct hda_codec *codec, hda_nid_t nid, int dir, int idx,
1606     + unsigned int mask, unsigned int val)
1607     +{
1608     + if (is_stereo_amps(codec, nid, dir))
1609     + return snd_hda_codec_amp_stereo(codec, nid, dir, idx,
1610     + mask, val);
1611     + else
1612     + return snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
1613     + mask, val);
1614     }
1615    
1616     /* calculate amp value mask we can modify;
1617     @@ -687,7 +720,7 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
1618     return;
1619    
1620     val &= mask;
1621     - snd_hda_codec_amp_stereo(codec, nid, dir, idx, mask, val);
1622     + update_amp(codec, nid, dir, idx, mask, val);
1623     }
1624    
1625     static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
1626     @@ -4235,13 +4268,11 @@ static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
1627     has_amp = nid_has_mute(codec, mix, HDA_INPUT);
1628     for (i = 0; i < nums; i++) {
1629     if (has_amp)
1630     - snd_hda_codec_amp_stereo(codec, mix,
1631     - HDA_INPUT, i,
1632     - 0xff, HDA_AMP_MUTE);
1633     + update_amp(codec, mix, HDA_INPUT, i,
1634     + 0xff, HDA_AMP_MUTE);
1635     else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
1636     - snd_hda_codec_amp_stereo(codec, conn[i],
1637     - HDA_OUTPUT, 0,
1638     - 0xff, HDA_AMP_MUTE);
1639     + update_amp(codec, conn[i], HDA_OUTPUT, 0,
1640     + 0xff, HDA_AMP_MUTE);
1641     }
1642     }
1643    
1644     diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
1645     index 0fee8fae590a..eb94e495c754 100644
1646     --- a/sound/pci/hda/hda_proc.c
1647     +++ b/sound/pci/hda/hda_proc.c
1648     @@ -129,13 +129,38 @@ static void print_amp_caps(struct snd_info_buffer *buffer,
1649     (caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT);
1650     }
1651    
1652     +/* is this a stereo widget or a stereo-to-mono mix? */
1653     +static bool is_stereo_amps(struct hda_codec *codec, hda_nid_t nid,
1654     + int dir, unsigned int wcaps, int indices)
1655     +{
1656     + hda_nid_t conn;
1657     +
1658     + if (wcaps & AC_WCAP_STEREO)
1659     + return true;
1660     + /* check for a stereo-to-mono mix; it must be:
1661     + * only a single connection, only for input, and only a mixer widget
1662     + */
1663     + if (indices != 1 || dir != HDA_INPUT ||
1664     + get_wcaps_type(wcaps) != AC_WID_AUD_MIX)
1665     + return false;
1666     +
1667     + if (snd_hda_get_raw_connections(codec, nid, &conn, 1) < 0)
1668     + return false;
1669     + /* the connection source is a stereo? */
1670     + wcaps = snd_hda_param_read(codec, conn, AC_PAR_AUDIO_WIDGET_CAP);
1671     + return !!(wcaps & AC_WCAP_STEREO);
1672     +}
1673     +
1674     static void print_amp_vals(struct snd_info_buffer *buffer,
1675     struct hda_codec *codec, hda_nid_t nid,
1676     - int dir, int stereo, int indices)
1677     + int dir, unsigned int wcaps, int indices)
1678     {
1679     unsigned int val;
1680     + bool stereo;
1681     int i;
1682    
1683     + stereo = is_stereo_amps(codec, nid, dir, wcaps, indices);
1684     +
1685     dir = dir == HDA_OUTPUT ? AC_AMP_GET_OUTPUT : AC_AMP_GET_INPUT;
1686     for (i = 0; i < indices; i++) {
1687     snd_iprintf(buffer, " [");
1688     @@ -682,12 +707,10 @@ static void print_codec_info(struct snd_info_entry *entry,
1689     (codec->single_adc_amp &&
1690     wid_type == AC_WID_AUD_IN))
1691     print_amp_vals(buffer, codec, nid, HDA_INPUT,
1692     - wid_caps & AC_WCAP_STEREO,
1693     - 1);
1694     + wid_caps, 1);
1695     else
1696     print_amp_vals(buffer, codec, nid, HDA_INPUT,
1697     - wid_caps & AC_WCAP_STEREO,
1698     - conn_len);
1699     + wid_caps, conn_len);
1700     }
1701     if (wid_caps & AC_WCAP_OUT_AMP) {
1702     snd_iprintf(buffer, " Amp-Out caps: ");
1703     @@ -696,11 +719,10 @@ static void print_codec_info(struct snd_info_entry *entry,
1704     if (wid_type == AC_WID_PIN &&
1705     codec->pin_amp_workaround)
1706     print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
1707     - wid_caps & AC_WCAP_STEREO,
1708     - conn_len);
1709     + wid_caps, conn_len);
1710     else
1711     print_amp_vals(buffer, codec, nid, HDA_OUTPUT,
1712     - wid_caps & AC_WCAP_STEREO, 1);
1713     + wid_caps, 1);
1714     }
1715    
1716     switch (wid_type) {
1717     diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
1718     index cccaf9c7a7bb..e2642ba88b2d 100644
1719     --- a/sound/pci/hda/patch_cirrus.c
1720     +++ b/sound/pci/hda/patch_cirrus.c
1721     @@ -363,6 +363,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
1722     SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
1723     SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
1724     SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
1725     + SND_PCI_QUIRK(0x106b, 0x5600, "MacBookAir 5,2", CS420X_MBP81),
1726     SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
1727     SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
1728     {} /* terminator */
1729     @@ -531,6 +532,7 @@ static int patch_cs420x(struct hda_codec *codec)
1730     return -ENOMEM;
1731    
1732     spec->gen.automute_hook = cs_automute;
1733     + codec->single_adc_amp = 1;
1734    
1735     snd_hda_pick_fixup(codec, cs420x_models, cs420x_fixup_tbl,
1736     cs420x_fixups);
1737     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
1738     index 1868d3a6e310..fab909908a42 100644
1739     --- a/sound/pci/hda/patch_conexant.c
1740     +++ b/sound/pci/hda/patch_conexant.c
1741     @@ -3223,6 +3223,7 @@ enum {
1742     CXT_PINCFG_LENOVO_TP410,
1743     CXT_PINCFG_LEMOTE_A1004,
1744     CXT_PINCFG_LEMOTE_A1205,
1745     + CXT_PINCFG_COMPAQ_CQ60,
1746     CXT_FIXUP_STEREO_DMIC,
1747     CXT_FIXUP_INC_MIC_BOOST,
1748     CXT_FIXUP_GPIO1,
1749     @@ -3296,6 +3297,15 @@ static const struct hda_fixup cxt_fixups[] = {
1750     .type = HDA_FIXUP_PINS,
1751     .v.pins = cxt_pincfg_lemote,
1752     },
1753     + [CXT_PINCFG_COMPAQ_CQ60] = {
1754     + .type = HDA_FIXUP_PINS,
1755     + .v.pins = (const struct hda_pintbl[]) {
1756     + /* 0x17 was falsely set up as a mic, it should 0x1d */
1757     + { 0x17, 0x400001f0 },
1758     + { 0x1d, 0x97a70120 },
1759     + { }
1760     + }
1761     + },
1762     [CXT_FIXUP_STEREO_DMIC] = {
1763     .type = HDA_FIXUP_FUNC,
1764     .v.func = cxt_fixup_stereo_dmic,
1765     @@ -3316,6 +3326,7 @@ static const struct hda_fixup cxt_fixups[] = {
1766     };
1767    
1768     static const struct snd_pci_quirk cxt5051_fixups[] = {
1769     + SND_PCI_QUIRK(0x103c, 0x360b, "Compaq CQ60", CXT_PINCFG_COMPAQ_CQ60),
1770     SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200),
1771     {}
1772     };