Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.11/0107-3.11.8-all-fixes.patch



Revision 2318
Mon Nov 18 11:55:33 2013 UTC by niro
File size: 125922 bytes
-linux-3.11.8
1 diff --git a/Makefile b/Makefile
2 index 686adf7f2035..7521adbea135 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 11
8 -SUBLEVEL = 7
9 +SUBLEVEL = 8
10 EXTRAVERSION =
11 NAME = Linux for Workgroups
12
13 diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
14 index 0fd1f0d515ff..da7764b5136d 100644
15 --- a/arch/arc/mm/fault.c
16 +++ b/arch/arc/mm/fault.c
17 @@ -17,7 +17,7 @@
18 #include <asm/pgalloc.h>
19 #include <asm/mmu.h>
20
21 -static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
22 +static int handle_vmalloc_fault(unsigned long address)
23 {
24 /*
25 * Synchronize this task's top level page-table
26 @@ -27,7 +27,7 @@ static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
27 pud_t *pud, *pud_k;
28 pmd_t *pmd, *pmd_k;
29
30 - pgd = pgd_offset_fast(mm, address);
31 + pgd = pgd_offset_fast(current->active_mm, address);
32 pgd_k = pgd_offset_k(address);
33
34 if (!pgd_present(*pgd_k))
35 @@ -73,7 +73,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address)
36 * nothing more.
37 */
38 if (address >= VMALLOC_START && address <= VMALLOC_END) {
39 - ret = handle_vmalloc_fault(mm, address);
40 + ret = handle_vmalloc_fault(address);
41 if (unlikely(ret))
42 goto bad_area_nosemaphore;
43 else
44 diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
45 index 37aabd772fbb..d2d58258aea6 100644
46 --- a/arch/parisc/kernel/head.S
47 +++ b/arch/parisc/kernel/head.S
48 @@ -195,6 +195,8 @@ common_stext:
49 ldw MEM_PDC_HI(%r0),%r6
50 depd %r6, 31, 32, %r3 /* move to upper word */
51
52 + mfctl %cr30,%r6 /* PCX-W2 firmware bug */
53 +
54 ldo PDC_PSW(%r0),%arg0 /* 21 */
55 ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
56 ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
57 @@ -203,6 +205,8 @@ common_stext:
58 copy %r0,%arg3
59
60 stext_pdc_ret:
61 + mtctl %r6,%cr30 /* restore task thread info */
62 +
63 /* restore rfi target address*/
64 ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
65 tophys_r1 %r10
66 diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
67 index 829df49dee99..41ebbfebb333 100644
68 --- a/arch/um/kernel/exitcode.c
69 +++ b/arch/um/kernel/exitcode.c
70 @@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
71 const char __user *buffer, size_t count, loff_t *pos)
72 {
73 char *end, buf[sizeof("nnnnn\0")];
74 + size_t size;
75 int tmp;
76
77 - if (copy_from_user(buf, buffer, count))
78 + size = min(count, sizeof(buf));
79 + if (copy_from_user(buf, buffer, size))
80 return -EFAULT;
81
82 tmp = simple_strtol(buf, &end, 0);
83 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
84 index 1191ac1c9d25..a419814cea57 100644
85 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
86 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
87 @@ -113,7 +113,7 @@ static int __init early_get_pnodeid(void)
88 break;
89 case UV3_HUB_PART_NUMBER:
90 case UV3_HUB_PART_NUMBER_X:
91 - uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
92 + uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
93 break;
94 }
95
96 diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
97 index 718eca1850bd..98b67d5f1514 100644
98 --- a/arch/xtensa/kernel/signal.c
99 +++ b/arch/xtensa/kernel/signal.c
100 @@ -341,7 +341,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
101
102 sp = regs->areg[1];
103
104 - if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
105 + if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
106 sp = current->sas_ss_sp + current->sas_ss_size;
107 }
108
109 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
110 index c69fcce505c0..370462fa8e01 100644
111 --- a/drivers/ata/libata-eh.c
112 +++ b/drivers/ata/libata-eh.c
113 @@ -1322,14 +1322,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
114 * should be retried. To be used from EH.
115 *
116 * SCSI midlayer limits the number of retries to scmd->allowed.
117 - * scmd->retries is decremented for commands which get retried
118 + * scmd->allowed is incremented for commands which get retried
119 * due to unrelated failures (qc->err_mask is zero).
120 */
121 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
122 {
123 struct scsi_cmnd *scmd = qc->scsicmd;
124 - if (!qc->err_mask && scmd->retries)
125 - scmd->retries--;
126 + if (!qc->err_mask)
127 + scmd->allowed++;
128 __ata_eh_qc_complete(qc);
129 }
130
131 diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
132 index 6d819a37f647..c9771cc85582 100644
133 --- a/drivers/clk/clk-nomadik.c
134 +++ b/drivers/clk/clk-nomadik.c
135 @@ -27,6 +27,14 @@
136 */
137
138 #define SRC_CR 0x00U
139 +#define SRC_CR_T0_ENSEL BIT(15)
140 +#define SRC_CR_T1_ENSEL BIT(17)
141 +#define SRC_CR_T2_ENSEL BIT(19)
142 +#define SRC_CR_T3_ENSEL BIT(21)
143 +#define SRC_CR_T4_ENSEL BIT(23)
144 +#define SRC_CR_T5_ENSEL BIT(25)
145 +#define SRC_CR_T6_ENSEL BIT(27)
146 +#define SRC_CR_T7_ENSEL BIT(29)
147 #define SRC_XTALCR 0x0CU
148 #define SRC_XTALCR_XTALTIMEN BIT(20)
149 #define SRC_XTALCR_SXTALDIS BIT(19)
150 @@ -543,6 +551,19 @@ void __init nomadik_clk_init(void)
151 __func__, np->name);
152 return;
153 }
154 +
155 + /* Set all timers to use the 2.4 MHz TIMCLK */
156 + val = readl(src_base + SRC_CR);
157 + val |= SRC_CR_T0_ENSEL;
158 + val |= SRC_CR_T1_ENSEL;
159 + val |= SRC_CR_T2_ENSEL;
160 + val |= SRC_CR_T3_ENSEL;
161 + val |= SRC_CR_T4_ENSEL;
162 + val |= SRC_CR_T5_ENSEL;
163 + val |= SRC_CR_T6_ENSEL;
164 + val |= SRC_CR_T7_ENSEL;
165 + writel(val, src_base + SRC_CR);
166 +
167 val = readl(src_base + SRC_XTALCR);
168 pr_info("SXTALO is %s\n",
169 (val & SRC_XTALCR_SXTALDIS) ? "disabled" : "enabled");
170 diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
171 index 67ccf4aa7277..f5e4c21b301f 100644
172 --- a/drivers/clk/versatile/clk-icst.c
173 +++ b/drivers/clk/versatile/clk-icst.c
174 @@ -107,7 +107,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
175
176 vco = icst_hz_to_vco(icst->params, rate);
177 icst->rate = icst_hz(icst->params, vco);
178 - vco_set(icst->vcoreg, icst->lockreg, vco);
179 + vco_set(icst->lockreg, icst->vcoreg, vco);
180 return 0;
181 }
182
183 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
184 index 7cde885011ed..e8c3db810359 100644
185 --- a/drivers/cpufreq/intel_pstate.c
186 +++ b/drivers/cpufreq/intel_pstate.c
187 @@ -629,8 +629,8 @@ static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
188
189 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
190 {
191 - int rc, min_pstate, max_pstate;
192 struct cpudata *cpu;
193 + int rc;
194
195 rc = intel_pstate_init_cpu(policy->cpu);
196 if (rc)
197 @@ -644,9 +644,8 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
198 else
199 policy->policy = CPUFREQ_POLICY_POWERSAVE;
200
201 - intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
202 - policy->min = min_pstate * 100000;
203 - policy->max = max_pstate * 100000;
204 + policy->min = cpu->pstate.min_pstate * 100000;
205 + policy->max = cpu->pstate.turbo_pstate * 100000;
206
207 /* cpuinfo and default policy values */
208 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
209 diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
210 index 13bb4bae64ee..4c5cae6c0758 100644
211 --- a/drivers/cpufreq/s3c64xx-cpufreq.c
212 +++ b/drivers/cpufreq/s3c64xx-cpufreq.c
213 @@ -166,7 +166,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
214 if (freq->frequency == CPUFREQ_ENTRY_INVALID)
215 continue;
216
217 - dvfs = &s3c64xx_dvfs_table[freq->index];
218 + dvfs = &s3c64xx_dvfs_table[freq->driver_data];
219 found = 0;
220
221 for (i = 0; i < count; i++) {
222 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
223 index 99fcd7c32ea2..6dd71735cab4 100644
224 --- a/drivers/gpu/drm/drm_drv.c
225 +++ b/drivers/gpu/drm/drm_drv.c
226 @@ -407,9 +407,16 @@ long drm_ioctl(struct file *filp,
227 cmd = ioctl->cmd_drv;
228 }
229 else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
230 + u32 drv_size;
231 +
232 ioctl = &drm_ioctls[nr];
233 - cmd = ioctl->cmd;
234 +
235 + drv_size = _IOC_SIZE(ioctl->cmd);
236 usize = asize = _IOC_SIZE(cmd);
237 + if (drv_size > asize)
238 + asize = drv_size;
239 +
240 + cmd = ioctl->cmd;
241 } else
242 goto err_i1;
243
244 diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
245 index 3acec8c48166..6aa6ebd53f48 100644
246 --- a/drivers/gpu/drm/i915/intel_crt.c
247 +++ b/drivers/gpu/drm/i915/intel_crt.c
248 @@ -84,8 +84,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
249 return true;
250 }
251
252 -static void intel_crt_get_config(struct intel_encoder *encoder,
253 - struct intel_crtc_config *pipe_config)
254 +static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
255 {
256 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
257 struct intel_crt *crt = intel_encoder_to_crt(encoder);
258 @@ -103,7 +102,27 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
259 else
260 flags |= DRM_MODE_FLAG_NVSYNC;
261
262 - pipe_config->adjusted_mode.flags |= flags;
263 + return flags;
264 +}
265 +
266 +static void intel_crt_get_config(struct intel_encoder *encoder,
267 + struct intel_crtc_config *pipe_config)
268 +{
269 + struct drm_device *dev = encoder->base.dev;
270 +
271 + pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
272 +}
273 +
274 +static void hsw_crt_get_config(struct intel_encoder *encoder,
275 + struct intel_crtc_config *pipe_config)
276 +{
277 + intel_ddi_get_config(encoder, pipe_config);
278 +
279 + pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
280 + DRM_MODE_FLAG_NHSYNC |
281 + DRM_MODE_FLAG_PVSYNC |
282 + DRM_MODE_FLAG_NVSYNC);
283 + pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
284 }
285
286 /* Note: The caller is required to filter out dpms modes not supported by the
287 @@ -802,7 +821,10 @@ void intel_crt_init(struct drm_device *dev)
288 crt->base.compute_config = intel_crt_compute_config;
289 crt->base.disable = intel_disable_crt;
290 crt->base.enable = intel_enable_crt;
291 - crt->base.get_config = intel_crt_get_config;
292 + if (IS_HASWELL(dev))
293 + crt->base.get_config = hsw_crt_get_config;
294 + else
295 + crt->base.get_config = intel_crt_get_config;
296 if (I915_HAS_HOTPLUG(dev))
297 crt->base.hpd_pin = HPD_CRT;
298 if (HAS_DDI(dev))
299 diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
300 index b042ee5c4070..5a6368dc414c 100644
301 --- a/drivers/gpu/drm/i915/intel_ddi.c
302 +++ b/drivers/gpu/drm/i915/intel_ddi.c
303 @@ -1261,8 +1261,8 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
304 intel_dp_check_link_status(intel_dp);
305 }
306
307 -static void intel_ddi_get_config(struct intel_encoder *encoder,
308 - struct intel_crtc_config *pipe_config)
309 +void intel_ddi_get_config(struct intel_encoder *encoder,
310 + struct intel_crtc_config *pipe_config)
311 {
312 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
313 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
314 @@ -1280,6 +1280,23 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
315 flags |= DRM_MODE_FLAG_NVSYNC;
316
317 pipe_config->adjusted_mode.flags |= flags;
318 +
319 + switch (temp & TRANS_DDI_BPC_MASK) {
320 + case TRANS_DDI_BPC_6:
321 + pipe_config->pipe_bpp = 18;
322 + break;
323 + case TRANS_DDI_BPC_8:
324 + pipe_config->pipe_bpp = 24;
325 + break;
326 + case TRANS_DDI_BPC_10:
327 + pipe_config->pipe_bpp = 30;
328 + break;
329 + case TRANS_DDI_BPC_12:
330 + pipe_config->pipe_bpp = 36;
331 + break;
332 + default:
333 + break;
334 + }
335 }
336
337 static void intel_ddi_destroy(struct drm_encoder *encoder)
338 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
339 index 90a7c1773a9a..ad2a258476da 100644
340 --- a/drivers/gpu/drm/i915/intel_display.c
341 +++ b/drivers/gpu/drm/i915/intel_display.c
342 @@ -2251,9 +2251,10 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
343 FDI_FE_ERRC_ENABLE);
344 }
345
346 -static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc)
347 +static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
348 {
349 - return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder;
350 + return crtc->base.enabled && crtc->active &&
351 + crtc->config.has_pch_encoder;
352 }
353
354 static void ivb_modeset_global_resources(struct drm_device *dev)
355 @@ -2901,6 +2902,48 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
356 I915_READ(VSYNCSHIFT(cpu_transcoder)));
357 }
358
359 +static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
360 +{
361 + struct drm_i915_private *dev_priv = dev->dev_private;
362 + uint32_t temp;
363 +
364 + temp = I915_READ(SOUTH_CHICKEN1);
365 + if (temp & FDI_BC_BIFURCATION_SELECT)
366 + return;
367 +
368 + WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
369 + WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
370 +
371 + temp |= FDI_BC_BIFURCATION_SELECT;
372 + DRM_DEBUG_KMS("enabling fdi C rx\n");
373 + I915_WRITE(SOUTH_CHICKEN1, temp);
374 + POSTING_READ(SOUTH_CHICKEN1);
375 +}
376 +
377 +static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
378 +{
379 + struct drm_device *dev = intel_crtc->base.dev;
380 + struct drm_i915_private *dev_priv = dev->dev_private;
381 +
382 + switch (intel_crtc->pipe) {
383 + case PIPE_A:
384 + break;
385 + case PIPE_B:
386 + if (intel_crtc->config.fdi_lanes > 2)
387 + WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
388 + else
389 + cpt_enable_fdi_bc_bifurcation(dev);
390 +
391 + break;
392 + case PIPE_C:
393 + cpt_enable_fdi_bc_bifurcation(dev);
394 +
395 + break;
396 + default:
397 + BUG();
398 + }
399 +}
400 +
401 /*
402 * Enable PCH resources required for PCH ports:
403 * - PCH PLLs
404 @@ -2919,6 +2962,9 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
405
406 assert_pch_transcoder_disabled(dev_priv, pipe);
407
408 + if (IS_IVYBRIDGE(dev))
409 + ivybridge_update_fdi_bc_bifurcation(intel_crtc);
410 +
411 /* Write the TU size bits before fdi link training, so that error
412 * detection works. */
413 I915_WRITE(FDI_RX_TUSIZE1(pipe),
414 @@ -4943,6 +4989,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
415 if (!(tmp & PIPECONF_ENABLE))
416 return false;
417
418 + if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
419 + switch (tmp & PIPECONF_BPC_MASK) {
420 + case PIPECONF_6BPC:
421 + pipe_config->pipe_bpp = 18;
422 + break;
423 + case PIPECONF_8BPC:
424 + pipe_config->pipe_bpp = 24;
425 + break;
426 + case PIPECONF_10BPC:
427 + pipe_config->pipe_bpp = 30;
428 + break;
429 + default:
430 + break;
431 + }
432 + }
433 +
434 intel_get_pipe_timings(crtc, pipe_config);
435
436 i9xx_get_pfit_config(crtc, pipe_config);
437 @@ -5496,48 +5558,6 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
438 return true;
439 }
440
441 -static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
442 -{
443 - struct drm_i915_private *dev_priv = dev->dev_private;
444 - uint32_t temp;
445 -
446 - temp = I915_READ(SOUTH_CHICKEN1);
447 - if (temp & FDI_BC_BIFURCATION_SELECT)
448 - return;
449 -
450 - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
451 - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
452 -
453 - temp |= FDI_BC_BIFURCATION_SELECT;
454 - DRM_DEBUG_KMS("enabling fdi C rx\n");
455 - I915_WRITE(SOUTH_CHICKEN1, temp);
456 - POSTING_READ(SOUTH_CHICKEN1);
457 -}
458 -
459 -static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
460 -{
461 - struct drm_device *dev = intel_crtc->base.dev;
462 - struct drm_i915_private *dev_priv = dev->dev_private;
463 -
464 - switch (intel_crtc->pipe) {
465 - case PIPE_A:
466 - break;
467 - case PIPE_B:
468 - if (intel_crtc->config.fdi_lanes > 2)
469 - WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
470 - else
471 - cpt_enable_fdi_bc_bifurcation(dev);
472 -
473 - break;
474 - case PIPE_C:
475 - cpt_enable_fdi_bc_bifurcation(dev);
476 -
477 - break;
478 - default:
479 - BUG();
480 - }
481 -}
482 -
483 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
484 {
485 /*
486 @@ -5752,9 +5772,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
487 &intel_crtc->config.fdi_m_n);
488 }
489
490 - if (IS_IVYBRIDGE(dev))
491 - ivybridge_update_fdi_bc_bifurcation(intel_crtc);
492 -
493 ironlake_set_pipeconf(crtc);
494
495 /* Set up the display plane register */
496 @@ -5821,6 +5838,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
497 if (!(tmp & PIPECONF_ENABLE))
498 return false;
499
500 + switch (tmp & PIPECONF_BPC_MASK) {
501 + case PIPECONF_6BPC:
502 + pipe_config->pipe_bpp = 18;
503 + break;
504 + case PIPECONF_8BPC:
505 + pipe_config->pipe_bpp = 24;
506 + break;
507 + case PIPECONF_10BPC:
508 + pipe_config->pipe_bpp = 30;
509 + break;
510 + case PIPECONF_12BPC:
511 + pipe_config->pipe_bpp = 36;
512 + break;
513 + default:
514 + break;
515 + }
516 +
517 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
518 struct intel_shared_dpll *pll;
519
520 @@ -8147,6 +8181,9 @@ intel_pipe_config_compare(struct drm_device *dev,
521 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
522 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
523
524 + if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
525 + PIPE_CONF_CHECK_I(pipe_bpp);
526 +
527 #undef PIPE_CONF_CHECK_X
528 #undef PIPE_CONF_CHECK_I
529 #undef PIPE_CONF_CHECK_FLAGS
530 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
531 index 3aed1fe0aa51..3a0f3a2d1666 100644
532 --- a/drivers/gpu/drm/i915/intel_dp.c
533 +++ b/drivers/gpu/drm/i915/intel_dp.c
534 @@ -276,29 +276,13 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
535 return status;
536 }
537
538 -static int
539 -intel_dp_aux_ch(struct intel_dp *intel_dp,
540 - uint8_t *send, int send_bytes,
541 - uint8_t *recv, int recv_size)
542 +static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
543 + int index)
544 {
545 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
546 struct drm_device *dev = intel_dig_port->base.base.dev;
547 struct drm_i915_private *dev_priv = dev->dev_private;
548 - uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
549 - uint32_t ch_data = ch_ctl + 4;
550 - int i, ret, recv_bytes;
551 - uint32_t status;
552 - uint32_t aux_clock_divider;
553 - int try, precharge;
554 - bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
555
556 - /* dp aux is extremely sensitive to irq latency, hence request the
557 - * lowest possible wakeup latency and so prevent the cpu from going into
558 - * deep sleep states.
559 - */
560 - pm_qos_update_request(&dev_priv->pm_qos, 0);
561 -
562 - intel_dp_check_edp(intel_dp);
563 /* The clock divider is based off the hrawclk,
564 * and would like to run at 2MHz. So, take the
565 * hrawclk value and divide by 2 and use that
566 @@ -307,23 +291,53 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
567 * clock divider.
568 */
569 if (IS_VALLEYVIEW(dev)) {
570 - aux_clock_divider = 100;
571 + return index ? 0 : 100;
572 } else if (intel_dig_port->port == PORT_A) {
573 + if (index)
574 + return 0;
575 if (HAS_DDI(dev))
576 - aux_clock_divider = DIV_ROUND_CLOSEST(
577 - intel_ddi_get_cdclk_freq(dev_priv), 2000);
578 + return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
579 else if (IS_GEN6(dev) || IS_GEN7(dev))
580 - aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
581 + return 200; /* SNB & IVB eDP input clock at 400Mhz */
582 else
583 - aux_clock_divider = 225; /* eDP input clock at 450Mhz */
584 + return 225; /* eDP input clock at 450Mhz */
585 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
586 /* Workaround for non-ULT HSW */
587 - aux_clock_divider = 74;
588 + switch (index) {
589 + case 0: return 63;
590 + case 1: return 72;
591 + default: return 0;
592 + }
593 } else if (HAS_PCH_SPLIT(dev)) {
594 - aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
595 + return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
596 } else {
597 - aux_clock_divider = intel_hrawclk(dev) / 2;
598 + return index ? 0 :intel_hrawclk(dev) / 2;
599 }
600 +}
601 +
602 +static int
603 +intel_dp_aux_ch(struct intel_dp *intel_dp,
604 + uint8_t *send, int send_bytes,
605 + uint8_t *recv, int recv_size)
606 +{
607 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
608 + struct drm_device *dev = intel_dig_port->base.base.dev;
609 + struct drm_i915_private *dev_priv = dev->dev_private;
610 + uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
611 + uint32_t ch_data = ch_ctl + 4;
612 + uint32_t aux_clock_divider;
613 + int i, ret, recv_bytes;
614 + uint32_t status;
615 + int try, precharge, clock = 0;
616 + bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
617 +
618 + /* dp aux is extremely sensitive to irq latency, hence request the
619 + * lowest possible wakeup latency and so prevent the cpu from going into
620 + * deep sleep states.
621 + */
622 + pm_qos_update_request(&dev_priv->pm_qos, 0);
623 +
624 + intel_dp_check_edp(intel_dp);
625
626 if (IS_GEN6(dev))
627 precharge = 3;
628 @@ -345,37 +359,41 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
629 goto out;
630 }
631
632 - /* Must try at least 3 times according to DP spec */
633 - for (try = 0; try < 5; try++) {
634 - /* Load the send data into the aux channel data registers */
635 - for (i = 0; i < send_bytes; i += 4)
636 - I915_WRITE(ch_data + i,
637 - pack_aux(send + i, send_bytes - i));
638 -
639 - /* Send the command and wait for it to complete */
640 - I915_WRITE(ch_ctl,
641 - DP_AUX_CH_CTL_SEND_BUSY |
642 - (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
643 - DP_AUX_CH_CTL_TIME_OUT_400us |
644 - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
645 - (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
646 - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
647 - DP_AUX_CH_CTL_DONE |
648 - DP_AUX_CH_CTL_TIME_OUT_ERROR |
649 - DP_AUX_CH_CTL_RECEIVE_ERROR);
650 -
651 - status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
652 -
653 - /* Clear done status and any errors */
654 - I915_WRITE(ch_ctl,
655 - status |
656 - DP_AUX_CH_CTL_DONE |
657 - DP_AUX_CH_CTL_TIME_OUT_ERROR |
658 - DP_AUX_CH_CTL_RECEIVE_ERROR);
659 -
660 - if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
661 - DP_AUX_CH_CTL_RECEIVE_ERROR))
662 - continue;
663 + while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
664 + /* Must try at least 3 times according to DP spec */
665 + for (try = 0; try < 5; try++) {
666 + /* Load the send data into the aux channel data registers */
667 + for (i = 0; i < send_bytes; i += 4)
668 + I915_WRITE(ch_data + i,
669 + pack_aux(send + i, send_bytes - i));
670 +
671 + /* Send the command and wait for it to complete */
672 + I915_WRITE(ch_ctl,
673 + DP_AUX_CH_CTL_SEND_BUSY |
674 + (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
675 + DP_AUX_CH_CTL_TIME_OUT_400us |
676 + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
677 + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
678 + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
679 + DP_AUX_CH_CTL_DONE |
680 + DP_AUX_CH_CTL_TIME_OUT_ERROR |
681 + DP_AUX_CH_CTL_RECEIVE_ERROR);
682 +
683 + status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
684 +
685 + /* Clear done status and any errors */
686 + I915_WRITE(ch_ctl,
687 + status |
688 + DP_AUX_CH_CTL_DONE |
689 + DP_AUX_CH_CTL_TIME_OUT_ERROR |
690 + DP_AUX_CH_CTL_RECEIVE_ERROR);
691 +
692 + if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
693 + DP_AUX_CH_CTL_RECEIVE_ERROR))
694 + continue;
695 + if (status & DP_AUX_CH_CTL_DONE)
696 + break;
697 + }
698 if (status & DP_AUX_CH_CTL_DONE)
699 break;
700 }
701 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
702 index b7d6e09456ce..ddf7e2f6dce4 100644
703 --- a/drivers/gpu/drm/i915/intel_drv.h
704 +++ b/drivers/gpu/drm/i915/intel_drv.h
705 @@ -816,6 +816,8 @@ extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
706 extern bool
707 intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
708 extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
709 +extern void intel_ddi_get_config(struct intel_encoder *encoder,
710 + struct intel_crtc_config *pipe_config);
711
712 extern void intel_display_handle_reset(struct drm_device *dev);
713 extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
714 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
715 index 61348eae2f04..44533dde25c1 100644
716 --- a/drivers/gpu/drm/i915/intel_lvds.c
717 +++ b/drivers/gpu/drm/i915/intel_lvds.c
718 @@ -696,6 +696,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
719 },
720 {
721 .callback = intel_no_lvds_dmi_callback,
722 + .ident = "Intel D410PT",
723 + .matches = {
724 + DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
725 + DMI_MATCH(DMI_BOARD_NAME, "D410PT"),
726 + },
727 + },
728 + {
729 + .callback = intel_no_lvds_dmi_callback,
730 + .ident = "Intel D425KT",
731 + .matches = {
732 + DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
733 + DMI_EXACT_MATCH(DMI_BOARD_NAME, "D425KT"),
734 + },
735 + },
736 + {
737 + .callback = intel_no_lvds_dmi_callback,
738 .ident = "Intel D510MO",
739 .matches = {
740 DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
741 diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
742 index 7c2a28531cab..56ed69ee6b91 100644
743 --- a/drivers/gpu/drm/radeon/atombios_encoders.c
744 +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
745 @@ -1657,7 +1657,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
746 * does the same thing and more.
747 */
748 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
749 - (rdev->family != CHIP_RS880))
750 + (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
751 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
752 }
753 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
754 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
755 index fc55256abda0..263d14bc73d7 100644
756 --- a/drivers/gpu/drm/radeon/ni.c
757 +++ b/drivers/gpu/drm/radeon/ni.c
758 @@ -800,6 +800,7 @@ int ni_init_microcode(struct radeon_device *rdev)
759 fw_name);
760 release_firmware(rdev->smc_fw);
761 rdev->smc_fw = NULL;
762 + err = 0;
763 } else if (rdev->smc_fw->size != smc_req_size) {
764 printk(KERN_ERR
765 "ni_mc: Bogus length %zu in firmware \"%s\"\n",
766 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
767 index 739ffbe265cc..c2d7eb64eb14 100644
768 --- a/drivers/gpu/drm/radeon/r600.c
769 +++ b/drivers/gpu/drm/radeon/r600.c
770 @@ -2310,6 +2310,7 @@ int r600_init_microcode(struct radeon_device *rdev)
771 fw_name);
772 release_firmware(rdev->smc_fw);
773 rdev->smc_fw = NULL;
774 + err = 0;
775 } else if (rdev->smc_fw->size != smc_req_size) {
776 printk(KERN_ERR
777 "smc: Bogus length %zu in firmware \"%s\"\n",
778 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
779 index 7af2113378a8..bf1fcb60b4dc 100644
780 --- a/drivers/gpu/drm/radeon/si.c
781 +++ b/drivers/gpu/drm/radeon/si.c
782 @@ -1669,6 +1669,7 @@ static int si_init_microcode(struct radeon_device *rdev)
783 fw_name);
784 release_firmware(rdev->smc_fw);
785 rdev->smc_fw = NULL;
786 + err = 0;
787 } else if (rdev->smc_fw->size != smc_req_size) {
788 printk(KERN_ERR
789 "si_smc: Bogus length %zu in firmware \"%s\"\n",
790 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
791 index 78e21649d48a..01642a3ed837 100644
792 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
793 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
794 @@ -738,9 +738,17 @@ static void vmw_postclose(struct drm_device *dev,
795 struct vmw_fpriv *vmw_fp;
796
797 vmw_fp = vmw_fpriv(file_priv);
798 - ttm_object_file_release(&vmw_fp->tfile);
799 - if (vmw_fp->locked_master)
800 +
801 + if (vmw_fp->locked_master) {
802 + struct vmw_master *vmaster =
803 + vmw_master(vmw_fp->locked_master);
804 +
805 + ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
806 + ttm_vt_unlock(&vmaster->lock);
807 drm_master_put(&vmw_fp->locked_master);
808 + }
809 +
810 + ttm_object_file_release(&vmw_fp->tfile);
811 kfree(vmw_fp);
812 }
813
814 @@ -940,14 +948,13 @@ static void vmw_master_drop(struct drm_device *dev,
815
816 vmw_fp->locked_master = drm_master_get(file_priv->master);
817 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
818 - vmw_execbuf_release_pinned_bo(dev_priv);
819 -
820 if (unlikely((ret != 0))) {
821 DRM_ERROR("Unable to lock TTM at VT switch.\n");
822 drm_master_put(&vmw_fp->locked_master);
823 }
824
825 - ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
826 + ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
827 + vmw_execbuf_release_pinned_bo(dev_priv);
828
829 if (!dev_priv->enable_fb) {
830 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
831 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
832 index 7953d1f90b63..ad2b05678503 100644
833 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
834 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
835 @@ -970,7 +970,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
836 if (new_backup)
837 res->backup_offset = new_backup_offset;
838
839 - if (!res->func->may_evict)
840 + if (!res->func->may_evict || res->id == -1)
841 return;
842
843 write_lock(&dev_priv->resource_lock);
844 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
845 index 9f60d631f733..15323dab2c85 100644
846 --- a/drivers/hid/hid-core.c
847 +++ b/drivers/hid/hid-core.c
848 @@ -1827,6 +1827,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
849
850 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
851 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
852 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
853 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
854 { }
855 };
856 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
857 index 339623c1f7d3..e2808f5cc313 100644
858 --- a/drivers/hid/hid-ids.h
859 +++ b/drivers/hid/hid-ids.h
860 @@ -633,6 +633,7 @@
861 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
862
863 #define USB_VENDOR_ID_NINTENDO 0x057e
864 +#define USB_VENDOR_ID_NINTENDO2 0x054c
865 #define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
866 #define USB_DEVICE_ID_NINTENDO_WIIMOTE2 0x0330
867
868 diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
869 index 660209824e56..2dd88abdf24c 100644
870 --- a/drivers/hid/hid-wiimote-core.c
871 +++ b/drivers/hid/hid-wiimote-core.c
872 @@ -838,7 +838,8 @@ static void wiimote_init_set_type(struct wiimote_data *wdata,
873 goto done;
874 }
875
876 - if (vendor == USB_VENDOR_ID_NINTENDO) {
877 + if (vendor == USB_VENDOR_ID_NINTENDO ||
878 + vendor == USB_VENDOR_ID_NINTENDO2) {
879 if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) {
880 devtype = WIIMOTE_DEV_GEN10;
881 goto done;
882 @@ -1860,6 +1861,8 @@ static void wiimote_hid_remove(struct hid_device *hdev)
883 static const struct hid_device_id wiimote_hid_devices[] = {
884 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
885 USB_DEVICE_ID_NINTENDO_WIIMOTE) },
886 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2,
887 + USB_DEVICE_ID_NINTENDO_WIIMOTE) },
888 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
889 USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
890 { }
891 diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
892 index b6a74bcbb08f..2a7f0dd6abab 100644
893 --- a/drivers/md/bcache/request.c
894 +++ b/drivers/md/bcache/request.c
895 @@ -1000,7 +1000,7 @@ static void request_write(struct cached_dev *dc, struct search *s)
896
897 if (bio->bi_rw & REQ_FLUSH) {
898 /* Also need to send a flush to the backing device */
899 - struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
900 + struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
901 dc->disk.bio_split);
902
903 flush->bi_rw = WRITE_FLUSH;
904 diff --git a/drivers/md/md.c b/drivers/md/md.c
905 index 9f13e13506ef..866f48975ea6 100644
906 --- a/drivers/md/md.c
907 +++ b/drivers/md/md.c
908 @@ -8093,6 +8093,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
909 u64 *p;
910 int lo, hi;
911 int rv = 1;
912 + unsigned long flags;
913
914 if (bb->shift < 0)
915 /* badblocks are disabled */
916 @@ -8107,7 +8108,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
917 sectors = next - s;
918 }
919
920 - write_seqlock_irq(&bb->lock);
921 + write_seqlock_irqsave(&bb->lock, flags);
922
923 p = bb->page;
924 lo = 0;
925 @@ -8223,7 +8224,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
926 bb->changed = 1;
927 if (!acknowledged)
928 bb->unacked_exist = 1;
929 - write_sequnlock_irq(&bb->lock);
930 + write_sequnlock_irqrestore(&bb->lock, flags);
931
932 return rv;
933 }
934 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
935 index d60412c7f995..aacf6bf352d8 100644
936 --- a/drivers/md/raid1.c
937 +++ b/drivers/md/raid1.c
938 @@ -1479,6 +1479,7 @@ static int raid1_spare_active(struct mddev *mddev)
939 }
940 }
941 if (rdev
942 + && rdev->recovery_offset == MaxSector
943 && !test_bit(Faulty, &rdev->flags)
944 && !test_and_set_bit(In_sync, &rdev->flags)) {
945 count++;
946 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
947 index df7b0a06b0ea..73dc8a377522 100644
948 --- a/drivers/md/raid10.c
949 +++ b/drivers/md/raid10.c
950 @@ -1782,6 +1782,7 @@ static int raid10_spare_active(struct mddev *mddev)
951 }
952 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
953 } else if (tmp->rdev
954 + && tmp->rdev->recovery_offset == MaxSector
955 && !test_bit(Faulty, &tmp->rdev->flags)
956 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
957 count++;
958 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
959 index 78ea44336e75..d825059d00ce 100644
960 --- a/drivers/md/raid5.c
961 +++ b/drivers/md/raid5.c
962 @@ -668,6 +668,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
963 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
964 bi->bi_io_vec[0].bv_offset = 0;
965 bi->bi_size = STRIPE_SIZE;
966 + /*
967 + * If this is discard request, set bi_vcnt 0. We don't
968 + * want to confuse SCSI because SCSI will replace payload
969 + */
970 + if (rw & REQ_DISCARD)
971 + bi->bi_vcnt = 0;
972 if (rrdev)
973 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
974
975 @@ -706,6 +712,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
976 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
977 rbi->bi_io_vec[0].bv_offset = 0;
978 rbi->bi_size = STRIPE_SIZE;
979 + /*
980 + * If this is discard request, set bi_vcnt 0. We don't
981 + * want to confuse SCSI because SCSI will replace payload
982 + */
983 + if (rw & REQ_DISCARD)
984 + rbi->bi_vcnt = 0;
985 if (conf->mddev->gendisk)
986 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
987 rbi, disk_devt(conf->mddev->gendisk),
988 @@ -2800,6 +2812,14 @@ static void handle_stripe_clean_event(struct r5conf *conf,
989 }
990 /* now that discard is done we can proceed with any sync */
991 clear_bit(STRIPE_DISCARD, &sh->state);
992 + /*
993 + * SCSI discard will change some bio fields and the stripe has
994 + * no updated data, so remove it from hash list and the stripe
995 + * will be reinitialized
996 + */
997 + spin_lock_irq(&conf->device_lock);
998 + remove_hash(sh);
999 + spin_unlock_irq(&conf->device_lock);
1000 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
1001 set_bit(STRIPE_HANDLE, &sh->state);
1002
1003 diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
1004 index dbbe97ae121e..c1e654618891 100644
1005 --- a/drivers/net/can/at91_can.c
1006 +++ b/drivers/net/can/at91_can.c
1007 @@ -1405,10 +1405,10 @@ static int at91_can_remove(struct platform_device *pdev)
1008
1009 static const struct platform_device_id at91_can_id_table[] = {
1010 {
1011 - .name = "at91_can",
1012 + .name = "at91sam9x5_can",
1013 .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
1014 }, {
1015 - .name = "at91sam9x5_can",
1016 + .name = "at91_can",
1017 .driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
1018 }, {
1019 /* sentinel */
1020 diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
1021 index 7b0be0910f4b..d1968c83c561 100644
1022 --- a/drivers/net/can/flexcan.c
1023 +++ b/drivers/net/can/flexcan.c
1024 @@ -62,7 +62,7 @@
1025 #define FLEXCAN_MCR_BCC BIT(16)
1026 #define FLEXCAN_MCR_LPRIO_EN BIT(13)
1027 #define FLEXCAN_MCR_AEN BIT(12)
1028 -#define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf)
1029 +#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f)
1030 #define FLEXCAN_MCR_IDAM_A (0 << 8)
1031 #define FLEXCAN_MCR_IDAM_B (1 << 8)
1032 #define FLEXCAN_MCR_IDAM_C (2 << 8)
1033 @@ -736,9 +736,11 @@ static int flexcan_chip_start(struct net_device *dev)
1034 *
1035 */
1036 reg_mcr = flexcan_read(&regs->mcr);
1037 + reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
1038 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
1039 FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
1040 - FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS;
1041 + FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS |
1042 + FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
1043 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
1044 flexcan_write(reg_mcr, &regs->mcr);
1045
1046 @@ -783,6 +785,10 @@ static int flexcan_chip_start(struct net_device *dev)
1047 &regs->cantxfg[i].can_ctrl);
1048 }
1049
1050 + /* Abort any pending TX, mark Mailbox as INACTIVE */
1051 + flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
1052 + &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
1053 +
1054 /* acceptance mask/acceptance code (accept everything) */
1055 flexcan_write(0x0, &regs->rxgmask);
1056 flexcan_write(0x0, &regs->rx14mask);
1057 @@ -979,9 +985,9 @@ static void unregister_flexcandev(struct net_device *dev)
1058 }
1059
1060 static const struct of_device_id flexcan_of_match[] = {
1061 - { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
1062 - { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
1063 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
1064 + { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
1065 + { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
1066 { /* sentinel */ },
1067 };
1068 MODULE_DEVICE_TABLE(of, flexcan_of_match);
1069 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
1070 index cb5a65553ac7..5697c7acd5c0 100644
1071 --- a/drivers/net/wireless/ath/ath9k/main.c
1072 +++ b/drivers/net/wireless/ath/ath9k/main.c
1073 @@ -208,6 +208,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
1074 struct ath_hw *ah = sc->sc_ah;
1075 struct ath_common *common = ath9k_hw_common(ah);
1076 unsigned long flags;
1077 + int i;
1078
1079 if (ath_startrecv(sc) != 0) {
1080 ath_err(common, "Unable to restart recv logic\n");
1081 @@ -235,6 +236,15 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
1082 }
1083 work:
1084 ath_restart_work(sc);
1085 +
1086 + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1087 + if (!ATH_TXQ_SETUP(sc, i))
1088 + continue;
1089 +
1090 + spin_lock_bh(&sc->tx.txq[i].axq_lock);
1091 + ath_txq_schedule(sc, &sc->tx.txq[i]);
1092 + spin_unlock_bh(&sc->tx.txq[i].axq_lock);
1093 + }
1094 }
1095
1096 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
1097 @@ -542,21 +552,10 @@ chip_reset:
1098
1099 static int ath_reset(struct ath_softc *sc)
1100 {
1101 - int i, r;
1102 + int r;
1103
1104 ath9k_ps_wakeup(sc);
1105 -
1106 r = ath_reset_internal(sc, NULL);
1107 -
1108 - for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1109 - if (!ATH_TXQ_SETUP(sc, i))
1110 - continue;
1111 -
1112 - spin_lock_bh(&sc->tx.txq[i].axq_lock);
1113 - ath_txq_schedule(sc, &sc->tx.txq[i]);
1114 - spin_unlock_bh(&sc->tx.txq[i].axq_lock);
1115 - }
1116 -
1117 ath9k_ps_restore(sc);
1118
1119 return r;
1120 diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
1121 index 30d45e2fc193..8ac305be68f4 100644
1122 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
1123 +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
1124 @@ -240,6 +240,12 @@ const struct iwl_cfg iwl6035_2agn_cfg = {
1125 .ht_params = &iwl6000_ht_params,
1126 };
1127
1128 +const struct iwl_cfg iwl6035_2agn_sff_cfg = {
1129 + .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
1130 + IWL_DEVICE_6035,
1131 + .ht_params = &iwl6000_ht_params,
1132 +};
1133 +
1134 const struct iwl_cfg iwl1030_bgn_cfg = {
1135 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
1136 IWL_DEVICE_6030,
1137 diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
1138 index 83b9ff6ff3ad..d97884618835 100644
1139 --- a/drivers/net/wireless/iwlwifi/iwl-config.h
1140 +++ b/drivers/net/wireless/iwlwifi/iwl-config.h
1141 @@ -277,6 +277,7 @@ extern const struct iwl_cfg iwl2000_2bgn_cfg;
1142 extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
1143 extern const struct iwl_cfg iwl2030_2bgn_cfg;
1144 extern const struct iwl_cfg iwl6035_2agn_cfg;
1145 +extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
1146 extern const struct iwl_cfg iwl105_bgn_cfg;
1147 extern const struct iwl_cfg iwl105_bgn_d_cfg;
1148 extern const struct iwl_cfg iwl135_bgn_cfg;
1149 diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
1150 index acdff6b67e04..c55d88f3cfd2 100644
1151 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c
1152 +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
1153 @@ -392,6 +392,11 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
1154 return false;
1155 }
1156
1157 + /*
1158 + * If scan cannot be aborted, it means that we had a
1159 + * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
1160 + * ieee80211_scan_completed already.
1161 + */
1162 IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
1163 *resp);
1164 return true;
1165 @@ -415,14 +420,19 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
1166 SCAN_COMPLETE_NOTIFICATION };
1167 int ret;
1168
1169 + if (mvm->scan_status == IWL_MVM_SCAN_NONE)
1170 + return;
1171 +
1172 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
1173 scan_abort_notif,
1174 ARRAY_SIZE(scan_abort_notif),
1175 iwl_mvm_scan_abort_notif, NULL);
1176
1177 - ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
1178 + ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD,
1179 + CMD_SYNC | CMD_SEND_IN_RFKILL, 0, NULL);
1180 if (ret) {
1181 IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
1182 + /* mac80211's state will be cleaned in the fw_restart flow */
1183 goto out_remove_notif;
1184 }
1185
1186 diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
1187 index ff13458efc27..058c6aa58b7a 100644
1188 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c
1189 +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
1190 @@ -139,13 +139,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
1191
1192 /* 6x00 Series */
1193 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
1194 + {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
1195 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
1196 + {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
1197 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
1198 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
1199 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
1200 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
1201 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
1202 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
1203 + {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
1204 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
1205 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
1206
1207 @@ -153,12 +156,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
1208 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
1209 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
1210 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
1211 + {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
1212 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
1213 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
1214 + {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
1215 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
1216 + {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
1217 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
1218 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
1219 {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
1220 + {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
1221 {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
1222 {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
1223 {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
1224 @@ -240,8 +247,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
1225
1226 /* 6x35 Series */
1227 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
1228 + {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
1229 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
1230 + {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
1231 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
1232 + {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
1233 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
1234 {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
1235
1236 diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
1237 index 108267d4e08c..02cc93b673c1 100644
1238 --- a/drivers/net/wireless/mwifiex/main.c
1239 +++ b/drivers/net/wireless/mwifiex/main.c
1240 @@ -354,10 +354,12 @@ process_start:
1241 }
1242 } while (true);
1243
1244 - if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter))
1245 + spin_lock_irqsave(&adapter->main_proc_lock, flags);
1246 + if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
1247 + spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
1248 goto process_start;
1249 + }
1250
1251 - spin_lock_irqsave(&adapter->main_proc_lock, flags);
1252 adapter->mwifiex_processing = false;
1253 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
1254
1255 diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
1256 index 76d95deb274b..dc49e525ae5e 100644
1257 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c
1258 +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
1259 @@ -105,13 +105,11 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
1260 goto exit_release_regions;
1261 }
1262
1263 - pci_enable_msi(pci_dev);
1264 -
1265 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
1266 if (!hw) {
1267 rt2x00_probe_err("Failed to allocate hardware\n");
1268 retval = -ENOMEM;
1269 - goto exit_disable_msi;
1270 + goto exit_release_regions;
1271 }
1272
1273 pci_set_drvdata(pci_dev, hw);
1274 @@ -152,9 +150,6 @@ exit_free_reg:
1275 exit_free_device:
1276 ieee80211_free_hw(hw);
1277
1278 -exit_disable_msi:
1279 - pci_disable_msi(pci_dev);
1280 -
1281 exit_release_regions:
1282 pci_release_regions(pci_dev);
1283
1284 @@ -179,8 +174,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
1285 rt2x00pci_free_reg(rt2x00dev);
1286 ieee80211_free_hw(hw);
1287
1288 - pci_disable_msi(pci_dev);
1289 -
1290 /*
1291 * Free the PCI device data.
1292 */
1293 diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
1294 index 763cf1defab5..5a060e537fbe 100644
1295 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
1296 +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
1297 @@ -343,7 +343,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
1298 (bool)GET_RX_DESC_PAGGR(pdesc));
1299 rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
1300 if (phystatus) {
1301 - p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
1302 + p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
1303 + stats->rx_bufshift);
1304 rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
1305 p_drvinfo);
1306 }
1307 diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
1308 index 2dacd19e1b8a..b9bf8b551e3c 100644
1309 --- a/drivers/ntb/ntb_hw.c
1310 +++ b/drivers/ntb/ntb_hw.c
1311 @@ -78,6 +78,8 @@ enum {
1312 BWD_HW,
1313 };
1314
1315 +static struct dentry *debugfs_dir;
1316 +
1317 /* Translate memory window 0,1 to BAR 2,4 */
1318 #define MW_TO_BAR(mw) (mw * 2 + 2)
1319
1320 @@ -531,9 +533,9 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
1321 }
1322
1323 if (val & SNB_PPD_DEV_TYPE)
1324 - ndev->dev_type = NTB_DEV_DSD;
1325 - else
1326 ndev->dev_type = NTB_DEV_USD;
1327 + else
1328 + ndev->dev_type = NTB_DEV_DSD;
1329
1330 ndev->reg_ofs.pdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
1331 ndev->reg_ofs.pdb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
1332 @@ -547,7 +549,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
1333 if (ndev->conn_type == NTB_CONN_B2B) {
1334 ndev->reg_ofs.sdb = ndev->reg_base + SNB_B2B_DOORBELL_OFFSET;
1335 ndev->reg_ofs.spad_write = ndev->reg_base + SNB_B2B_SPAD_OFFSET;
1336 - ndev->limits.max_spads = SNB_MAX_SPADS;
1337 + ndev->limits.max_spads = SNB_MAX_B2B_SPADS;
1338 } else {
1339 ndev->reg_ofs.sdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
1340 ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
1341 @@ -644,10 +646,16 @@ static int ntb_device_setup(struct ntb_device *ndev)
1342 rc = -ENODEV;
1343 }
1344
1345 + if (rc)
1346 + return rc;
1347 +
1348 + dev_info(&ndev->pdev->dev, "Device Type = %s\n",
1349 + ndev->dev_type == NTB_DEV_USD ? "USD/DSP" : "DSD/USP");
1350 +
1351 /* Enable Bus Master and Memory Space on the secondary side */
1352 writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, ndev->reg_ofs.spci_cmd);
1353
1354 - return rc;
1355 + return 0;
1356 }
1357
1358 static void ntb_device_free(struct ntb_device *ndev)
1359 @@ -992,6 +1000,28 @@ static void ntb_free_callbacks(struct ntb_device *ndev)
1360 kfree(ndev->db_cb);
1361 }
1362
1363 +static void ntb_setup_debugfs(struct ntb_device *ndev)
1364 +{
1365 + if (!debugfs_initialized())
1366 + return;
1367 +
1368 + if (!debugfs_dir)
1369 + debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1370 +
1371 + ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->pdev),
1372 + debugfs_dir);
1373 +}
1374 +
1375 +static void ntb_free_debugfs(struct ntb_device *ndev)
1376 +{
1377 + debugfs_remove_recursive(ndev->debugfs_dir);
1378 +
1379 + if (debugfs_dir && simple_empty(debugfs_dir)) {
1380 + debugfs_remove_recursive(debugfs_dir);
1381 + debugfs_dir = NULL;
1382 + }
1383 +}
1384 +
1385 static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1386 {
1387 struct ntb_device *ndev;
1388 @@ -1004,6 +1034,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1389 ndev->pdev = pdev;
1390 ndev->link_status = NTB_LINK_DOWN;
1391 pci_set_drvdata(pdev, ndev);
1392 + ntb_setup_debugfs(ndev);
1393
1394 rc = pci_enable_device(pdev);
1395 if (rc)
1396 @@ -1100,6 +1131,7 @@ err2:
1397 err1:
1398 pci_disable_device(pdev);
1399 err:
1400 + ntb_free_debugfs(ndev);
1401 kfree(ndev);
1402
1403 dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME);
1404 @@ -1129,6 +1161,7 @@ static void ntb_pci_remove(struct pci_dev *pdev)
1405 iounmap(ndev->reg_base);
1406 pci_release_selected_regions(pdev, NTB_BAR_MASK);
1407 pci_disable_device(pdev);
1408 + ntb_free_debugfs(ndev);
1409 kfree(ndev);
1410 }
1411
1412 diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
1413 index 3a3038ca83e6..6a4f56f564ee 100644
1414 --- a/drivers/ntb/ntb_hw.h
1415 +++ b/drivers/ntb/ntb_hw.h
1416 @@ -127,6 +127,8 @@ struct ntb_device {
1417 unsigned char link_status;
1418 struct delayed_work hb_timer;
1419 unsigned long last_ts;
1420 +
1421 + struct dentry *debugfs_dir;
1422 };
1423
1424 /**
1425 @@ -155,6 +157,20 @@ static inline struct pci_dev *ntb_query_pdev(struct ntb_device *ndev)
1426 return ndev->pdev;
1427 }
1428
1429 +/**
1430 + * ntb_query_debugfs() - return the debugfs pointer
1431 + * @ndev: pointer to ntb_device instance
1432 + *
1433 + * Given the ntb pointer, return the debugfs directory pointer for the NTB
1434 + * hardware device
1435 + *
1436 + * RETURNS: a pointer to the debugfs directory
1437 + */
1438 +static inline struct dentry *ntb_query_debugfs(struct ntb_device *ndev)
1439 +{
1440 + return ndev->debugfs_dir;
1441 +}
1442 +
1443 struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
1444 void *transport);
1445 void ntb_unregister_transport(struct ntb_device *ndev);
1446 diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
1447 index 5bfa8c06c059..96209b4abc22 100644
1448 --- a/drivers/ntb/ntb_regs.h
1449 +++ b/drivers/ntb/ntb_regs.h
1450 @@ -53,8 +53,8 @@
1451 #define NTB_LINK_WIDTH_MASK 0x03f0
1452
1453 #define SNB_MSIX_CNT 4
1454 -#define SNB_MAX_SPADS 16
1455 -#define SNB_MAX_COMPAT_SPADS 8
1456 +#define SNB_MAX_B2B_SPADS 16
1457 +#define SNB_MAX_COMPAT_SPADS 16
1458 /* Reserve the uppermost bit for link interrupt */
1459 #define SNB_MAX_DB_BITS 15
1460 #define SNB_DB_BITS_PER_VEC 5
1461 diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
1462 index f8d7081ee301..c3089151aa49 100644
1463 --- a/drivers/ntb/ntb_transport.c
1464 +++ b/drivers/ntb/ntb_transport.c
1465 @@ -157,7 +157,6 @@ struct ntb_transport {
1466 bool transport_link;
1467 struct delayed_work link_work;
1468 struct work_struct link_cleanup;
1469 - struct dentry *debugfs_dir;
1470 };
1471
1472 enum {
1473 @@ -824,12 +823,12 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
1474 qp->tx_max_frame = min(transport_mtu, tx_size / 2);
1475 qp->tx_max_entry = tx_size / qp->tx_max_frame;
1476
1477 - if (nt->debugfs_dir) {
1478 + if (ntb_query_debugfs(nt->ndev)) {
1479 char debugfs_name[4];
1480
1481 snprintf(debugfs_name, 4, "qp%d", qp_num);
1482 qp->debugfs_dir = debugfs_create_dir(debugfs_name,
1483 - nt->debugfs_dir);
1484 + ntb_query_debugfs(nt->ndev));
1485
1486 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
1487 qp->debugfs_dir, qp,
1488 @@ -857,11 +856,6 @@ int ntb_transport_init(struct pci_dev *pdev)
1489 if (!nt)
1490 return -ENOMEM;
1491
1492 - if (debugfs_initialized())
1493 - nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1494 - else
1495 - nt->debugfs_dir = NULL;
1496 -
1497 nt->ndev = ntb_register_transport(pdev, nt);
1498 if (!nt->ndev) {
1499 rc = -EIO;
1500 @@ -907,7 +901,6 @@ err2:
1501 err1:
1502 ntb_unregister_transport(nt->ndev);
1503 err:
1504 - debugfs_remove_recursive(nt->debugfs_dir);
1505 kfree(nt);
1506 return rc;
1507 }
1508 @@ -921,16 +914,16 @@ void ntb_transport_free(void *transport)
1509 nt->transport_link = NTB_LINK_DOWN;
1510
1511 /* verify that all the qp's are freed */
1512 - for (i = 0; i < nt->max_qps; i++)
1513 + for (i = 0; i < nt->max_qps; i++) {
1514 if (!test_bit(i, &nt->qp_bitmap))
1515 ntb_transport_free_queue(&nt->qps[i]);
1516 + debugfs_remove_recursive(nt->qps[i].debugfs_dir);
1517 + }
1518
1519 ntb_bus_remove(nt);
1520
1521 cancel_delayed_work_sync(&nt->link_work);
1522
1523 - debugfs_remove_recursive(nt->debugfs_dir);
1524 -
1525 ntb_unregister_event_callback(nt->ndev);
1526
1527 pdev = ntb_query_pdev(nt->ndev);
1528 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
1529 index feab3a5e50b5..757eb0716d45 100644
1530 --- a/drivers/scsi/BusLogic.c
1531 +++ b/drivers/scsi/BusLogic.c
1532 @@ -696,7 +696,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
1533 while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
1534 PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
1535 pci_device)) != NULL) {
1536 - struct blogic_adapter *adapter = adapter;
1537 + struct blogic_adapter *host_adapter = adapter;
1538 struct blogic_adapter_info adapter_info;
1539 enum blogic_isa_ioport mod_ioaddr_req;
1540 unsigned char bus;
1541 @@ -744,9 +744,9 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
1542 known and enabled, note that the particular Standard ISA I/O
1543 Address should not be probed.
1544 */
1545 - adapter->io_addr = io_addr;
1546 - blogic_intreset(adapter);
1547 - if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
1548 + host_adapter->io_addr = io_addr;
1549 + blogic_intreset(host_adapter);
1550 + if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
1551 &adapter_info, sizeof(adapter_info)) ==
1552 sizeof(adapter_info)) {
1553 if (adapter_info.isa_port < 6)
1554 @@ -762,7 +762,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
1555 I/O Address assigned at system initialization.
1556 */
1557 mod_ioaddr_req = BLOGIC_IO_DISABLE;
1558 - blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
1559 + blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
1560 sizeof(mod_ioaddr_req), NULL, 0);
1561 /*
1562 For the first MultiMaster Host Adapter enumerated,
1563 @@ -779,12 +779,12 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
1564
1565 fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
1566 fetch_localram.count = sizeof(autoscsi_byte45);
1567 - blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM,
1568 + blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
1569 &fetch_localram, sizeof(fetch_localram),
1570 &autoscsi_byte45,
1571 sizeof(autoscsi_byte45));
1572 - blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id,
1573 - sizeof(id));
1574 + blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
1575 + &id, sizeof(id));
1576 if (id.fw_ver_digit1 == '5')
1577 force_scan_order =
1578 autoscsi_byte45.force_scan_order;
1579 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
1580 index 408a42ef787a..f0d432c139d0 100644
1581 --- a/drivers/scsi/aacraid/linit.c
1582 +++ b/drivers/scsi/aacraid/linit.c
1583 @@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
1584 static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
1585 {
1586 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
1587 + if (!capable(CAP_SYS_RAWIO))
1588 + return -EPERM;
1589 return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
1590 }
1591
1592 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1593 index 2783dd7057ec..83e9070dc3c0 100644
1594 --- a/drivers/scsi/sd.c
1595 +++ b/drivers/scsi/sd.c
1596 @@ -2853,6 +2853,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1597 gd->events |= DISK_EVENT_MEDIA_CHANGE;
1598 }
1599
1600 + blk_pm_runtime_init(sdp->request_queue, dev);
1601 add_disk(gd);
1602 if (sdkp->capacity)
1603 sd_dif_config_host(sdkp);
1604 @@ -2861,7 +2862,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
1605
1606 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
1607 sdp->removable ? "removable " : "");
1608 - blk_pm_runtime_init(sdp->request_queue, dev);
1609 scsi_autopm_put_device(sdp);
1610 put_device(&sdkp->dev);
1611 }
1612 diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
1613 index f67a22536cbf..756b6344b1fd 100644
1614 --- a/drivers/staging/bcm/Bcmchar.c
1615 +++ b/drivers/staging/bcm/Bcmchar.c
1616 @@ -1960,6 +1960,7 @@ cntrlEnd:
1617
1618 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
1619
1620 + memset(&DevInfo, 0, sizeof(DevInfo));
1621 DevInfo.MaxRDMBufferSize = BUFFER_4K;
1622 DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
1623 DevInfo.u32RxAlignmentCorrection = 0;
1624 diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
1625 index 374fdc398641..ea5f9f3595fd 100644
1626 --- a/drivers/staging/ozwpan/ozcdev.c
1627 +++ b/drivers/staging/ozwpan/ozcdev.c
1628 @@ -152,6 +152,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
1629 struct oz_app_hdr *app_hdr;
1630 struct oz_serial_ctx *ctx;
1631
1632 + if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
1633 + return -EINVAL;
1634 +
1635 spin_lock_bh(&g_cdev.lock);
1636 pd = g_cdev.active_pd;
1637 if (pd)
1638 diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
1639 index 23db32f07fd5..a10cdb17038b 100644
1640 --- a/drivers/staging/sb105x/sb_pci_mp.c
1641 +++ b/drivers/staging/sb105x/sb_pci_mp.c
1642 @@ -1063,7 +1063,7 @@ static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg)
1643
1644 static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
1645 {
1646 - struct serial_icounter_struct icount;
1647 + struct serial_icounter_struct icount = {};
1648 struct sb_uart_icount cnow;
1649 struct sb_uart_port *port = state->port;
1650
1651 diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
1652 index c97e0e154d28..7e10dcdc3090 100644
1653 --- a/drivers/staging/wlags49_h2/wl_priv.c
1654 +++ b/drivers/staging/wlags49_h2/wl_priv.c
1655 @@ -570,6 +570,7 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
1656 ltv_t *pLtv;
1657 bool_t ltvAllocated = FALSE;
1658 ENCSTRCT sEncryption;
1659 + size_t len;
1660
1661 #ifdef USE_WDS
1662 hcf_16 hcfPort = HCF_PORT_0;
1663 @@ -686,7 +687,8 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
1664 break;
1665 case CFG_CNF_OWN_NAME:
1666 memset(lp->StationName, 0, sizeof(lp->StationName));
1667 - memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
1668 + len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
1669 + strlcpy(lp->StationName, &pLtv->u.u8[2], len);
1670 pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
1671 break;
1672 case CFG_CNF_LOAD_BALANCING:
1673 @@ -1783,6 +1785,7 @@ int wvlan_set_station_nickname(struct net_device *dev,
1674 {
1675 struct wl_private *lp = wl_priv(dev);
1676 unsigned long flags;
1677 + size_t len;
1678 int ret = 0;
1679 /*------------------------------------------------------------------------*/
1680
1681 @@ -1793,8 +1796,8 @@ int wvlan_set_station_nickname(struct net_device *dev,
1682 wl_lock(lp, &flags);
1683
1684 memset(lp->StationName, 0, sizeof(lp->StationName));
1685 -
1686 - memcpy(lp->StationName, extra, wrqu->data.length);
1687 + len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
1688 + strlcpy(lp->StationName, extra, len);
1689
1690 /* Commit the adapter parameters */
1691 wl_apply(lp);
1692 diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
1693 index e992b27aa090..3250ba2594e0 100644
1694 --- a/drivers/target/target_core_pscsi.c
1695 +++ b/drivers/target/target_core_pscsi.c
1696 @@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
1697 * pSCSI Host ID and enable for phba mode
1698 */
1699 sh = scsi_host_lookup(phv->phv_host_id);
1700 - if (IS_ERR(sh)) {
1701 + if (!sh) {
1702 pr_err("pSCSI: Unable to locate SCSI Host for"
1703 " phv_host_id: %d\n", phv->phv_host_id);
1704 - return PTR_ERR(sh);
1705 + return -EINVAL;
1706 }
1707
1708 phv->phv_lld_host = sh;
1709 @@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev)
1710 sh = phv->phv_lld_host;
1711 } else {
1712 sh = scsi_host_lookup(pdv->pdv_host_id);
1713 - if (IS_ERR(sh)) {
1714 + if (!sh) {
1715 pr_err("pSCSI: Unable to locate"
1716 " pdv_host_id: %d\n", pdv->pdv_host_id);
1717 - return PTR_ERR(sh);
1718 + return -EINVAL;
1719 }
1720 }
1721 } else {
1722 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
1723 index 3b96f18593b3..4bf4bb24ee8f 100644
1724 --- a/drivers/uio/uio.c
1725 +++ b/drivers/uio/uio.c
1726 @@ -630,36 +630,57 @@ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1727 return 0;
1728 }
1729
1730 -static const struct vm_operations_struct uio_vm_ops = {
1731 +static const struct vm_operations_struct uio_logical_vm_ops = {
1732 .open = uio_vma_open,
1733 .close = uio_vma_close,
1734 .fault = uio_vma_fault,
1735 };
1736
1737 +static int uio_mmap_logical(struct vm_area_struct *vma)
1738 +{
1739 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1740 + vma->vm_ops = &uio_logical_vm_ops;
1741 + uio_vma_open(vma);
1742 + return 0;
1743 +}
1744 +
1745 +static const struct vm_operations_struct uio_physical_vm_ops = {
1746 +#ifdef CONFIG_HAVE_IOREMAP_PROT
1747 + .access = generic_access_phys,
1748 +#endif
1749 +};
1750 +
1751 static int uio_mmap_physical(struct vm_area_struct *vma)
1752 {
1753 struct uio_device *idev = vma->vm_private_data;
1754 int mi = uio_find_mem_index(vma);
1755 + struct uio_mem *mem;
1756 if (mi < 0)
1757 return -EINVAL;
1758 + mem = idev->info->mem + mi;
1759
1760 + if (vma->vm_end - vma->vm_start > mem->size)
1761 + return -EINVAL;
1762 +
1763 + vma->vm_ops = &uio_physical_vm_ops;
1764 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1765
1766 + /*
1767 + * We cannot use the vm_iomap_memory() helper here,
1768 + * because vma->vm_pgoff is the map index we looked
1769 + * up above in uio_find_mem_index(), rather than an
1770 + * actual page offset into the mmap.
1771 + *
1772 + * So we just do the physical mmap without a page
1773 + * offset.
1774 + */
1775 return remap_pfn_range(vma,
1776 vma->vm_start,
1777 - idev->info->mem[mi].addr >> PAGE_SHIFT,
1778 + mem->addr >> PAGE_SHIFT,
1779 vma->vm_end - vma->vm_start,
1780 vma->vm_page_prot);
1781 }
1782
1783 -static int uio_mmap_logical(struct vm_area_struct *vma)
1784 -{
1785 - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1786 - vma->vm_ops = &uio_vm_ops;
1787 - uio_vma_open(vma);
1788 - return 0;
1789 -}
1790 -
1791 static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
1792 {
1793 struct uio_listener *listener = filep->private_data;
1794 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1795 index 5b44cd47da5b..01fe36273f3b 100644
1796 --- a/drivers/usb/core/quirks.c
1797 +++ b/drivers/usb/core/quirks.c
1798 @@ -97,6 +97,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1799 /* Alcor Micro Corp. Hub */
1800 { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
1801
1802 + /* MicroTouch Systems touchscreen */
1803 + { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
1804 +
1805 /* appletouch */
1806 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
1807
1808 @@ -130,6 +133,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1809 /* Broadcom BCM92035DGROM BT dongle */
1810 { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
1811
1812 + /* MAYA44USB sound device */
1813 + { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
1814 +
1815 /* Action Semiconductor flash disk */
1816 { USB_DEVICE(0x10d6, 0x2200), .driver_info =
1817 USB_QUIRK_STRING_FETCH_255 },
1818 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
1819 index ad5b99bd56b6..1f582d969f97 100644
1820 --- a/drivers/usb/host/xhci-hub.c
1821 +++ b/drivers/usb/host/xhci-hub.c
1822 @@ -1092,18 +1092,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1823 t1 = xhci_port_state_to_neutral(t1);
1824 if (t1 != t2)
1825 xhci_writel(xhci, t2, port_array[port_index]);
1826 -
1827 - if (hcd->speed != HCD_USB3) {
1828 - /* enable remote wake up for USB 2.0 */
1829 - __le32 __iomem *addr;
1830 - u32 tmp;
1831 -
1832 - /* Get the port power control register address. */
1833 - addr = port_array[port_index] + PORTPMSC;
1834 - tmp = xhci_readl(xhci, addr);
1835 - tmp |= PORT_RWE;
1836 - xhci_writel(xhci, tmp, addr);
1837 - }
1838 }
1839 hcd->state = HC_STATE_SUSPENDED;
1840 bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
1841 @@ -1182,20 +1170,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
1842 xhci_ring_device(xhci, slot_id);
1843 } else
1844 xhci_writel(xhci, temp, port_array[port_index]);
1845 -
1846 - if (hcd->speed != HCD_USB3) {
1847 - /* disable remote wake up for USB 2.0 */
1848 - __le32 __iomem *addr;
1849 - u32 tmp;
1850 -
1851 - /* Add one to the port status register address to get
1852 - * the port power control register address.
1853 - */
1854 - addr = port_array[port_index] + PORTPMSC;
1855 - tmp = xhci_readl(xhci, addr);
1856 - tmp &= ~PORT_RWE;
1857 - xhci_writel(xhci, tmp, addr);
1858 - }
1859 }
1860
1861 (void) xhci_readl(xhci, &xhci->op_regs->command);
1862 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
1863 index 29a24ced6748..6ef8c9407e25 100644
1864 --- a/drivers/usb/musb/musb_core.c
1865 +++ b/drivers/usb/musb/musb_core.c
1866 @@ -923,6 +923,52 @@ static void musb_generic_disable(struct musb *musb)
1867 }
1868
1869 /*
1870 + * Program the HDRC to start (enable interrupts, dma, etc.).
1871 + */
1872 +void musb_start(struct musb *musb)
1873 +{
1874 + void __iomem *regs = musb->mregs;
1875 + u8 devctl = musb_readb(regs, MUSB_DEVCTL);
1876 +
1877 + dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
1878 +
1879 + /* Set INT enable registers, enable interrupts */
1880 + musb->intrtxe = musb->epmask;
1881 + musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
1882 + musb->intrrxe = musb->epmask & 0xfffe;
1883 + musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
1884 + musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
1885 +
1886 + musb_writeb(regs, MUSB_TESTMODE, 0);
1887 +
1888 + /* put into basic highspeed mode and start session */
1889 + musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
1890 + | MUSB_POWER_HSENAB
1891 + /* ENSUSPEND wedges tusb */
1892 + /* | MUSB_POWER_ENSUSPEND */
1893 + );
1894 +
1895 + musb->is_active = 0;
1896 + devctl = musb_readb(regs, MUSB_DEVCTL);
1897 + devctl &= ~MUSB_DEVCTL_SESSION;
1898 +
1899 + /* session started after:
1900 + * (a) ID-grounded irq, host mode;
1901 + * (b) vbus present/connect IRQ, peripheral mode;
1902 + * (c) peripheral initiates, using SRP
1903 + */
1904 + if (musb->port_mode != MUSB_PORT_MODE_HOST &&
1905 + (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
1906 + musb->is_active = 1;
1907 + } else {
1908 + devctl |= MUSB_DEVCTL_SESSION;
1909 + }
1910 +
1911 + musb_platform_enable(musb);
1912 + musb_writeb(regs, MUSB_DEVCTL, devctl);
1913 +}
1914 +
1915 +/*
1916 * Make the HDRC stop (disable interrupts, etc.);
1917 * reversible by musb_start
1918 * called on gadget driver unregister
1919 diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
1920 index 7d341c387eab..679dd5b82cc5 100644
1921 --- a/drivers/usb/musb/musb_core.h
1922 +++ b/drivers/usb/musb/musb_core.h
1923 @@ -511,6 +511,7 @@ static inline void musb_configure_ep0(struct musb *musb)
1924 extern const char musb_driver_name[];
1925
1926 extern void musb_stop(struct musb *musb);
1927 +extern void musb_start(struct musb *musb);
1928
1929 extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
1930 extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
1931 diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
1932 index 0414bc19d009..d95378a68f6c 100644
1933 --- a/drivers/usb/musb/musb_gadget.c
1934 +++ b/drivers/usb/musb/musb_gadget.c
1935 @@ -1842,6 +1842,8 @@ static int musb_gadget_start(struct usb_gadget *g,
1936 musb->xceiv->state = OTG_STATE_B_IDLE;
1937 spin_unlock_irqrestore(&musb->lock, flags);
1938
1939 + musb_start(musb);
1940 +
1941 /* REVISIT: funcall to other code, which also
1942 * handles power budgeting ... this way also
1943 * ensures HdrcStart is indirectly called.
1944 diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
1945 index a523950c2b32..d1d6b83aabca 100644
1946 --- a/drivers/usb/musb/musb_virthub.c
1947 +++ b/drivers/usb/musb/musb_virthub.c
1948 @@ -44,52 +44,6 @@
1949
1950 #include "musb_core.h"
1951
1952 -/*
1953 -* Program the HDRC to start (enable interrupts, dma, etc.).
1954 -*/
1955 -static void musb_start(struct musb *musb)
1956 -{
1957 - void __iomem *regs = musb->mregs;
1958 - u8 devctl = musb_readb(regs, MUSB_DEVCTL);
1959 -
1960 - dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
1961 -
1962 - /* Set INT enable registers, enable interrupts */
1963 - musb->intrtxe = musb->epmask;
1964 - musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
1965 - musb->intrrxe = musb->epmask & 0xfffe;
1966 - musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
1967 - musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
1968 -
1969 - musb_writeb(regs, MUSB_TESTMODE, 0);
1970 -
1971 - /* put into basic highspeed mode and start session */
1972 - musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
1973 - | MUSB_POWER_HSENAB
1974 - /* ENSUSPEND wedges tusb */
1975 - /* | MUSB_POWER_ENSUSPEND */
1976 - );
1977 -
1978 - musb->is_active = 0;
1979 - devctl = musb_readb(regs, MUSB_DEVCTL);
1980 - devctl &= ~MUSB_DEVCTL_SESSION;
1981 -
1982 - /* session started after:
1983 - * (a) ID-grounded irq, host mode;
1984 - * (b) vbus present/connect IRQ, peripheral mode;
1985 - * (c) peripheral initiates, using SRP
1986 - */
1987 - if (musb->port_mode != MUSB_PORT_MODE_HOST &&
1988 - (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
1989 - musb->is_active = 1;
1990 - } else {
1991 - devctl |= MUSB_DEVCTL_SESSION;
1992 - }
1993 -
1994 - musb_platform_enable(musb);
1995 - musb_writeb(regs, MUSB_DEVCTL, devctl);
1996 -}
1997 -
1998 static void musb_port_suspend(struct musb *musb, bool do_suspend)
1999 {
2000 struct usb_otg *otg = musb->xceiv->otg;
2001 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2002 index b65e657c641d..aa3aed5458a6 100644
2003 --- a/drivers/usb/serial/ftdi_sio.c
2004 +++ b/drivers/usb/serial/ftdi_sio.c
2005 @@ -906,6 +906,7 @@ static struct usb_device_id id_table_combined [] = {
2006 { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
2007 /* Crucible Devices */
2008 { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
2009 + { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
2010 { }, /* Optional parameter entry */
2011 { } /* Terminating entry */
2012 };
2013 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2014 index 1b8af461b522..a7019d1e3058 100644
2015 --- a/drivers/usb/serial/ftdi_sio_ids.h
2016 +++ b/drivers/usb/serial/ftdi_sio_ids.h
2017 @@ -1307,3 +1307,9 @@
2018 * Manufacturer: Crucible Technologies
2019 */
2020 #define FTDI_CT_COMET_PID 0x8e08
2021 +
2022 +/*
2023 + * Product: Z3X Box
2024 + * Manufacturer: Smart GSM Team
2025 + */
2026 +#define FTDI_Z3X_PID 0x0011
2027 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2028 index f1507c052a2e..acaee066b99a 100644
2029 --- a/drivers/usb/serial/option.c
2030 +++ b/drivers/usb/serial/option.c
2031 @@ -693,6 +693,222 @@ static const struct usb_device_id option_ids[] = {
2032 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
2033 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
2034 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
2035 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
2036 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
2037 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
2038 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
2039 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
2040 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
2041 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
2042 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
2043 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
2044 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
2045 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
2046 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
2047 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
2048 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
2049 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
2050 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
2051 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
2052 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
2053 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
2054 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
2055 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
2056 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
2057 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
2058 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
2059 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
2060 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
2061 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
2062 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
2063 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
2064 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
2065 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
2066 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
2067 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
2068 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
2069 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
2070 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
2071 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
2072 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
2073 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
2074 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
2075 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
2076 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
2077 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
2078 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
2079 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
2080 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
2081 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
2082 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
2083 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
2084 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
2085 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
2086 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
2087 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
2088 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
2089 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
2090 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
2091 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
2092 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
2093 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
2094 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
2095 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
2096 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
2097 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
2098 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
2099 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
2100 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
2101 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
2102 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
2103 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
2104 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
2105 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
2106 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
2107 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
2108 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
2109 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
2110 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
2111 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
2112 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
2113 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
2114 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
2115 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
2116 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
2117 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
2118 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
2119 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
2120 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
2121 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
2122 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
2123 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
2124 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
2125 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
2126 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
2127 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
2128 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
2129 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
2130 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
2131 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
2132 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
2133 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
2134 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
2135 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
2136 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
2137 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
2138 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
2139 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
2140 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
2141 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
2142 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
2143 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
2144 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
2145 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
2146 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
2147 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
2148 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
2149 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
2150 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
2151 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
2152 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
2153 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
2154 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
2155 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
2156 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
2157 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
2158 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
2159 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
2160 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
2161 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
2162 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
2163 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
2164 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
2165 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
2166 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
2167 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
2168 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
2169 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
2170 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
2171 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
2172 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
2173 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
2174 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
2175 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
2176 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
2177 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
2178 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
2179 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
2180 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
2181 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
2182 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
2183 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
2184 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
2185 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
2186 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
2187 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
2188 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
2189 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
2190 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
2191 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
2192 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
2193 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
2194 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
2195 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
2196 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
2197 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
2198 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
2199 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
2200 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
2201 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
2202 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
2203 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
2204 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
2205 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
2206 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
2207 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
2208 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
2209 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
2210 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
2211 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
2212 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
2213 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
2214 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
2215 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
2216 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
2217 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
2218 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
2219 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
2220 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
2221 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
2222 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
2223 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
2224 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
2225 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
2226 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
2227 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
2228 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
2229 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
2230 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
2231 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
2232 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
2233 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
2234 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
2235 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
2236 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
2237 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
2238 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
2239 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
2240 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
2241 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
2242 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
2243 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
2244 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
2245 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
2246 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
2247 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
2248 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
2249 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
2250 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
2251
2252
2253 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
2254 diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
2255 index 92b05d95ec5e..5db153260827 100644
2256 --- a/drivers/usb/storage/scsiglue.c
2257 +++ b/drivers/usb/storage/scsiglue.c
2258 @@ -211,8 +211,11 @@ static int slave_configure(struct scsi_device *sdev)
2259 /*
2260 * Many devices do not respond properly to READ_CAPACITY_16.
2261 * Tell the SCSI layer to try READ_CAPACITY_10 first.
2262 + * However some USB 3.0 drive enclosures return capacity
2263 + * modulo 2TB. Those must use READ_CAPACITY_16
2264 */
2265 - sdev->try_rc_10_first = 1;
2266 + if (!(us->fflags & US_FL_NEEDS_CAP16))
2267 + sdev->try_rc_10_first = 1;
2268
2269 /* assume SPC3 or latter devices support sense size > 18 */
2270 if (sdev->scsi_level > SCSI_SPC_2)
2271 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
2272 index c015f2c16729..de32cfa5bfa6 100644
2273 --- a/drivers/usb/storage/unusual_devs.h
2274 +++ b/drivers/usb/storage/unusual_devs.h
2275 @@ -1925,6 +1925,13 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
2276 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2277 US_FL_IGNORE_RESIDUE ),
2278
2279 +/* Reported by Oliver Neukum <oneukum@suse.com> */
2280 +UNUSUAL_DEV( 0x174c, 0x55aa, 0x0100, 0x0100,
2281 + "ASMedia",
2282 + "AS2105",
2283 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2284 + US_FL_NEEDS_CAP16),
2285 +
2286 /* Reported by Jesse Feddema <jdfeddema@gmail.com> */
2287 UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000,
2288 "Yarvik",
2289 diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
2290 index 0c27c7df1b09..1869237efbed 100644
2291 --- a/drivers/vhost/scsi.c
2292 +++ b/drivers/vhost/scsi.c
2293 @@ -1030,7 +1030,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
2294 if (data_direction != DMA_NONE) {
2295 ret = vhost_scsi_map_iov_to_sgl(cmd,
2296 &vq->iov[data_first], data_num,
2297 - data_direction == DMA_TO_DEVICE);
2298 + data_direction == DMA_FROM_DEVICE);
2299 if (unlikely(ret)) {
2300 vq_err(vq, "Failed to map iov to sgl\n");
2301 goto err_free;
2302 diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
2303 index a54ccdc4d661..22ad85242e5b 100644
2304 --- a/drivers/video/au1100fb.c
2305 +++ b/drivers/video/au1100fb.c
2306 @@ -361,37 +361,13 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
2307 int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
2308 {
2309 struct au1100fb_device *fbdev;
2310 - unsigned int len;
2311 - unsigned long start=0, off;
2312
2313 fbdev = to_au1100fb_device(fbi);
2314
2315 - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
2316 - return -EINVAL;
2317 - }
2318 -
2319 - start = fbdev->fb_phys & PAGE_MASK;
2320 - len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
2321 -
2322 - off = vma->vm_pgoff << PAGE_SHIFT;
2323 -
2324 - if ((vma->vm_end - vma->vm_start + off) > len) {
2325 - return -EINVAL;
2326 - }
2327 -
2328 - off += start;
2329 - vma->vm_pgoff = off >> PAGE_SHIFT;
2330 -
2331 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2332 pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
2333
2334 - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
2335 - vma->vm_end - vma->vm_start,
2336 - vma->vm_page_prot)) {
2337 - return -EAGAIN;
2338 - }
2339 -
2340 - return 0;
2341 + return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
2342 }
2343
2344 static struct fb_ops au1100fb_ops =
2345 diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
2346 index 301224ecc950..1d02897d17f2 100644
2347 --- a/drivers/video/au1200fb.c
2348 +++ b/drivers/video/au1200fb.c
2349 @@ -1233,34 +1233,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
2350 * method mainly to allow the use of the TLB streaming flag (CCA=6)
2351 */
2352 static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
2353 -
2354 {
2355 - unsigned int len;
2356 - unsigned long start=0, off;
2357 struct au1200fb_device *fbdev = info->par;
2358
2359 - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
2360 - return -EINVAL;
2361 - }
2362 -
2363 - start = fbdev->fb_phys & PAGE_MASK;
2364 - len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
2365 -
2366 - off = vma->vm_pgoff << PAGE_SHIFT;
2367 -
2368 - if ((vma->vm_end - vma->vm_start + off) > len) {
2369 - return -EINVAL;
2370 - }
2371 -
2372 - off += start;
2373 - vma->vm_pgoff = off >> PAGE_SHIFT;
2374 -
2375 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2376 pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
2377
2378 - return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
2379 - vma->vm_end - vma->vm_start,
2380 - vma->vm_page_prot);
2381 + return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
2382 }
2383
2384 static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
2385 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2386 index 85ea98d139fc..40cfef58dcc3 100644
2387 --- a/fs/cifs/cifsfs.c
2388 +++ b/fs/cifs/cifsfs.c
2389 @@ -120,14 +120,16 @@ cifs_read_super(struct super_block *sb)
2390 {
2391 struct inode *inode;
2392 struct cifs_sb_info *cifs_sb;
2393 + struct cifs_tcon *tcon;
2394 int rc = 0;
2395
2396 cifs_sb = CIFS_SB(sb);
2397 + tcon = cifs_sb_master_tcon(cifs_sb);
2398
2399 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
2400 sb->s_flags |= MS_POSIXACL;
2401
2402 - if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES)
2403 + if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
2404 sb->s_maxbytes = MAX_LFS_FILESIZE;
2405 else
2406 sb->s_maxbytes = MAX_NON_LFS;
2407 @@ -147,7 +149,7 @@ cifs_read_super(struct super_block *sb)
2408 goto out_no_root;
2409 }
2410
2411 - if (cifs_sb_master_tcon(cifs_sb)->nocase)
2412 + if (tcon->nocase)
2413 sb->s_d_op = &cifs_ci_dentry_ops;
2414 else
2415 sb->s_d_op = &cifs_dentry_ops;
2416 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
2417 index d10757635b9c..40db6880cdd6 100644
2418 --- a/fs/ecryptfs/crypto.c
2419 +++ b/fs/ecryptfs/crypto.c
2420 @@ -408,7 +408,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat,
2421 struct page *page)
2422 {
2423 return ecryptfs_lower_header_size(crypt_stat) +
2424 - (page->index << PAGE_CACHE_SHIFT);
2425 + ((loff_t)page->index << PAGE_CACHE_SHIFT);
2426 }
2427
2428 /**
2429 diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
2430 index 7d52806c2119..4725a07f003c 100644
2431 --- a/fs/ecryptfs/keystore.c
2432 +++ b/fs/ecryptfs/keystore.c
2433 @@ -1149,7 +1149,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
2434 struct ecryptfs_msg_ctx *msg_ctx;
2435 struct ecryptfs_message *msg = NULL;
2436 char *auth_tok_sig;
2437 - char *payload;
2438 + char *payload = NULL;
2439 size_t payload_len = 0;
2440 int rc;
2441
2442 @@ -1203,6 +1203,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
2443 }
2444 out:
2445 kfree(msg);
2446 + kfree(payload);
2447 return rc;
2448 }
2449
2450 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
2451 index 9ad17b15b454..0f0f73624a88 100644
2452 --- a/fs/eventpoll.c
2453 +++ b/fs/eventpoll.c
2454 @@ -34,7 +34,6 @@
2455 #include <linux/mutex.h>
2456 #include <linux/anon_inodes.h>
2457 #include <linux/device.h>
2458 -#include <linux/freezer.h>
2459 #include <asm/uaccess.h>
2460 #include <asm/io.h>
2461 #include <asm/mman.h>
2462 @@ -1603,8 +1602,7 @@ fetch_events:
2463 }
2464
2465 spin_unlock_irqrestore(&ep->lock, flags);
2466 - if (!freezable_schedule_hrtimeout_range(to, slack,
2467 - HRTIMER_MODE_ABS))
2468 + if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
2469 timed_out = 1;
2470
2471 spin_lock_irqsave(&ep->lock, flags);
2472 diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
2473 index c1a3e603279c..7f464c513ba0 100644
2474 --- a/fs/jfs/jfs_inode.c
2475 +++ b/fs/jfs/jfs_inode.c
2476 @@ -95,7 +95,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
2477
2478 if (insert_inode_locked(inode) < 0) {
2479 rc = -EINVAL;
2480 - goto fail_unlock;
2481 + goto fail_put;
2482 }
2483
2484 inode_init_owner(inode, parent, mode);
2485 @@ -156,7 +156,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
2486 fail_drop:
2487 dquot_drop(inode);
2488 inode->i_flags |= S_NOQUOTA;
2489 -fail_unlock:
2490 clear_nlink(inode);
2491 unlock_new_inode(inode);
2492 fail_put:
2493 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
2494 index 107d026f5d6e..7a9e255f195d 100644
2495 --- a/fs/proc/task_mmu.c
2496 +++ b/fs/proc/task_mmu.c
2497 @@ -938,6 +938,8 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
2498 frame = pte_pfn(pte);
2499 flags = PM_PRESENT;
2500 page = vm_normal_page(vma, addr, pte);
2501 + if (pte_soft_dirty(pte))
2502 + flags2 |= __PM_SOFT_DIRTY;
2503 } else if (is_swap_pte(pte)) {
2504 swp_entry_t entry;
2505 if (pte_swp_soft_dirty(pte))
2506 @@ -955,8 +957,6 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
2507
2508 if (page && !PageAnon(page))
2509 flags |= PM_FILE;
2510 - if (pte_soft_dirty(pte))
2511 - flags2 |= __PM_SOFT_DIRTY;
2512
2513 *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
2514 }
2515 diff --git a/fs/select.c b/fs/select.c
2516 index 35d4adc749d9..dfd5cb18c012 100644
2517 --- a/fs/select.c
2518 +++ b/fs/select.c
2519 @@ -238,8 +238,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
2520
2521 set_current_state(state);
2522 if (!pwq->triggered)
2523 - rc = freezable_schedule_hrtimeout_range(expires, slack,
2524 - HRTIMER_MODE_ABS);
2525 + rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
2526 __set_current_state(TASK_RUNNING);
2527
2528 /*
2529 diff --git a/fs/seq_file.c b/fs/seq_file.c
2530 index 3135c2525c76..a290157265ef 100644
2531 --- a/fs/seq_file.c
2532 +++ b/fs/seq_file.c
2533 @@ -328,6 +328,8 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence)
2534 m->read_pos = offset;
2535 retval = file->f_pos = offset;
2536 }
2537 + } else {
2538 + file->f_pos = offset;
2539 }
2540 }
2541 file->f_version = m->version;
2542 diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
2543 index bf99cd01be20..630356866030 100644
2544 --- a/include/linux/usb_usual.h
2545 +++ b/include/linux/usb_usual.h
2546 @@ -66,7 +66,9 @@
2547 US_FLAG(INITIAL_READ10, 0x00100000) \
2548 /* Initial READ(10) (and others) must be retried */ \
2549 US_FLAG(WRITE_CACHE, 0x00200000) \
2550 - /* Write Cache status is not available */
2551 + /* Write Cache status is not available */ \
2552 + US_FLAG(NEEDS_CAP16, 0x00400000)
2553 + /* cannot handle READ_CAPACITY_10 */
2554
2555 #define US_FLAG(name, value) US_FL_##name = value ,
2556 enum { US_DO_ALL_FLAGS };
2557 diff --git a/include/trace/events/target.h b/include/trace/events/target.h
2558 index aef8fc354025..da9cc0f05c93 100644
2559 --- a/include/trace/events/target.h
2560 +++ b/include/trace/events/target.h
2561 @@ -144,7 +144,7 @@ TRACE_EVENT(target_sequencer_start,
2562 ),
2563
2564 TP_fast_assign(
2565 - __entry->unpacked_lun = cmd->se_lun->unpacked_lun;
2566 + __entry->unpacked_lun = cmd->orig_fe_lun;
2567 __entry->opcode = cmd->t_task_cdb[0];
2568 __entry->data_length = cmd->data_length;
2569 __entry->task_attribute = cmd->sam_task_attr;
2570 @@ -182,7 +182,7 @@ TRACE_EVENT(target_cmd_complete,
2571 ),
2572
2573 TP_fast_assign(
2574 - __entry->unpacked_lun = cmd->se_lun->unpacked_lun;
2575 + __entry->unpacked_lun = cmd->orig_fe_lun;
2576 __entry->opcode = cmd->t_task_cdb[0];
2577 __entry->data_length = cmd->data_length;
2578 __entry->task_attribute = cmd->sam_task_attr;
2579 diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
2580 index 53db7cea373b..7830754c9b4a 100644
2581 --- a/include/uapi/drm/drm_mode.h
2582 +++ b/include/uapi/drm/drm_mode.h
2583 @@ -223,6 +223,8 @@ struct drm_mode_get_connector {
2584 __u32 connection;
2585 __u32 mm_width, mm_height; /**< HxW in millimeters */
2586 __u32 subpixel;
2587 +
2588 + __u32 pad;
2589 };
2590
2591 #define DRM_MODE_PROP_PENDING (1<<0)
2592 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
2593 index e91963302c0d..d22f5977a31b 100644
2594 --- a/kernel/cgroup.c
2595 +++ b/kernel/cgroup.c
2596 @@ -2054,7 +2054,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
2597
2598 /* @tsk either already exited or can't exit until the end */
2599 if (tsk->flags & PF_EXITING)
2600 - continue;
2601 + goto next;
2602
2603 /* as per above, nr_threads may decrease, but not increase. */
2604 BUG_ON(i >= group_size);
2605 @@ -2062,7 +2062,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
2606 ent.cgrp = task_cgroup_from_root(tsk, root);
2607 /* nothing to do if this task is already in the cgroup */
2608 if (ent.cgrp == cgrp)
2609 - continue;
2610 + goto next;
2611 /*
2612 * saying GFP_ATOMIC has no effect here because we did prealloc
2613 * earlier, but it's good form to communicate our expectations.
2614 @@ -2070,7 +2070,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
2615 retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
2616 BUG_ON(retval != 0);
2617 i++;
2618 -
2619 + next:
2620 if (!threadgroup)
2621 break;
2622 } while_each_thread(leader, tsk);
2623 diff --git a/kernel/mutex.c b/kernel/mutex.c
2624 index a52ee7bb830d..a2b80f162f39 100644
2625 --- a/kernel/mutex.c
2626 +++ b/kernel/mutex.c
2627 @@ -408,7 +408,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
2628 static __always_inline int __sched
2629 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
2630 struct lockdep_map *nest_lock, unsigned long ip,
2631 - struct ww_acquire_ctx *ww_ctx)
2632 + struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
2633 {
2634 struct task_struct *task = current;
2635 struct mutex_waiter waiter;
2636 @@ -448,7 +448,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
2637 struct task_struct *owner;
2638 struct mspin_node node;
2639
2640 - if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
2641 + if (use_ww_ctx && ww_ctx->acquired > 0) {
2642 struct ww_mutex *ww;
2643
2644 ww = container_of(lock, struct ww_mutex, base);
2645 @@ -478,7 +478,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
2646 if ((atomic_read(&lock->count) == 1) &&
2647 (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
2648 lock_acquired(&lock->dep_map, ip);
2649 - if (!__builtin_constant_p(ww_ctx == NULL)) {
2650 + if (use_ww_ctx) {
2651 struct ww_mutex *ww;
2652 ww = container_of(lock, struct ww_mutex, base);
2653
2654 @@ -548,7 +548,7 @@ slowpath:
2655 goto err;
2656 }
2657
2658 - if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
2659 + if (use_ww_ctx && ww_ctx->acquired > 0) {
2660 ret = __mutex_lock_check_stamp(lock, ww_ctx);
2661 if (ret)
2662 goto err;
2663 @@ -568,7 +568,7 @@ done:
2664 mutex_remove_waiter(lock, &waiter, current_thread_info());
2665 mutex_set_owner(lock);
2666
2667 - if (!__builtin_constant_p(ww_ctx == NULL)) {
2668 + if (use_ww_ctx) {
2669 struct ww_mutex *ww = container_of(lock,
2670 struct ww_mutex,
2671 base);
2672 @@ -618,7 +618,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
2673 {
2674 might_sleep();
2675 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
2676 - subclass, NULL, _RET_IP_, NULL);
2677 + subclass, NULL, _RET_IP_, NULL, 0);
2678 }
2679
2680 EXPORT_SYMBOL_GPL(mutex_lock_nested);
2681 @@ -628,7 +628,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
2682 {
2683 might_sleep();
2684 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
2685 - 0, nest, _RET_IP_, NULL);
2686 + 0, nest, _RET_IP_, NULL, 0);
2687 }
2688
2689 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
2690 @@ -638,7 +638,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
2691 {
2692 might_sleep();
2693 return __mutex_lock_common(lock, TASK_KILLABLE,
2694 - subclass, NULL, _RET_IP_, NULL);
2695 + subclass, NULL, _RET_IP_, NULL, 0);
2696 }
2697 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
2698
2699 @@ -647,7 +647,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
2700 {
2701 might_sleep();
2702 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
2703 - subclass, NULL, _RET_IP_, NULL);
2704 + subclass, NULL, _RET_IP_, NULL, 0);
2705 }
2706
2707 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
2708 @@ -685,7 +685,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
2709
2710 might_sleep();
2711 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
2712 - 0, &ctx->dep_map, _RET_IP_, ctx);
2713 + 0, &ctx->dep_map, _RET_IP_, ctx, 1);
2714 if (!ret && ctx->acquired > 1)
2715 return ww_mutex_deadlock_injection(lock, ctx);
2716
2717 @@ -700,7 +700,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
2718
2719 might_sleep();
2720 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
2721 - 0, &ctx->dep_map, _RET_IP_, ctx);
2722 + 0, &ctx->dep_map, _RET_IP_, ctx, 1);
2723
2724 if (!ret && ctx->acquired > 1)
2725 return ww_mutex_deadlock_injection(lock, ctx);
2726 @@ -812,28 +812,28 @@ __mutex_lock_slowpath(atomic_t *lock_count)
2727 struct mutex *lock = container_of(lock_count, struct mutex, count);
2728
2729 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
2730 - NULL, _RET_IP_, NULL);
2731 + NULL, _RET_IP_, NULL, 0);
2732 }
2733
2734 static noinline int __sched
2735 __mutex_lock_killable_slowpath(struct mutex *lock)
2736 {
2737 return __mutex_lock_common(lock, TASK_KILLABLE, 0,
2738 - NULL, _RET_IP_, NULL);
2739 + NULL, _RET_IP_, NULL, 0);
2740 }
2741
2742 static noinline int __sched
2743 __mutex_lock_interruptible_slowpath(struct mutex *lock)
2744 {
2745 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
2746 - NULL, _RET_IP_, NULL);
2747 + NULL, _RET_IP_, NULL, 0);
2748 }
2749
2750 static noinline int __sched
2751 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
2752 {
2753 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
2754 - NULL, _RET_IP_, ctx);
2755 + NULL, _RET_IP_, ctx, 1);
2756 }
2757
2758 static noinline int __sched
2759 @@ -841,7 +841,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
2760 struct ww_acquire_ctx *ctx)
2761 {
2762 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
2763 - NULL, _RET_IP_, ctx);
2764 + NULL, _RET_IP_, ctx, 1);
2765 }
2766
2767 #endif
2768 diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
2769 index 38959c866789..662c5798a685 100644
2770 --- a/kernel/time/clockevents.c
2771 +++ b/kernel/time/clockevents.c
2772 @@ -33,29 +33,64 @@ struct ce_unbind {
2773 int res;
2774 };
2775
2776 -/**
2777 - * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
2778 - * @latch: value to convert
2779 - * @evt: pointer to clock event device descriptor
2780 - *
2781 - * Math helper, returns latch value converted to nanoseconds (bound checked)
2782 - */
2783 -u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
2784 +static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
2785 + bool ismax)
2786 {
2787 u64 clc = (u64) latch << evt->shift;
2788 + u64 rnd;
2789
2790 if (unlikely(!evt->mult)) {
2791 evt->mult = 1;
2792 WARN_ON(1);
2793 }
2794 + rnd = (u64) evt->mult - 1;
2795 +
2796 + /*
2797 + * Upper bound sanity check. If the backwards conversion is
2798 + * not equal latch, we know that the above shift overflowed.
2799 + */
2800 + if ((clc >> evt->shift) != (u64)latch)
2801 + clc = ~0ULL;
2802 +
2803 + /*
2804 + * Scaled math oddities:
2805 + *
2806 + * For mult <= (1 << shift) we can safely add mult - 1 to
2807 + * prevent integer rounding loss. So the backwards conversion
2808 + * from nsec to device ticks will be correct.
2809 + *
2810 + * For mult > (1 << shift), i.e. device frequency is > 1GHz we
2811 + * need to be careful. Adding mult - 1 will result in a value
2812 + * which when converted back to device ticks can be larger
2813 + * than latch by up to (mult - 1) >> shift. For the min_delta
2814 + * calculation we still want to apply this in order to stay
2815 + * above the minimum device ticks limit. For the upper limit
2816 + * we would end up with a latch value larger than the upper
2817 + * limit of the device, so we omit the add to stay below the
2818 + * device upper boundary.
2819 + *
2820 + * Also omit the add if it would overflow the u64 boundary.
2821 + */
2822 + if ((~0ULL - clc > rnd) &&
2823 + (!ismax || evt->mult <= (1U << evt->shift)))
2824 + clc += rnd;
2825
2826 do_div(clc, evt->mult);
2827 - if (clc < 1000)
2828 - clc = 1000;
2829 - if (clc > KTIME_MAX)
2830 - clc = KTIME_MAX;
2831
2832 - return clc;
2833 + /* Deltas less than 1usec are pointless noise */
2834 + return clc > 1000 ? clc : 1000;
2835 +}
2836 +
2837 +/**
2838 + * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
2839 + * @latch: value to convert
2840 + * @evt: pointer to clock event device descriptor
2841 + *
2842 + * Math helper, returns latch value converted to nanoseconds (bound checked)
2843 + */
2844 +u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
2845 +{
2846 + return cev_delta2ns(latch, evt, false);
2847 }
2848 EXPORT_SYMBOL_GPL(clockevent_delta2ns);
2849
2850 @@ -380,8 +415,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq)
2851 sec = 600;
2852
2853 clockevents_calc_mult_shift(dev, freq, sec);
2854 - dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
2855 - dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
2856 + dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
2857 + dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
2858 }
2859
2860 /**
2861 diff --git a/lib/scatterlist.c b/lib/scatterlist.c
2862 index a685c8a79578..d16fa295ae1d 100644
2863 --- a/lib/scatterlist.c
2864 +++ b/lib/scatterlist.c
2865 @@ -577,7 +577,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
2866 miter->__offset += miter->consumed;
2867 miter->__remaining -= miter->consumed;
2868
2869 - if (miter->__flags & SG_MITER_TO_SG)
2870 + if ((miter->__flags & SG_MITER_TO_SG) &&
2871 + !PageSlab(miter->page))
2872 flush_kernel_dcache_page(miter->page);
2873
2874 if (miter->__flags & SG_MITER_ATOMIC) {
2875 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2876 index 70861a1fdd64..12acb0ba7991 100644
2877 --- a/mm/huge_memory.c
2878 +++ b/mm/huge_memory.c
2879 @@ -1290,64 +1290,90 @@ out:
2880 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
2881 unsigned long addr, pmd_t pmd, pmd_t *pmdp)
2882 {
2883 + struct anon_vma *anon_vma = NULL;
2884 struct page *page;
2885 unsigned long haddr = addr & HPAGE_PMD_MASK;
2886 + int page_nid = -1, this_nid = numa_node_id();
2887 int target_nid;
2888 - int current_nid = -1;
2889 - bool migrated;
2890 + bool page_locked;
2891 + bool migrated = false;
2892
2893 spin_lock(&mm->page_table_lock);
2894 if (unlikely(!pmd_same(pmd, *pmdp)))
2895 goto out_unlock;
2896
2897 page = pmd_page(pmd);
2898 - get_page(page);
2899 - current_nid = page_to_nid(page);
2900 + page_nid = page_to_nid(page);
2901 count_vm_numa_event(NUMA_HINT_FAULTS);
2902 - if (current_nid == numa_node_id())
2903 + if (page_nid == this_nid)
2904 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
2905
2906 + /*
2907 + * Acquire the page lock to serialise THP migrations but avoid dropping
2908 + * page_table_lock if at all possible
2909 + */
2910 + page_locked = trylock_page(page);
2911 target_nid = mpol_misplaced(page, vma, haddr);
2912 if (target_nid == -1) {
2913 - put_page(page);
2914 - goto clear_pmdnuma;
2915 + /* If the page was locked, there are no parallel migrations */
2916 + if (page_locked)
2917 + goto clear_pmdnuma;
2918 +
2919 + /*
2920 + * Otherwise wait for potential migrations and retry. We do
2921 + * relock and check_same as the page may no longer be mapped.
2922 + * As the fault is being retried, do not account for it.
2923 + */
2924 + spin_unlock(&mm->page_table_lock);
2925 + wait_on_page_locked(page);
2926 + page_nid = -1;
2927 + goto out;
2928 }
2929
2930 - /* Acquire the page lock to serialise THP migrations */
2931 + /* Page is misplaced, serialise migrations and parallel THP splits */
2932 + get_page(page);
2933 spin_unlock(&mm->page_table_lock);
2934 - lock_page(page);
2935 + if (!page_locked)
2936 + lock_page(page);
2937 + anon_vma = page_lock_anon_vma_read(page);
2938
2939 /* Confirm the PTE did not while locked */
2940 spin_lock(&mm->page_table_lock);
2941 if (unlikely(!pmd_same(pmd, *pmdp))) {
2942 unlock_page(page);
2943 put_page(page);
2944 + page_nid = -1;
2945 goto out_unlock;
2946 }
2947 - spin_unlock(&mm->page_table_lock);
2948
2949 - /* Migrate the THP to the requested node */
2950 + /*
2951 + * Migrate the THP to the requested node, returns with page unlocked
2952 + * and pmd_numa cleared.
2953 + */
2954 + spin_unlock(&mm->page_table_lock);
2955 migrated = migrate_misplaced_transhuge_page(mm, vma,
2956 pmdp, pmd, addr, page, target_nid);
2957 - if (!migrated)
2958 - goto check_same;
2959 -
2960 - task_numa_fault(target_nid, HPAGE_PMD_NR, true);
2961 - return 0;
2962 + if (migrated)
2963 + page_nid = target_nid;
2964
2965 -check_same:
2966 - spin_lock(&mm->page_table_lock);
2967 - if (unlikely(!pmd_same(pmd, *pmdp)))
2968 - goto out_unlock;
2969 + goto out;
2970 clear_pmdnuma:
2971 + BUG_ON(!PageLocked(page));
2972 pmd = pmd_mknonnuma(pmd);
2973 set_pmd_at(mm, haddr, pmdp, pmd);
2974 VM_BUG_ON(pmd_numa(*pmdp));
2975 update_mmu_cache_pmd(vma, addr, pmdp);
2976 + unlock_page(page);
2977 out_unlock:
2978 spin_unlock(&mm->page_table_lock);
2979 - if (current_nid != -1)
2980 - task_numa_fault(current_nid, HPAGE_PMD_NR, false);
2981 +
2982 +out:
2983 + if (anon_vma)
2984 + page_unlock_anon_vma_read(anon_vma);
2985 +
2986 + if (page_nid != -1)
2987 + task_numa_fault(page_nid, HPAGE_PMD_NR, migrated);
2988 +
2989 return 0;
2990 }
2991
2992 diff --git a/mm/memory.c b/mm/memory.c
2993 index 440986e57218..168a090acd02 100644
2994 --- a/mm/memory.c
2995 +++ b/mm/memory.c
2996 @@ -3532,12 +3532,12 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2997 }
2998
2999 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
3000 - unsigned long addr, int current_nid)
3001 + unsigned long addr, int page_nid)
3002 {
3003 get_page(page);
3004
3005 count_vm_numa_event(NUMA_HINT_FAULTS);
3006 - if (current_nid == numa_node_id())
3007 + if (page_nid == numa_node_id())
3008 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
3009
3010 return mpol_misplaced(page, vma, addr);
3011 @@ -3548,7 +3548,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3012 {
3013 struct page *page = NULL;
3014 spinlock_t *ptl;
3015 - int current_nid = -1;
3016 + int page_nid = -1;
3017 int target_nid;
3018 bool migrated = false;
3019
3020 @@ -3578,15 +3578,10 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3021 return 0;
3022 }
3023
3024 - current_nid = page_to_nid(page);
3025 - target_nid = numa_migrate_prep(page, vma, addr, current_nid);
3026 + page_nid = page_to_nid(page);
3027 + target_nid = numa_migrate_prep(page, vma, addr, page_nid);
3028 pte_unmap_unlock(ptep, ptl);
3029 if (target_nid == -1) {
3030 - /*
3031 - * Account for the fault against the current node if it not
3032 - * being replaced regardless of where the page is located.
3033 - */
3034 - current_nid = numa_node_id();
3035 put_page(page);
3036 goto out;
3037 }
3038 @@ -3594,11 +3589,11 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3039 /* Migrate to the requested node */
3040 migrated = migrate_misplaced_page(page, target_nid);
3041 if (migrated)
3042 - current_nid = target_nid;
3043 + page_nid = target_nid;
3044
3045 out:
3046 - if (current_nid != -1)
3047 - task_numa_fault(current_nid, 1, migrated);
3048 + if (page_nid != -1)
3049 + task_numa_fault(page_nid, 1, migrated);
3050 return 0;
3051 }
3052
3053 @@ -3613,7 +3608,6 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3054 unsigned long offset;
3055 spinlock_t *ptl;
3056 bool numa = false;
3057 - int local_nid = numa_node_id();
3058
3059 spin_lock(&mm->page_table_lock);
3060 pmd = *pmdp;
3061 @@ -3636,9 +3630,10 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3062 for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
3063 pte_t pteval = *pte;
3064 struct page *page;
3065 - int curr_nid = local_nid;
3066 + int page_nid = -1;
3067 int target_nid;
3068 - bool migrated;
3069 + bool migrated = false;
3070 +
3071 if (!pte_present(pteval))
3072 continue;
3073 if (!pte_numa(pteval))
3074 @@ -3660,25 +3655,19 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3075 if (unlikely(page_mapcount(page) != 1))
3076 continue;
3077
3078 - /*
3079 - * Note that the NUMA fault is later accounted to either
3080 - * the node that is currently running or where the page is
3081 - * migrated to.
3082 - */
3083 - curr_nid = local_nid;
3084 - target_nid = numa_migrate_prep(page, vma, addr,
3085 - page_to_nid(page));
3086 - if (target_nid == -1) {
3087 + page_nid = page_to_nid(page);
3088 + target_nid = numa_migrate_prep(page, vma, addr, page_nid);
3089 + pte_unmap_unlock(pte, ptl);
3090 + if (target_nid != -1) {
3091 + migrated = migrate_misplaced_page(page, target_nid);
3092 + if (migrated)
3093 + page_nid = target_nid;
3094 + } else {
3095 put_page(page);
3096 - continue;
3097 }
3098
3099 - /* Migrate to the requested node */
3100 - pte_unmap_unlock(pte, ptl);
3101 - migrated = migrate_misplaced_page(page, target_nid);
3102 - if (migrated)
3103 - curr_nid = target_nid;
3104 - task_numa_fault(curr_nid, 1, migrated);
3105 + if (page_nid != -1)
3106 + task_numa_fault(page_nid, 1, migrated);
3107
3108 pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
3109 }
3110 @@ -4081,6 +4070,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
3111
3112 return len;
3113 }
3114 +EXPORT_SYMBOL_GPL(generic_access_phys);
3115 #endif
3116
3117 /*
3118 diff --git a/mm/migrate.c b/mm/migrate.c
3119 index 81af4e678101..d22f6f0a62e0 100644
3120 --- a/mm/migrate.c
3121 +++ b/mm/migrate.c
3122 @@ -1712,12 +1712,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
3123 unlock_page(new_page);
3124 put_page(new_page); /* Free it */
3125
3126 - unlock_page(page);
3127 + /* Retake the callers reference and putback on LRU */
3128 + get_page(page);
3129 putback_lru_page(page);
3130 -
3131 - count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
3132 - isolated = 0;
3133 - goto out;
3134 + mod_zone_page_state(page_zone(page),
3135 + NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
3136 + goto out_fail;
3137 }
3138
3139 /*
3140 @@ -1734,9 +1734,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
3141 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3142 entry = pmd_mkhuge(entry);
3143
3144 - page_add_new_anon_rmap(new_page, vma, haddr);
3145 -
3146 + pmdp_clear_flush(vma, haddr, pmd);
3147 set_pmd_at(mm, haddr, pmd, entry);
3148 + page_add_new_anon_rmap(new_page, vma, haddr);
3149 update_mmu_cache_pmd(vma, address, &entry);
3150 page_remove_rmap(page);
3151 /*
3152 @@ -1755,7 +1755,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
3153 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
3154 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
3155
3156 -out:
3157 mod_zone_page_state(page_zone(page),
3158 NR_ISOLATED_ANON + page_lru,
3159 -HPAGE_PMD_NR);
3160 @@ -1764,6 +1763,10 @@ out:
3161 out_fail:
3162 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
3163 out_dropref:
3164 + entry = pmd_mknonnuma(entry);
3165 + set_pmd_at(mm, haddr, pmd, entry);
3166 + update_mmu_cache_pmd(vma, address, &entry);
3167 +
3168 unlock_page(page);
3169 put_page(page);
3170 return 0;
3171 diff --git a/mm/mprotect.c b/mm/mprotect.c
3172 index a3af058f68e4..412ba2b7326a 100644
3173 --- a/mm/mprotect.c
3174 +++ b/mm/mprotect.c
3175 @@ -148,7 +148,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
3176 split_huge_page_pmd(vma, addr, pmd);
3177 else if (change_huge_pmd(vma, pmd, addr, newprot,
3178 prot_numa)) {
3179 - pages += HPAGE_PMD_NR;
3180 + pages++;
3181 continue;
3182 }
3183 /* fall through */
3184 diff --git a/mm/pagewalk.c b/mm/pagewalk.c
3185 index 5da2cbcfdbb5..2beeabf502c5 100644
3186 --- a/mm/pagewalk.c
3187 +++ b/mm/pagewalk.c
3188 @@ -242,7 +242,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
3189 if (err)
3190 break;
3191 pgd++;
3192 - } while (addr = next, addr != end);
3193 + } while (addr = next, addr < end);
3194
3195 return err;
3196 }
3197 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
3198 index 43dd7525bfcb..a4b2154d47d8 100644
3199 --- a/net/mac80211/cfg.c
3200 +++ b/net/mac80211/cfg.c
3201 @@ -3334,7 +3334,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
3202 return -EINVAL;
3203 }
3204 band = chanctx_conf->def.chan->band;
3205 - sta = sta_info_get(sdata, peer);
3206 + sta = sta_info_get_bss(sdata, peer);
3207 if (sta) {
3208 qos = test_sta_flag(sta, WLAN_STA_WME);
3209 } else {
3210 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
3211 index 8412a303993a..8c0f8e69f244 100644
3212 --- a/net/mac80211/ieee80211_i.h
3213 +++ b/net/mac80211/ieee80211_i.h
3214 @@ -858,6 +858,8 @@ struct tpt_led_trigger {
3215 * that the scan completed.
3216 * @SCAN_ABORTED: Set for our scan work function when the driver reported
3217 * a scan complete for an aborted scan.
3218 + * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
3219 + * cancelled.
3220 */
3221 enum {
3222 SCAN_SW_SCANNING,
3223 @@ -865,6 +867,7 @@ enum {
3224 SCAN_ONCHANNEL_SCANNING,
3225 SCAN_COMPLETED,
3226 SCAN_ABORTED,
3227 + SCAN_HW_CANCELLED,
3228 };
3229
3230 /**
3231 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
3232 index 2c5a79bd3777..2b88d77cf9f0 100644
3233 --- a/net/mac80211/rx.c
3234 +++ b/net/mac80211/rx.c
3235 @@ -3014,6 +3014,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
3236 case NL80211_IFTYPE_ADHOC:
3237 if (!bssid)
3238 return 0;
3239 + if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
3240 + ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
3241 + return 0;
3242 if (ieee80211_is_beacon(hdr->frame_control)) {
3243 return 1;
3244 } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
3245 diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
3246 index 1b122a79b0d8..7aafa54eaef1 100644
3247 --- a/net/mac80211/scan.c
3248 +++ b/net/mac80211/scan.c
3249 @@ -211,6 +211,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
3250 enum ieee80211_band band;
3251 int i, ielen, n_chans;
3252
3253 + if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
3254 + return false;
3255 +
3256 do {
3257 if (local->hw_scan_band == IEEE80211_NUM_BANDS)
3258 return false;
3259 @@ -887,7 +890,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
3260 if (!local->scan_req)
3261 goto out;
3262
3263 + /*
3264 + * We have a scan running and the driver already reported completion,
3265 + * but the worker hasn't run yet or is stuck on the mutex - mark it as
3266 + * cancelled.
3267 + */
3268 + if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
3269 + test_bit(SCAN_COMPLETED, &local->scanning)) {
3270 + set_bit(SCAN_HW_CANCELLED, &local->scanning);
3271 + goto out;
3272 + }
3273 +
3274 if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
3275 + /*
3276 + * Make sure that __ieee80211_scan_completed doesn't trigger a
3277 + * scan on another band.
3278 + */
3279 + set_bit(SCAN_HW_CANCELLED, &local->scanning);
3280 if (local->ops->cancel_hw_scan)
3281 drv_cancel_hw_scan(local,
3282 rcu_dereference_protected(local->scan_sdata,
3283 diff --git a/net/mac80211/status.c b/net/mac80211/status.c
3284 index 43439203f4e4..9e78206bd9bb 100644
3285 --- a/net/mac80211/status.c
3286 +++ b/net/mac80211/status.c
3287 @@ -180,6 +180,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
3288 struct ieee80211_local *local = sta->local;
3289 struct ieee80211_sub_if_data *sdata = sta->sdata;
3290
3291 + if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
3292 + sta->last_rx = jiffies;
3293 +
3294 if (ieee80211_is_data_qos(mgmt->frame_control)) {
3295 struct ieee80211_hdr *hdr = (void *) skb->data;
3296 u8 *qc = ieee80211_get_qos_ctl(hdr);
3297 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
3298 index 4105d0ca963e..4438aed3cb99 100644
3299 --- a/net/mac80211/tx.c
3300 +++ b/net/mac80211/tx.c
3301 @@ -1101,7 +1101,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
3302 tx->sta = rcu_dereference(sdata->u.vlan.sta);
3303 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
3304 return TX_DROP;
3305 - } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
3306 + } else if (info->flags & (IEEE80211_TX_CTL_INJECTED |
3307 + IEEE80211_TX_INTFL_NL80211_FRAME_TX) ||
3308 tx->sdata->control_port_protocol == tx->skb->protocol) {
3309 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
3310 }
3311 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
3312 index 22654452a561..31e78ae59cbf 100644
3313 --- a/net/mac80211/util.c
3314 +++ b/net/mac80211/util.c
3315 @@ -2155,6 +2155,10 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
3316 }
3317
3318 rate = cfg80211_calculate_bitrate(&ri);
3319 + if (WARN_ONCE(!rate,
3320 + "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
3321 + status->flag, status->rate_idx, status->vht_nss))
3322 + return 0;
3323
3324 /* rewind from end of MPDU */
3325 if (status->flag & RX_FLAG_MACTIME_END)
3326 diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
3327 index 39bff7d36768..403fe29c024d 100644
3328 --- a/net/wireless/ibss.c
3329 +++ b/net/wireless/ibss.c
3330 @@ -263,6 +263,8 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
3331 if (chan->flags & IEEE80211_CHAN_DISABLED)
3332 continue;
3333 wdev->wext.ibss.chandef.chan = chan;
3334 + wdev->wext.ibss.chandef.center_freq1 =
3335 + chan->center_freq;
3336 break;
3337 }
3338
3339 @@ -347,6 +349,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
3340 if (chan) {
3341 wdev->wext.ibss.chandef.chan = chan;
3342 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
3343 + wdev->wext.ibss.chandef.center_freq1 = freq;
3344 wdev->wext.ibss.channel_fixed = true;
3345 } else {
3346 /* cfg80211_ibss_wext_join will pick one if needed */
3347 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3348 index 5f6e982cdcf4..7956f41798c3 100644
3349 --- a/net/wireless/nl80211.c
3350 +++ b/net/wireless/nl80211.c
3351 @@ -2379,7 +2379,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
3352 change = true;
3353 }
3354
3355 - if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) &&
3356 + if (flags && (*flags & MONITOR_FLAG_ACTIVE) &&
3357 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
3358 return -EOPNOTSUPP;
3359
3360 @@ -2441,7 +2441,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
3361 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
3362 &flags);
3363
3364 - if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) &&
3365 + if (!err && (flags & MONITOR_FLAG_ACTIVE) &&
3366 !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
3367 return -EOPNOTSUPP;
3368
3369 diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
3370 index 487ac6f37ca2..9a11f9f799f4 100644
3371 --- a/scripts/kallsyms.c
3372 +++ b/scripts/kallsyms.c
3373 @@ -55,6 +55,7 @@ static struct sym_entry *table;
3374 static unsigned int table_size, table_cnt;
3375 static int all_symbols = 0;
3376 static char symbol_prefix_char = '\0';
3377 +static unsigned long long kernel_start_addr = 0;
3378
3379 int token_profit[0x10000];
3380
3381 @@ -65,7 +66,10 @@ unsigned char best_table_len[256];
3382
3383 static void usage(void)
3384 {
3385 - fprintf(stderr, "Usage: kallsyms [--all-symbols] [--symbol-prefix=<prefix char>] < in.map > out.S\n");
3386 + fprintf(stderr, "Usage: kallsyms [--all-symbols] "
3387 + "[--symbol-prefix=<prefix char>] "
3388 + "[--page-offset=<CONFIG_PAGE_OFFSET>] "
3389 + "< in.map > out.S\n");
3390 exit(1);
3391 }
3392
3393 @@ -194,6 +198,9 @@ static int symbol_valid(struct sym_entry *s)
3394 int i;
3395 int offset = 1;
3396
3397 + if (s->addr < kernel_start_addr)
3398 + return 0;
3399 +
3400 /* skip prefix char */
3401 if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char)
3402 offset++;
3403 @@ -646,6 +653,9 @@ int main(int argc, char **argv)
3404 if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
3405 p++;
3406 symbol_prefix_char = *p;
3407 + } else if (strncmp(argv[i], "--page-offset=", 14) == 0) {
3408 + const char *p = &argv[i][14];
3409 + kernel_start_addr = strtoull(p, NULL, 16);
3410 } else
3411 usage();
3412 }
3413 diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
3414 index 014994936b1c..32b10f53d0b4 100644
3415 --- a/scripts/link-vmlinux.sh
3416 +++ b/scripts/link-vmlinux.sh
3417 @@ -82,6 +82,8 @@ kallsyms()
3418 kallsymopt="${kallsymopt} --all-symbols"
3419 fi
3420
3421 + kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
3422 +
3423 local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
3424 ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
3425
3426 diff --git a/sound/core/pcm.c b/sound/core/pcm.c
3427 index 17f45e8aa89c..e1e9e0c999fe 100644
3428 --- a/sound/core/pcm.c
3429 +++ b/sound/core/pcm.c
3430 @@ -49,6 +49,8 @@ static struct snd_pcm *snd_pcm_get(struct snd_card *card, int device)
3431 struct snd_pcm *pcm;
3432
3433 list_for_each_entry(pcm, &snd_pcm_devices, list) {
3434 + if (pcm->internal)
3435 + continue;
3436 if (pcm->card == card && pcm->device == device)
3437 return pcm;
3438 }
3439 @@ -60,6 +62,8 @@ static int snd_pcm_next(struct snd_card *card, int device)
3440 struct snd_pcm *pcm;
3441
3442 list_for_each_entry(pcm, &snd_pcm_devices, list) {
3443 + if (pcm->internal)
3444 + continue;
3445 if (pcm->card == card && pcm->device > device)
3446 return pcm->device;
3447 else if (pcm->card->number > card->number)
3448 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
3449 index 8a005f0e5ca4..7c9e7dccebed 100644
3450 --- a/sound/pci/hda/hda_codec.c
3451 +++ b/sound/pci/hda/hda_codec.c
3452 @@ -4804,8 +4804,8 @@ static void hda_power_work(struct work_struct *work)
3453 spin_unlock(&codec->power_lock);
3454
3455 state = hda_call_codec_suspend(codec, true);
3456 - codec->pm_down_notified = 0;
3457 - if (!bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
3458 + if (!codec->pm_down_notified &&
3459 + !bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
3460 codec->pm_down_notified = 1;
3461 hda_call_pm_notify(bus, false);
3462 }
3463 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
3464 index adabdeb7b15d..9e44e4a2df4a 100644
3465 --- a/sound/pci/hda/hda_generic.c
3466 +++ b/sound/pci/hda/hda_generic.c
3467 @@ -4428,9 +4428,11 @@ int snd_hda_gen_build_controls(struct hda_codec *codec)
3468 true, &spec->vmaster_mute.sw_kctl);
3469 if (err < 0)
3470 return err;
3471 - if (spec->vmaster_mute.hook)
3472 + if (spec->vmaster_mute.hook) {
3473 snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute,
3474 spec->vmaster_mute_enum);
3475 + snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
3476 + }
3477 }
3478
3479 free_kctls(spec); /* no longer needed */
3480 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3481 index 1383f38997c1..0ce3ed68b835 100644
3482 --- a/sound/pci/hda/patch_realtek.c
3483 +++ b/sound/pci/hda/patch_realtek.c
3484 @@ -4382,6 +4382,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
3485 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
3486 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
3487 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
3488 + SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4),
3489 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
3490 SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
3491 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
3492 diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
3493 index 2d9e099415a5..8f6069017105 100644
3494 --- a/sound/soc/codecs/wm_hubs.c
3495 +++ b/sound/soc/codecs/wm_hubs.c
3496 @@ -530,6 +530,7 @@ static int hp_supply_event(struct snd_soc_dapm_widget *w,
3497 hubs->hp_startup_mode);
3498 break;
3499 }
3500 + break;
3501
3502 case SND_SOC_DAPM_PRE_PMD:
3503 snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
3504 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
3505 index 4375c9f2b791..8e90cbed07d8 100644
3506 --- a/sound/soc/soc-dapm.c
3507 +++ b/sound/soc/soc-dapm.c
3508 @@ -1810,7 +1810,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
3509 w->active ? "active" : "inactive");
3510
3511 list_for_each_entry(p, &w->sources, list_sink) {
3512 - if (p->connected && !p->connected(w, p->sink))
3513 + if (p->connected && !p->connected(w, p->source))
3514 continue;
3515
3516 if (p->connect)