Magellan Linux

Contents of /trunk/kernel-alx/patches-3.10/0118-3.10.19-all-fixes.patch



Revision 2346
Mon Dec 16 10:04:53 2013 UTC by niro
File size: 85994 bytes
-linux-3.10.19
1 diff --git a/Makefile b/Makefile
2 index 5fb14e503fe3..83a02f5b2c00 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 10
8 -SUBLEVEL = 18
9 +SUBLEVEL = 19
10 EXTRAVERSION =
11 NAME = TOSSUG Baby Fish
12
13 diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
14 index 689ffd86d5e9..331a0846628e 100644
15 --- a/arch/arc/mm/fault.c
16 +++ b/arch/arc/mm/fault.c
17 @@ -16,7 +16,7 @@
18 #include <linux/kdebug.h>
19 #include <asm/pgalloc.h>
20
21 -static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
22 +static int handle_vmalloc_fault(unsigned long address)
23 {
24 /*
25 * Synchronize this task's top level page-table
26 @@ -26,7 +26,7 @@ static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
27 pud_t *pud, *pud_k;
28 pmd_t *pmd, *pmd_k;
29
30 - pgd = pgd_offset_fast(mm, address);
31 + pgd = pgd_offset_fast(current->active_mm, address);
32 pgd_k = pgd_offset_k(address);
33
34 if (!pgd_present(*pgd_k))
35 @@ -72,7 +72,7 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
36 * nothing more.
37 */
38 if (address >= VMALLOC_START && address <= VMALLOC_END) {
39 - ret = handle_vmalloc_fault(mm, address);
40 + ret = handle_vmalloc_fault(address);
41 if (unlikely(ret))
42 goto bad_area_nosemaphore;
43 else
44 diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
45 index 37aabd772fbb..d2d58258aea6 100644
46 --- a/arch/parisc/kernel/head.S
47 +++ b/arch/parisc/kernel/head.S
48 @@ -195,6 +195,8 @@ common_stext:
49 ldw MEM_PDC_HI(%r0),%r6
50 depd %r6, 31, 32, %r3 /* move to upper word */
51
52 + mfctl %cr30,%r6 /* PCX-W2 firmware bug */
53 +
54 ldo PDC_PSW(%r0),%arg0 /* 21 */
55 ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
56 ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
57 @@ -203,6 +205,8 @@ common_stext:
58 copy %r0,%arg3
59
60 stext_pdc_ret:
61 + mtctl %r6,%cr30 /* restore task thread info */
62 +
63 /* restore rfi target address*/
64 ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
65 tophys_r1 %r10
66 diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
67 index 829df49dee99..41ebbfebb333 100644
68 --- a/arch/um/kernel/exitcode.c
69 +++ b/arch/um/kernel/exitcode.c
70 @@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
71 const char __user *buffer, size_t count, loff_t *pos)
72 {
73 char *end, buf[sizeof("nnnnn\0")];
74 + size_t size;
75 int tmp;
76
77 - if (copy_from_user(buf, buffer, count))
78 + size = min(count, sizeof(buf));
79 + if (copy_from_user(buf, buffer, size))
80 return -EFAULT;
81
82 tmp = simple_strtol(buf, &end, 0);
83 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
84 index 794f6eb54cd3..b32dbb411a9a 100644
85 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
86 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
87 @@ -98,7 +98,7 @@ static int __init early_get_pnodeid(void)
88 break;
89 case UV3_HUB_PART_NUMBER:
90 case UV3_HUB_PART_NUMBER_X:
91 - uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
92 + uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
93 break;
94 }
95
96 diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
97 index 718eca1850bd..98b67d5f1514 100644
98 --- a/arch/xtensa/kernel/signal.c
99 +++ b/arch/xtensa/kernel/signal.c
100 @@ -341,7 +341,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
101
102 sp = regs->areg[1];
103
104 - if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
105 + if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) {
106 sp = current->sas_ss_sp + current->sas_ss_size;
107 }
108
109 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
110 index c69fcce505c0..370462fa8e01 100644
111 --- a/drivers/ata/libata-eh.c
112 +++ b/drivers/ata/libata-eh.c
113 @@ -1322,14 +1322,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
114 * should be retried. To be used from EH.
115 *
116 * SCSI midlayer limits the number of retries to scmd->allowed.
117 - * scmd->retries is decremented for commands which get retried
118 + * scmd->allowed is incremented for commands which get retried
119 * due to unrelated failures (qc->err_mask is zero).
120 */
121 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
122 {
123 struct scsi_cmnd *scmd = qc->scsicmd;
124 - if (!qc->err_mask && scmd->retries)
125 - scmd->retries--;
126 + if (!qc->err_mask)
127 + scmd->allowed++;
128 __ata_eh_qc_complete(qc);
129 }
130
131 diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
132 index 67ccf4aa7277..f5e4c21b301f 100644
133 --- a/drivers/clk/versatile/clk-icst.c
134 +++ b/drivers/clk/versatile/clk-icst.c
135 @@ -107,7 +107,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
136
137 vco = icst_hz_to_vco(icst->params, rate);
138 icst->rate = icst_hz(icst->params, vco);
139 - vco_set(icst->vcoreg, icst->lockreg, vco);
140 + vco_set(icst->lockreg, icst->vcoreg, vco);
141 return 0;
142 }
143
144 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
145 index 6d6a0b48eb75..9520e3b90bde 100644
146 --- a/drivers/cpufreq/intel_pstate.c
147 +++ b/drivers/cpufreq/intel_pstate.c
148 @@ -629,8 +629,8 @@ static int __cpuinit intel_pstate_cpu_exit(struct cpufreq_policy *policy)
149
150 static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy)
151 {
152 - int rc, min_pstate, max_pstate;
153 struct cpudata *cpu;
154 + int rc;
155
156 rc = intel_pstate_init_cpu(policy->cpu);
157 if (rc)
158 @@ -644,9 +644,8 @@ static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy)
159 else
160 policy->policy = CPUFREQ_POLICY_POWERSAVE;
161
162 - intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
163 - policy->min = min_pstate * 100000;
164 - policy->max = max_pstate * 100000;
165 + policy->min = cpu->pstate.min_pstate * 100000;
166 + policy->max = cpu->pstate.turbo_pstate * 100000;
167
168 /* cpuinfo and default policy values */
169 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
170 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
171 index 9cc247f55502..2ab782cb38a2 100644
172 --- a/drivers/gpu/drm/drm_drv.c
173 +++ b/drivers/gpu/drm/drm_drv.c
174 @@ -406,9 +406,16 @@ long drm_ioctl(struct file *filp,
175 cmd = ioctl->cmd_drv;
176 }
177 else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
178 + u32 drv_size;
179 +
180 ioctl = &drm_ioctls[nr];
181 - cmd = ioctl->cmd;
182 +
183 + drv_size = _IOC_SIZE(ioctl->cmd);
184 usize = asize = _IOC_SIZE(cmd);
185 + if (drv_size > asize)
186 + asize = drv_size;
187 +
188 + cmd = ioctl->cmd;
189 } else
190 goto err_i1;
191
192 diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
193 index c272d8832605..4c81e9faa635 100644
194 --- a/drivers/gpu/drm/radeon/atombios_encoders.c
195 +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
196 @@ -1641,7 +1641,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
197 * does the same thing and more.
198 */
199 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
200 - (rdev->family != CHIP_RS880))
201 + (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
202 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
203 }
204 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
205 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
206 index 07dfd823cc30..6c44c69a5ba4 100644
207 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
208 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
209 @@ -740,9 +740,17 @@ static void vmw_postclose(struct drm_device *dev,
210 struct vmw_fpriv *vmw_fp;
211
212 vmw_fp = vmw_fpriv(file_priv);
213 - ttm_object_file_release(&vmw_fp->tfile);
214 - if (vmw_fp->locked_master)
215 +
216 + if (vmw_fp->locked_master) {
217 + struct vmw_master *vmaster =
218 + vmw_master(vmw_fp->locked_master);
219 +
220 + ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
221 + ttm_vt_unlock(&vmaster->lock);
222 drm_master_put(&vmw_fp->locked_master);
223 + }
224 +
225 + ttm_object_file_release(&vmw_fp->tfile);
226 kfree(vmw_fp);
227 }
228
229 @@ -942,14 +950,13 @@ static void vmw_master_drop(struct drm_device *dev,
230
231 vmw_fp->locked_master = drm_master_get(file_priv->master);
232 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
233 - vmw_execbuf_release_pinned_bo(dev_priv);
234 -
235 if (unlikely((ret != 0))) {
236 DRM_ERROR("Unable to lock TTM at VT switch.\n");
237 drm_master_put(&vmw_fp->locked_master);
238 }
239
240 - ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
241 + ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
242 + vmw_execbuf_release_pinned_bo(dev_priv);
243
244 if (!dev_priv->enable_fb) {
245 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
246 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
247 index bc784254e78e..407d7f9fe8a8 100644
248 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
249 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
250 @@ -970,7 +970,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
251 if (new_backup)
252 res->backup_offset = new_backup_offset;
253
254 - if (!res->func->may_evict)
255 + if (!res->func->may_evict || res->id == -1)
256 return;
257
258 write_lock(&dev_priv->resource_lock);
259 diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
260 index cbcf8b301edc..a30a0f8a41c0 100644
261 --- a/drivers/md/bcache/request.c
262 +++ b/drivers/md/bcache/request.c
263 @@ -1059,7 +1059,7 @@ static void request_write(struct cached_dev *dc, struct search *s)
264
265 if (bio->bi_rw & REQ_FLUSH) {
266 /* Also need to send a flush to the backing device */
267 - struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
268 + struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
269 dc->disk.bio_split);
270
271 flush->bi_rw = WRITE_FLUSH;
272 diff --git a/drivers/md/md.c b/drivers/md/md.c
273 index 51f0345a4ba4..d78f1fffab01 100644
274 --- a/drivers/md/md.c
275 +++ b/drivers/md/md.c
276 @@ -8072,6 +8072,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
277 u64 *p;
278 int lo, hi;
279 int rv = 1;
280 + unsigned long flags;
281
282 if (bb->shift < 0)
283 /* badblocks are disabled */
284 @@ -8086,7 +8087,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
285 sectors = next - s;
286 }
287
288 - write_seqlock_irq(&bb->lock);
289 + write_seqlock_irqsave(&bb->lock, flags);
290
291 p = bb->page;
292 lo = 0;
293 @@ -8202,7 +8203,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
294 bb->changed = 1;
295 if (!acknowledged)
296 bb->unacked_exist = 1;
297 - write_sequnlock_irq(&bb->lock);
298 + write_sequnlock_irqrestore(&bb->lock, flags);
299
300 return rv;
301 }
302 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
303 index 6f4824426e86..afaa5d425e9a 100644
304 --- a/drivers/md/raid1.c
305 +++ b/drivers/md/raid1.c
306 @@ -1479,6 +1479,7 @@ static int raid1_spare_active(struct mddev *mddev)
307 }
308 }
309 if (rdev
310 + && rdev->recovery_offset == MaxSector
311 && !test_bit(Faulty, &rdev->flags)
312 && !test_and_set_bit(In_sync, &rdev->flags)) {
313 count++;
314 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
315 index 081bb3345353..0add86821755 100644
316 --- a/drivers/md/raid10.c
317 +++ b/drivers/md/raid10.c
318 @@ -1762,6 +1762,7 @@ static int raid10_spare_active(struct mddev *mddev)
319 }
320 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
321 } else if (tmp->rdev
322 + && tmp->rdev->recovery_offset == MaxSector
323 && !test_bit(Faulty, &tmp->rdev->flags)
324 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
325 count++;
326 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
327 index a35b846af4f8..4bed5454b8dc 100644
328 --- a/drivers/md/raid5.c
329 +++ b/drivers/md/raid5.c
330 @@ -668,6 +668,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
331 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
332 bi->bi_io_vec[0].bv_offset = 0;
333 bi->bi_size = STRIPE_SIZE;
334 + /*
335 + * If this is discard request, set bi_vcnt 0. We don't
336 + * want to confuse SCSI because SCSI will replace payload
337 + */
338 + if (rw & REQ_DISCARD)
339 + bi->bi_vcnt = 0;
340 if (rrdev)
341 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
342
343 @@ -706,6 +712,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
344 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
345 rbi->bi_io_vec[0].bv_offset = 0;
346 rbi->bi_size = STRIPE_SIZE;
347 + /*
348 + * If this is discard request, set bi_vcnt 0. We don't
349 + * want to confuse SCSI because SCSI will replace payload
350 + */
351 + if (rw & REQ_DISCARD)
352 + rbi->bi_vcnt = 0;
353 if (conf->mddev->gendisk)
354 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
355 rbi, disk_devt(conf->mddev->gendisk),
356 @@ -2800,6 +2812,14 @@ static void handle_stripe_clean_event(struct r5conf *conf,
357 }
358 /* now that discard is done we can proceed with any sync */
359 clear_bit(STRIPE_DISCARD, &sh->state);
360 + /*
361 + * SCSI discard will change some bio fields and the stripe has
362 + * no updated data, so remove it from hash list and the stripe
363 + * will be reinitialized
364 + */
365 + spin_lock_irq(&conf->device_lock);
366 + remove_hash(sh);
367 + spin_unlock_irq(&conf->device_lock);
368 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
369 set_bit(STRIPE_HANDLE, &sh->state);
370
371 diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
372 index db52f4414def..535d5dd8d816 100644
373 --- a/drivers/net/can/at91_can.c
374 +++ b/drivers/net/can/at91_can.c
375 @@ -1409,10 +1409,10 @@ static int at91_can_remove(struct platform_device *pdev)
376
377 static const struct platform_device_id at91_can_id_table[] = {
378 {
379 - .name = "at91_can",
380 + .name = "at91sam9x5_can",
381 .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
382 }, {
383 - .name = "at91sam9x5_can",
384 + .name = "at91_can",
385 .driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
386 }, {
387 /* sentinel */
388 diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
389 index 769d29ed106d..a8f33a525dd6 100644
390 --- a/drivers/net/can/flexcan.c
391 +++ b/drivers/net/can/flexcan.c
392 @@ -63,7 +63,7 @@
393 #define FLEXCAN_MCR_BCC BIT(16)
394 #define FLEXCAN_MCR_LPRIO_EN BIT(13)
395 #define FLEXCAN_MCR_AEN BIT(12)
396 -#define FLEXCAN_MCR_MAXMB(x) ((x) & 0xf)
397 +#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f)
398 #define FLEXCAN_MCR_IDAM_A (0 << 8)
399 #define FLEXCAN_MCR_IDAM_B (1 << 8)
400 #define FLEXCAN_MCR_IDAM_C (2 << 8)
401 @@ -745,9 +745,11 @@ static int flexcan_chip_start(struct net_device *dev)
402 *
403 */
404 reg_mcr = flexcan_read(&regs->mcr);
405 + reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
406 reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
407 FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
408 - FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS;
409 + FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS |
410 + FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
411 netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
412 flexcan_write(reg_mcr, &regs->mcr);
413
414 @@ -792,6 +794,10 @@ static int flexcan_chip_start(struct net_device *dev)
415 &regs->cantxfg[i].can_ctrl);
416 }
417
418 + /* Abort any pending TX, mark Mailbox as INACTIVE */
419 + flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
420 + &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
421 +
422 /* acceptance mask/acceptance code (accept everything) */
423 flexcan_write(0x0, &regs->rxgmask);
424 flexcan_write(0x0, &regs->rx14mask);
425 @@ -983,9 +989,9 @@ static void unregister_flexcandev(struct net_device *dev)
426 }
427
428 static const struct of_device_id flexcan_of_match[] = {
429 - { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
430 - { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
431 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
432 + { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
433 + { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
434 { /* sentinel */ },
435 };
436 MODULE_DEVICE_TABLE(of, flexcan_of_match);
437 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
438 index e33a659e224b..a8fee08479ef 100644
439 --- a/drivers/net/wireless/ath/ath9k/main.c
440 +++ b/drivers/net/wireless/ath/ath9k/main.c
441 @@ -209,6 +209,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
442 struct ath_hw *ah = sc->sc_ah;
443 struct ath_common *common = ath9k_hw_common(ah);
444 unsigned long flags;
445 + int i;
446
447 if (ath_startrecv(sc) != 0) {
448 ath_err(common, "Unable to restart recv logic\n");
449 @@ -236,6 +237,15 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
450 }
451 work:
452 ath_restart_work(sc);
453 +
454 + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
455 + if (!ATH_TXQ_SETUP(sc, i))
456 + continue;
457 +
458 + spin_lock_bh(&sc->tx.txq[i].axq_lock);
459 + ath_txq_schedule(sc, &sc->tx.txq[i]);
460 + spin_unlock_bh(&sc->tx.txq[i].axq_lock);
461 + }
462 }
463
464 if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
465 @@ -543,21 +553,10 @@ chip_reset:
466
467 static int ath_reset(struct ath_softc *sc)
468 {
469 - int i, r;
470 + int r;
471
472 ath9k_ps_wakeup(sc);
473 -
474 r = ath_reset_internal(sc, NULL);
475 -
476 - for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
477 - if (!ATH_TXQ_SETUP(sc, i))
478 - continue;
479 -
480 - spin_lock_bh(&sc->tx.txq[i].axq_lock);
481 - ath_txq_schedule(sc, &sc->tx.txq[i]);
482 - spin_unlock_bh(&sc->tx.txq[i].axq_lock);
483 - }
484 -
485 ath9k_ps_restore(sc);
486
487 return r;
488 diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
489 index b5ab8d1bcac0..5282088d6c14 100644
490 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
491 +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
492 @@ -268,6 +268,12 @@ const struct iwl_cfg iwl6035_2agn_cfg = {
493 .ht_params = &iwl6000_ht_params,
494 };
495
496 +const struct iwl_cfg iwl6035_2agn_sff_cfg = {
497 + .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
498 + IWL_DEVICE_6035,
499 + .ht_params = &iwl6000_ht_params,
500 +};
501 +
502 const struct iwl_cfg iwl1030_bgn_cfg = {
503 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
504 IWL_DEVICE_6030,
505 diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
506 index c38aa8f77554..c67e29655b2d 100644
507 --- a/drivers/net/wireless/iwlwifi/iwl-config.h
508 +++ b/drivers/net/wireless/iwlwifi/iwl-config.h
509 @@ -316,6 +316,7 @@ extern const struct iwl_cfg iwl2000_2bgn_cfg;
510 extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
511 extern const struct iwl_cfg iwl2030_2bgn_cfg;
512 extern const struct iwl_cfg iwl6035_2agn_cfg;
513 +extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
514 extern const struct iwl_cfg iwl105_bgn_cfg;
515 extern const struct iwl_cfg iwl105_bgn_d_cfg;
516 extern const struct iwl_cfg iwl135_bgn_cfg;
517 diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
518 index 5283b5552e6f..b7858a595973 100644
519 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c
520 +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
521 @@ -138,13 +138,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
522
523 /* 6x00 Series */
524 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
525 + {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
526 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
527 + {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
528 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
529 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
530 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
531 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
532 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
533 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
534 + {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
535 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
536 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
537
538 @@ -152,12 +155,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
539 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
540 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
541 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
542 + {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
543 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
544 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
545 + {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
546 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
547 + {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
548 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
549 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
550 {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
551 + {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
552 {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
553 {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
554 {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
555 @@ -239,8 +246,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
556
557 /* 6x35 Series */
558 {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
559 + {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
560 {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
561 + {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
562 {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
563 + {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
564 {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
565 {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
566
567 diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
568 index 8fc44992b5c3..fc3fe8ddcf62 100644
569 --- a/drivers/net/wireless/mwifiex/main.c
570 +++ b/drivers/net/wireless/mwifiex/main.c
571 @@ -270,10 +270,12 @@ process_start:
572 }
573 } while (true);
574
575 - if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter))
576 + spin_lock_irqsave(&adapter->main_proc_lock, flags);
577 + if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
578 + spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
579 goto process_start;
580 + }
581
582 - spin_lock_irqsave(&adapter->main_proc_lock, flags);
583 adapter->mwifiex_processing = false;
584 spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
585
586 diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
587 index 763cf1defab5..5a060e537fbe 100644
588 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
589 +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
590 @@ -343,7 +343,8 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
591 (bool)GET_RX_DESC_PAGGR(pdesc));
592 rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
593 if (phystatus) {
594 - p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
595 + p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
596 + stats->rx_bufshift);
597 rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
598 p_drvinfo);
599 }
600 diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
601 index 2dacd19e1b8a..b9bf8b551e3c 100644
602 --- a/drivers/ntb/ntb_hw.c
603 +++ b/drivers/ntb/ntb_hw.c
604 @@ -78,6 +78,8 @@ enum {
605 BWD_HW,
606 };
607
608 +static struct dentry *debugfs_dir;
609 +
610 /* Translate memory window 0,1 to BAR 2,4 */
611 #define MW_TO_BAR(mw) (mw * 2 + 2)
612
613 @@ -531,9 +533,9 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
614 }
615
616 if (val & SNB_PPD_DEV_TYPE)
617 - ndev->dev_type = NTB_DEV_DSD;
618 - else
619 ndev->dev_type = NTB_DEV_USD;
620 + else
621 + ndev->dev_type = NTB_DEV_DSD;
622
623 ndev->reg_ofs.pdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
624 ndev->reg_ofs.pdb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
625 @@ -547,7 +549,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
626 if (ndev->conn_type == NTB_CONN_B2B) {
627 ndev->reg_ofs.sdb = ndev->reg_base + SNB_B2B_DOORBELL_OFFSET;
628 ndev->reg_ofs.spad_write = ndev->reg_base + SNB_B2B_SPAD_OFFSET;
629 - ndev->limits.max_spads = SNB_MAX_SPADS;
630 + ndev->limits.max_spads = SNB_MAX_B2B_SPADS;
631 } else {
632 ndev->reg_ofs.sdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
633 ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
634 @@ -644,10 +646,16 @@ static int ntb_device_setup(struct ntb_device *ndev)
635 rc = -ENODEV;
636 }
637
638 + if (rc)
639 + return rc;
640 +
641 + dev_info(&ndev->pdev->dev, "Device Type = %s\n",
642 + ndev->dev_type == NTB_DEV_USD ? "USD/DSP" : "DSD/USP");
643 +
644 /* Enable Bus Master and Memory Space on the secondary side */
645 writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, ndev->reg_ofs.spci_cmd);
646
647 - return rc;
648 + return 0;
649 }
650
651 static void ntb_device_free(struct ntb_device *ndev)
652 @@ -992,6 +1000,28 @@ static void ntb_free_callbacks(struct ntb_device *ndev)
653 kfree(ndev->db_cb);
654 }
655
656 +static void ntb_setup_debugfs(struct ntb_device *ndev)
657 +{
658 + if (!debugfs_initialized())
659 + return;
660 +
661 + if (!debugfs_dir)
662 + debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
663 +
664 + ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->pdev),
665 + debugfs_dir);
666 +}
667 +
668 +static void ntb_free_debugfs(struct ntb_device *ndev)
669 +{
670 + debugfs_remove_recursive(ndev->debugfs_dir);
671 +
672 + if (debugfs_dir && simple_empty(debugfs_dir)) {
673 + debugfs_remove_recursive(debugfs_dir);
674 + debugfs_dir = NULL;
675 + }
676 +}
677 +
678 static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
679 {
680 struct ntb_device *ndev;
681 @@ -1004,6 +1034,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
682 ndev->pdev = pdev;
683 ndev->link_status = NTB_LINK_DOWN;
684 pci_set_drvdata(pdev, ndev);
685 + ntb_setup_debugfs(ndev);
686
687 rc = pci_enable_device(pdev);
688 if (rc)
689 @@ -1100,6 +1131,7 @@ err2:
690 err1:
691 pci_disable_device(pdev);
692 err:
693 + ntb_free_debugfs(ndev);
694 kfree(ndev);
695
696 dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME);
697 @@ -1129,6 +1161,7 @@ static void ntb_pci_remove(struct pci_dev *pdev)
698 iounmap(ndev->reg_base);
699 pci_release_selected_regions(pdev, NTB_BAR_MASK);
700 pci_disable_device(pdev);
701 + ntb_free_debugfs(ndev);
702 kfree(ndev);
703 }
704
705 diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
706 index 3a3038ca83e6..6a4f56f564ee 100644
707 --- a/drivers/ntb/ntb_hw.h
708 +++ b/drivers/ntb/ntb_hw.h
709 @@ -127,6 +127,8 @@ struct ntb_device {
710 unsigned char link_status;
711 struct delayed_work hb_timer;
712 unsigned long last_ts;
713 +
714 + struct dentry *debugfs_dir;
715 };
716
717 /**
718 @@ -155,6 +157,20 @@ static inline struct pci_dev *ntb_query_pdev(struct ntb_device *ndev)
719 return ndev->pdev;
720 }
721
722 +/**
723 + * ntb_query_debugfs() - return the debugfs pointer
724 + * @ndev: pointer to ntb_device instance
725 + *
726 + * Given the ntb pointer, return the debugfs directory pointer for the NTB
727 + * hardware device
728 + *
729 + * RETURNS: a pointer to the debugfs directory
730 + */
731 +static inline struct dentry *ntb_query_debugfs(struct ntb_device *ndev)
732 +{
733 + return ndev->debugfs_dir;
734 +}
735 +
736 struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
737 void *transport);
738 void ntb_unregister_transport(struct ntb_device *ndev);
739 diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
740 index 5bfa8c06c059..96209b4abc22 100644
741 --- a/drivers/ntb/ntb_regs.h
742 +++ b/drivers/ntb/ntb_regs.h
743 @@ -53,8 +53,8 @@
744 #define NTB_LINK_WIDTH_MASK 0x03f0
745
746 #define SNB_MSIX_CNT 4
747 -#define SNB_MAX_SPADS 16
748 -#define SNB_MAX_COMPAT_SPADS 8
749 +#define SNB_MAX_B2B_SPADS 16
750 +#define SNB_MAX_COMPAT_SPADS 16
751 /* Reserve the uppermost bit for link interrupt */
752 #define SNB_MAX_DB_BITS 15
753 #define SNB_DB_BITS_PER_VEC 5
754 diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
755 index f8d7081ee301..c3089151aa49 100644
756 --- a/drivers/ntb/ntb_transport.c
757 +++ b/drivers/ntb/ntb_transport.c
758 @@ -157,7 +157,6 @@ struct ntb_transport {
759 bool transport_link;
760 struct delayed_work link_work;
761 struct work_struct link_cleanup;
762 - struct dentry *debugfs_dir;
763 };
764
765 enum {
766 @@ -824,12 +823,12 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
767 qp->tx_max_frame = min(transport_mtu, tx_size / 2);
768 qp->tx_max_entry = tx_size / qp->tx_max_frame;
769
770 - if (nt->debugfs_dir) {
771 + if (ntb_query_debugfs(nt->ndev)) {
772 char debugfs_name[4];
773
774 snprintf(debugfs_name, 4, "qp%d", qp_num);
775 qp->debugfs_dir = debugfs_create_dir(debugfs_name,
776 - nt->debugfs_dir);
777 + ntb_query_debugfs(nt->ndev));
778
779 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
780 qp->debugfs_dir, qp,
781 @@ -857,11 +856,6 @@ int ntb_transport_init(struct pci_dev *pdev)
782 if (!nt)
783 return -ENOMEM;
784
785 - if (debugfs_initialized())
786 - nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
787 - else
788 - nt->debugfs_dir = NULL;
789 -
790 nt->ndev = ntb_register_transport(pdev, nt);
791 if (!nt->ndev) {
792 rc = -EIO;
793 @@ -907,7 +901,6 @@ err2:
794 err1:
795 ntb_unregister_transport(nt->ndev);
796 err:
797 - debugfs_remove_recursive(nt->debugfs_dir);
798 kfree(nt);
799 return rc;
800 }
801 @@ -921,16 +914,16 @@ void ntb_transport_free(void *transport)
802 nt->transport_link = NTB_LINK_DOWN;
803
804 /* verify that all the qp's are freed */
805 - for (i = 0; i < nt->max_qps; i++)
806 + for (i = 0; i < nt->max_qps; i++) {
807 if (!test_bit(i, &nt->qp_bitmap))
808 ntb_transport_free_queue(&nt->qps[i]);
809 + debugfs_remove_recursive(nt->qps[i].debugfs_dir);
810 + }
811
812 ntb_bus_remove(nt);
813
814 cancel_delayed_work_sync(&nt->link_work);
815
816 - debugfs_remove_recursive(nt->debugfs_dir);
817 -
818 ntb_unregister_event_callback(nt->ndev);
819
820 pdev = ntb_query_pdev(nt->ndev);
821 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
822 index 408a42ef787a..f0d432c139d0 100644
823 --- a/drivers/scsi/aacraid/linit.c
824 +++ b/drivers/scsi/aacraid/linit.c
825 @@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
826 static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
827 {
828 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
829 + if (!capable(CAP_SYS_RAWIO))
830 + return -EPERM;
831 return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
832 }
833
834 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
835 index c39863441337..734a29a70f63 100644
836 --- a/drivers/scsi/sd.c
837 +++ b/drivers/scsi/sd.c
838 @@ -2843,6 +2843,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
839 gd->events |= DISK_EVENT_MEDIA_CHANGE;
840 }
841
842 + blk_pm_runtime_init(sdp->request_queue, dev);
843 add_disk(gd);
844 if (sdkp->capacity)
845 sd_dif_config_host(sdkp);
846 @@ -2851,7 +2852,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
847
848 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
849 sdp->removable ? "removable " : "");
850 - blk_pm_runtime_init(sdp->request_queue, dev);
851 scsi_autopm_put_device(sdp);
852 put_device(&sdkp->dev);
853 }
854 diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
855 index 35641e529396..8fa64d964b16 100644
856 --- a/drivers/staging/bcm/Bcmchar.c
857 +++ b/drivers/staging/bcm/Bcmchar.c
858 @@ -1960,6 +1960,7 @@ cntrlEnd:
859
860 BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Called IOCTL_BCM_GET_DEVICE_DRIVER_INFO\n");
861
862 + memset(&DevInfo, 0, sizeof(DevInfo));
863 DevInfo.MaxRDMBufferSize = BUFFER_4K;
864 DevInfo.u32DSDStartOffset = EEPROM_CALPARAM_START;
865 DevInfo.u32RxAlignmentCorrection = 0;
866 diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
867 index 27d06666c81a..224ccff75d4f 100644
868 --- a/drivers/staging/ozwpan/ozcdev.c
869 +++ b/drivers/staging/ozwpan/ozcdev.c
870 @@ -153,6 +153,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
871 struct oz_app_hdr *app_hdr;
872 struct oz_serial_ctx *ctx;
873
874 + if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
875 + return -EINVAL;
876 +
877 spin_lock_bh(&g_cdev.lock);
878 pd = g_cdev.active_pd;
879 if (pd)
880 diff --git a/drivers/staging/sb105x/sb_pci_mp.c b/drivers/staging/sb105x/sb_pci_mp.c
881 index cd94f6c27723..b90e96b7ca01 100644
882 --- a/drivers/staging/sb105x/sb_pci_mp.c
883 +++ b/drivers/staging/sb105x/sb_pci_mp.c
884 @@ -1063,7 +1063,7 @@ static int mp_wait_modem_status(struct sb_uart_state *state, unsigned long arg)
885
886 static int mp_get_count(struct sb_uart_state *state, struct serial_icounter_struct *icnt)
887 {
888 - struct serial_icounter_struct icount;
889 + struct serial_icounter_struct icount = {};
890 struct sb_uart_icount cnow;
891 struct sb_uart_port *port = state->port;
892
893 diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
894 index c97e0e154d28..7e10dcdc3090 100644
895 --- a/drivers/staging/wlags49_h2/wl_priv.c
896 +++ b/drivers/staging/wlags49_h2/wl_priv.c
897 @@ -570,6 +570,7 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
898 ltv_t *pLtv;
899 bool_t ltvAllocated = FALSE;
900 ENCSTRCT sEncryption;
901 + size_t len;
902
903 #ifdef USE_WDS
904 hcf_16 hcfPort = HCF_PORT_0;
905 @@ -686,7 +687,8 @@ int wvlan_uil_put_info(struct uilreq *urq, struct wl_private *lp)
906 break;
907 case CFG_CNF_OWN_NAME:
908 memset(lp->StationName, 0, sizeof(lp->StationName));
909 - memcpy((void *)lp->StationName, (void *)&pLtv->u.u8[2], (size_t)pLtv->u.u16[0]);
910 + len = min_t(size_t, pLtv->u.u16[0], sizeof(lp->StationName));
911 + strlcpy(lp->StationName, &pLtv->u.u8[2], len);
912 pLtv->u.u16[0] = CNV_INT_TO_LITTLE(pLtv->u.u16[0]);
913 break;
914 case CFG_CNF_LOAD_BALANCING:
915 @@ -1783,6 +1785,7 @@ int wvlan_set_station_nickname(struct net_device *dev,
916 {
917 struct wl_private *lp = wl_priv(dev);
918 unsigned long flags;
919 + size_t len;
920 int ret = 0;
921 /*------------------------------------------------------------------------*/
922
923 @@ -1793,8 +1796,8 @@ int wvlan_set_station_nickname(struct net_device *dev,
924 wl_lock(lp, &flags);
925
926 memset(lp->StationName, 0, sizeof(lp->StationName));
927 -
928 - memcpy(lp->StationName, extra, wrqu->data.length);
929 + len = min_t(size_t, wrqu->data.length, sizeof(lp->StationName));
930 + strlcpy(lp->StationName, extra, len);
931
932 /* Commit the adapter parameters */
933 wl_apply(lp);
934 diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
935 index e992b27aa090..3250ba2594e0 100644
936 --- a/drivers/target/target_core_pscsi.c
937 +++ b/drivers/target/target_core_pscsi.c
938 @@ -134,10 +134,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
939 * pSCSI Host ID and enable for phba mode
940 */
941 sh = scsi_host_lookup(phv->phv_host_id);
942 - if (IS_ERR(sh)) {
943 + if (!sh) {
944 pr_err("pSCSI: Unable to locate SCSI Host for"
945 " phv_host_id: %d\n", phv->phv_host_id);
946 - return PTR_ERR(sh);
947 + return -EINVAL;
948 }
949
950 phv->phv_lld_host = sh;
951 @@ -515,10 +515,10 @@ static int pscsi_configure_device(struct se_device *dev)
952 sh = phv->phv_lld_host;
953 } else {
954 sh = scsi_host_lookup(pdv->pdv_host_id);
955 - if (IS_ERR(sh)) {
956 + if (!sh) {
957 pr_err("pSCSI: Unable to locate"
958 " pdv_host_id: %d\n", pdv->pdv_host_id);
959 - return PTR_ERR(sh);
960 + return -EINVAL;
961 }
962 }
963 } else {
964 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
965 index b645c47501b4..2d57a00dc173 100644
966 --- a/drivers/uio/uio.c
967 +++ b/drivers/uio/uio.c
968 @@ -630,36 +630,57 @@ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
969 return 0;
970 }
971
972 -static const struct vm_operations_struct uio_vm_ops = {
973 +static const struct vm_operations_struct uio_logical_vm_ops = {
974 .open = uio_vma_open,
975 .close = uio_vma_close,
976 .fault = uio_vma_fault,
977 };
978
979 +static int uio_mmap_logical(struct vm_area_struct *vma)
980 +{
981 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
982 + vma->vm_ops = &uio_logical_vm_ops;
983 + uio_vma_open(vma);
984 + return 0;
985 +}
986 +
987 +static const struct vm_operations_struct uio_physical_vm_ops = {
988 +#ifdef CONFIG_HAVE_IOREMAP_PROT
989 + .access = generic_access_phys,
990 +#endif
991 +};
992 +
993 static int uio_mmap_physical(struct vm_area_struct *vma)
994 {
995 struct uio_device *idev = vma->vm_private_data;
996 int mi = uio_find_mem_index(vma);
997 + struct uio_mem *mem;
998 if (mi < 0)
999 return -EINVAL;
1000 + mem = idev->info->mem + mi;
1001
1002 + if (vma->vm_end - vma->vm_start > mem->size)
1003 + return -EINVAL;
1004 +
1005 + vma->vm_ops = &uio_physical_vm_ops;
1006 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1007
1008 + /*
1009 + * We cannot use the vm_iomap_memory() helper here,
1010 + * because vma->vm_pgoff is the map index we looked
1011 + * up above in uio_find_mem_index(), rather than an
1012 + * actual page offset into the mmap.
1013 + *
1014 + * So we just do the physical mmap without a page
1015 + * offset.
1016 + */
1017 return remap_pfn_range(vma,
1018 vma->vm_start,
1019 - idev->info->mem[mi].addr >> PAGE_SHIFT,
1020 + mem->addr >> PAGE_SHIFT,
1021 vma->vm_end - vma->vm_start,
1022 vma->vm_page_prot);
1023 }
1024
1025 -static int uio_mmap_logical(struct vm_area_struct *vma)
1026 -{
1027 - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1028 - vma->vm_ops = &uio_vm_ops;
1029 - uio_vma_open(vma);
1030 - return 0;
1031 -}
1032 -
1033 static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
1034 {
1035 struct uio_listener *listener = filep->private_data;
1036 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1037 index 5b44cd47da5b..01fe36273f3b 100644
1038 --- a/drivers/usb/core/quirks.c
1039 +++ b/drivers/usb/core/quirks.c
1040 @@ -97,6 +97,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1041 /* Alcor Micro Corp. Hub */
1042 { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
1043
1044 + /* MicroTouch Systems touchscreen */
1045 + { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
1046 +
1047 /* appletouch */
1048 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
1049
1050 @@ -130,6 +133,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1051 /* Broadcom BCM92035DGROM BT dongle */
1052 { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
1053
1054 + /* MAYA44USB sound device */
1055 + { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
1056 +
1057 /* Action Semiconductor flash disk */
1058 { USB_DEVICE(0x10d6, 0x2200), .driver_info =
1059 USB_QUIRK_STRING_FETCH_255 },
1060 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1061 index b65e657c641d..aa3aed5458a6 100644
1062 --- a/drivers/usb/serial/ftdi_sio.c
1063 +++ b/drivers/usb/serial/ftdi_sio.c
1064 @@ -906,6 +906,7 @@ static struct usb_device_id id_table_combined [] = {
1065 { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
1066 /* Crucible Devices */
1067 { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
1068 + { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
1069 { }, /* Optional parameter entry */
1070 { } /* Terminating entry */
1071 };
1072 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
1073 index 1b8af461b522..a7019d1e3058 100644
1074 --- a/drivers/usb/serial/ftdi_sio_ids.h
1075 +++ b/drivers/usb/serial/ftdi_sio_ids.h
1076 @@ -1307,3 +1307,9 @@
1077 * Manufacturer: Crucible Technologies
1078 */
1079 #define FTDI_CT_COMET_PID 0x8e08
1080 +
1081 +/*
1082 + * Product: Z3X Box
1083 + * Manufacturer: Smart GSM Team
1084 + */
1085 +#define FTDI_Z3X_PID 0x0011
1086 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1087 index f1507c052a2e..acaee066b99a 100644
1088 --- a/drivers/usb/serial/option.c
1089 +++ b/drivers/usb/serial/option.c
1090 @@ -693,6 +693,222 @@ static const struct usb_device_id option_ids[] = {
1091 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
1092 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
1093 { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
1094 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
1095 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
1096 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
1097 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
1098 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
1099 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
1100 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
1101 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
1102 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
1103 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
1104 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
1105 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
1106 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
1107 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
1108 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
1109 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
1110 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
1111 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
1112 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
1113 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
1114 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
1115 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
1116 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
1117 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
1118 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
1119 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
1120 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
1121 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
1122 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
1123 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
1124 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
1125 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
1126 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
1127 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
1128 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
1129 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
1130 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
1131 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
1132 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
1133 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
1134 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
1135 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
1136 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
1137 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
1138 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
1139 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
1140 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
1141 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
1142 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
1143 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
1144 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
1145 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
1146 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
1147 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
1148 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
1149 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
1150 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
1151 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
1152 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
1153 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
1154 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
1155 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
1156 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
1157 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
1158 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
1159 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
1160 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
1161 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
1162 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
1163 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
1164 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
1165 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
1166 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
1167 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
1168 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
1169 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
1170 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
1171 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
1172 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
1173 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
1174 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
1175 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
1176 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
1177 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
1178 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
1179 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
1180 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
1181 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
1182 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
1183 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
1184 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
1185 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
1186 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
1187 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
1188 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
1189 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
1190 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
1191 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
1192 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
1193 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
1194 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
1195 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
1196 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
1197 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
1198 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
1199 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
1200 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
1201 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
1202 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
1203 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
1204 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
1205 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
1206 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
1207 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
1208 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
1209 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
1210 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
1211 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
1212 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
1213 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
1214 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
1215 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
1216 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
1217 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
1218 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
1219 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
1220 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
1221 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
1222 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
1223 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
1224 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
1225 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
1226 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
1227 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
1228 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
1229 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
1230 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
1231 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
1232 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
1233 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
1234 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
1235 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
1236 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
1237 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
1238 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
1239 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
1240 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
1241 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
1242 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
1243 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
1244 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
1245 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
1246 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
1247 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
1248 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
1249 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
1250 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
1251 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
1252 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
1253 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
1254 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
1255 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
1256 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
1257 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
1258 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
1259 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
1260 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
1261 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
1262 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
1263 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
1264 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
1265 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
1266 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
1267 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
1268 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
1269 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
1270 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
1271 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
1272 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
1273 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
1274 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
1275 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
1276 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
1277 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
1278 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
1279 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
1280 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
1281 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
1282 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
1283 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
1284 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
1285 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
1286 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
1287 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
1288 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
1289 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
1290 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
1291 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
1292 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
1293 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
1294 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
1295 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
1296 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
1297 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
1298 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
1299 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
1300 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
1301 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
1302 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
1303 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
1304 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
1305 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
1306 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
1307 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
1308 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
1309 + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
1310
1311
1312 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
1313 diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
1314 index 92b05d95ec5e..5db153260827 100644
1315 --- a/drivers/usb/storage/scsiglue.c
1316 +++ b/drivers/usb/storage/scsiglue.c
1317 @@ -211,8 +211,11 @@ static int slave_configure(struct scsi_device *sdev)
1318 /*
1319 * Many devices do not respond properly to READ_CAPACITY_16.
1320 * Tell the SCSI layer to try READ_CAPACITY_10 first.
1321 + * However some USB 3.0 drive enclosures return capacity
1322 + * modulo 2TB. Those must use READ_CAPACITY_16
1323 */
1324 - sdev->try_rc_10_first = 1;
1325 + if (!(us->fflags & US_FL_NEEDS_CAP16))
1326 + sdev->try_rc_10_first = 1;
1327
1328 /* assume SPC3 or latter devices support sense size > 18 */
1329 if (sdev->scsi_level > SCSI_SPC_2)
1330 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1331 index c015f2c16729..de32cfa5bfa6 100644
1332 --- a/drivers/usb/storage/unusual_devs.h
1333 +++ b/drivers/usb/storage/unusual_devs.h
1334 @@ -1925,6 +1925,13 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
1335 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1336 US_FL_IGNORE_RESIDUE ),
1337
1338 +/* Reported by Oliver Neukum <oneukum@suse.com> */
1339 +UNUSUAL_DEV( 0x174c, 0x55aa, 0x0100, 0x0100,
1340 + "ASMedia",
1341 + "AS2105",
1342 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1343 + US_FL_NEEDS_CAP16),
1344 +
1345 /* Reported by Jesse Feddema <jdfeddema@gmail.com> */
1346 UNUSUAL_DEV( 0x177f, 0x0400, 0x0000, 0x0000,
1347 "Yarvik",
1348 diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
1349 index 701420297225..962c7e3c3baa 100644
1350 --- a/drivers/vhost/scsi.c
1351 +++ b/drivers/vhost/scsi.c
1352 @@ -1017,7 +1017,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
1353 if (data_direction != DMA_NONE) {
1354 ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
1355 &vq->iov[data_first], data_num,
1356 - data_direction == DMA_TO_DEVICE);
1357 + data_direction == DMA_FROM_DEVICE);
1358 if (unlikely(ret)) {
1359 vq_err(vq, "Failed to map iov to sgl\n");
1360 goto err_free;
1361 diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
1362 index 700cac067b46..bdc515f5e979 100644
1363 --- a/drivers/video/au1100fb.c
1364 +++ b/drivers/video/au1100fb.c
1365 @@ -361,39 +361,13 @@ void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
1366 int au1100fb_fb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
1367 {
1368 struct au1100fb_device *fbdev;
1369 - unsigned int len;
1370 - unsigned long start=0, off;
1371
1372 fbdev = to_au1100fb_device(fbi);
1373
1374 - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
1375 - return -EINVAL;
1376 - }
1377 -
1378 - start = fbdev->fb_phys & PAGE_MASK;
1379 - len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
1380 -
1381 - off = vma->vm_pgoff << PAGE_SHIFT;
1382 -
1383 - if ((vma->vm_end - vma->vm_start + off) > len) {
1384 - return -EINVAL;
1385 - }
1386 -
1387 - off += start;
1388 - vma->vm_pgoff = off >> PAGE_SHIFT;
1389 -
1390 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1391 pgprot_val(vma->vm_page_prot) |= (6 << 9); //CCA=6
1392
1393 - vma->vm_flags |= VM_IO;
1394 -
1395 - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
1396 - vma->vm_end - vma->vm_start,
1397 - vma->vm_page_prot)) {
1398 - return -EAGAIN;
1399 - }
1400 -
1401 - return 0;
1402 + return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
1403 }
1404
1405 static struct fb_ops au1100fb_ops =
1406 diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
1407 index 1b59054fc6a4..1d02897d17f2 100644
1408 --- a/drivers/video/au1200fb.c
1409 +++ b/drivers/video/au1200fb.c
1410 @@ -1233,38 +1233,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
1411 * method mainly to allow the use of the TLB streaming flag (CCA=6)
1412 */
1413 static int au1200fb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
1414 -
1415 {
1416 - unsigned int len;
1417 - unsigned long start=0, off;
1418 struct au1200fb_device *fbdev = info->par;
1419
1420 - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
1421 - return -EINVAL;
1422 - }
1423 -
1424 - start = fbdev->fb_phys & PAGE_MASK;
1425 - len = PAGE_ALIGN((start & ~PAGE_MASK) + fbdev->fb_len);
1426 -
1427 - off = vma->vm_pgoff << PAGE_SHIFT;
1428 -
1429 - if ((vma->vm_end - vma->vm_start + off) > len) {
1430 - return -EINVAL;
1431 - }
1432 -
1433 - off += start;
1434 - vma->vm_pgoff = off >> PAGE_SHIFT;
1435 -
1436 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1437 pgprot_val(vma->vm_page_prot) |= _CACHE_MASK; /* CCA=7 */
1438
1439 - vma->vm_flags |= VM_IO;
1440 -
1441 - return io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
1442 - vma->vm_end - vma->vm_start,
1443 - vma->vm_page_prot);
1444 -
1445 - return 0;
1446 + return vm_iomap_memory(vma, fbdev->fb_phys, fbdev->fb_len);
1447 }
1448
1449 static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
1450 diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
1451 index 7d52806c2119..4725a07f003c 100644
1452 --- a/fs/ecryptfs/keystore.c
1453 +++ b/fs/ecryptfs/keystore.c
1454 @@ -1149,7 +1149,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
1455 struct ecryptfs_msg_ctx *msg_ctx;
1456 struct ecryptfs_message *msg = NULL;
1457 char *auth_tok_sig;
1458 - char *payload;
1459 + char *payload = NULL;
1460 size_t payload_len = 0;
1461 int rc;
1462
1463 @@ -1203,6 +1203,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
1464 }
1465 out:
1466 kfree(msg);
1467 + kfree(payload);
1468 return rc;
1469 }
1470
1471 diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
1472 index c1a3e603279c..7f464c513ba0 100644
1473 --- a/fs/jfs/jfs_inode.c
1474 +++ b/fs/jfs/jfs_inode.c
1475 @@ -95,7 +95,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
1476
1477 if (insert_inode_locked(inode) < 0) {
1478 rc = -EINVAL;
1479 - goto fail_unlock;
1480 + goto fail_put;
1481 }
1482
1483 inode_init_owner(inode, parent, mode);
1484 @@ -156,7 +156,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
1485 fail_drop:
1486 dquot_drop(inode);
1487 inode->i_flags |= S_NOQUOTA;
1488 -fail_unlock:
1489 clear_nlink(inode);
1490 unlock_new_inode(inode);
1491 fail_put:
1492 diff --git a/fs/seq_file.c b/fs/seq_file.c
1493 index 774c1eb7f1c9..3dd44db1465e 100644
1494 --- a/fs/seq_file.c
1495 +++ b/fs/seq_file.c
1496 @@ -328,6 +328,8 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence)
1497 m->read_pos = offset;
1498 retval = file->f_pos = offset;
1499 }
1500 + } else {
1501 + file->f_pos = offset;
1502 }
1503 }
1504 file->f_version = m->version;
1505 diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
1506 index bf99cd01be20..630356866030 100644
1507 --- a/include/linux/usb_usual.h
1508 +++ b/include/linux/usb_usual.h
1509 @@ -66,7 +66,9 @@
1510 US_FLAG(INITIAL_READ10, 0x00100000) \
1511 /* Initial READ(10) (and others) must be retried */ \
1512 US_FLAG(WRITE_CACHE, 0x00200000) \
1513 - /* Write Cache status is not available */
1514 + /* Write Cache status is not available */ \
1515 + US_FLAG(NEEDS_CAP16, 0x00400000)
1516 + /* cannot handle READ_CAPACITY_10 */
1517
1518 #define US_FLAG(name, value) US_FL_##name = value ,
1519 enum { US_DO_ALL_FLAGS };
1520 diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
1521 index 090e5331ab7e..cc2e00eac2f1 100644
1522 --- a/include/uapi/drm/drm_mode.h
1523 +++ b/include/uapi/drm/drm_mode.h
1524 @@ -223,6 +223,8 @@ struct drm_mode_get_connector {
1525 __u32 connection;
1526 __u32 mm_width, mm_height; /**< HxW in millimeters */
1527 __u32 subpixel;
1528 +
1529 + __u32 pad;
1530 };
1531
1532 #define DRM_MODE_PROP_PENDING (1<<0)
1533 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1534 index 2e9b387971d1..b6b26faf1740 100644
1535 --- a/kernel/cgroup.c
1536 +++ b/kernel/cgroup.c
1537 @@ -1995,7 +1995,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
1538
1539 /* @tsk either already exited or can't exit until the end */
1540 if (tsk->flags & PF_EXITING)
1541 - continue;
1542 + goto next;
1543
1544 /* as per above, nr_threads may decrease, but not increase. */
1545 BUG_ON(i >= group_size);
1546 @@ -2003,7 +2003,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
1547 ent.cgrp = task_cgroup_from_root(tsk, root);
1548 /* nothing to do if this task is already in the cgroup */
1549 if (ent.cgrp == cgrp)
1550 - continue;
1551 + goto next;
1552 /*
1553 * saying GFP_ATOMIC has no effect here because we did prealloc
1554 * earlier, but it's good form to communicate our expectations.
1555 @@ -2011,7 +2011,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
1556 retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
1557 BUG_ON(retval != 0);
1558 i++;
1559 -
1560 + next:
1561 if (!threadgroup)
1562 break;
1563 } while_each_thread(leader, tsk);
1564 diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
1565 index c6d6400ee137..6a23c6c556c3 100644
1566 --- a/kernel/time/clockevents.c
1567 +++ b/kernel/time/clockevents.c
1568 @@ -30,29 +30,64 @@ static RAW_NOTIFIER_HEAD(clockevents_chain);
1569 /* Protection for the above */
1570 static DEFINE_RAW_SPINLOCK(clockevents_lock);
1571
1572 -/**
1573 - * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
1574 - * @latch: value to convert
1575 - * @evt: pointer to clock event device descriptor
1576 - *
1577 - * Math helper, returns latch value converted to nanoseconds (bound checked)
1578 - */
1579 -u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
1580 +static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
1581 + bool ismax)
1582 {
1583 u64 clc = (u64) latch << evt->shift;
1584 + u64 rnd;
1585
1586 if (unlikely(!evt->mult)) {
1587 evt->mult = 1;
1588 WARN_ON(1);
1589 }
1590 + rnd = (u64) evt->mult - 1;
1591 +
1592 + /*
1593 + * Upper bound sanity check. If the backwards conversion is
1594 + * not equal latch, we know that the above shift overflowed.
1595 + */
1596 + if ((clc >> evt->shift) != (u64)latch)
1597 + clc = ~0ULL;
1598 +
1599 + /*
1600 + * Scaled math oddities:
1601 + *
1602 + * For mult <= (1 << shift) we can safely add mult - 1 to
1603 + * prevent integer rounding loss. So the backwards conversion
1604 + * from nsec to device ticks will be correct.
1605 + *
1606 + * For mult > (1 << shift), i.e. device frequency is > 1GHz we
1607 + * need to be careful. Adding mult - 1 will result in a value
1608 + * which when converted back to device ticks can be larger
1609 + * than latch by up to (mult - 1) >> shift. For the min_delta
1610 + * calculation we still want to apply this in order to stay
1611 + * above the minimum device ticks limit. For the upper limit
1612 + * we would end up with a latch value larger than the upper
1613 + * limit of the device, so we omit the add to stay below the
1614 + * device upper boundary.
1615 + *
1616 + * Also omit the add if it would overflow the u64 boundary.
1617 + */
1618 + if ((~0ULL - clc > rnd) &&
1619 + (!ismax || evt->mult <= (1U << evt->shift)))
1620 + clc += rnd;
1621
1622 do_div(clc, evt->mult);
1623 - if (clc < 1000)
1624 - clc = 1000;
1625 - if (clc > KTIME_MAX)
1626 - clc = KTIME_MAX;
1627
1628 - return clc;
1629 + /* Deltas less than 1usec are pointless noise */
1630 + return clc > 1000 ? clc : 1000;
1631 +}
1632 +
1633 +/**
1634 + * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
1635 + * @latch: value to convert
1636 + * @evt: pointer to clock event device descriptor
1637 + *
1638 + * Math helper, returns latch value converted to nanoseconds (bound checked)
1639 + */
1640 +u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
1641 +{
1642 + return cev_delta2ns(latch, evt, false);
1643 }
1644 EXPORT_SYMBOL_GPL(clockevent_delta2ns);
1645
1646 @@ -317,8 +352,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq)
1647 sec = 600;
1648
1649 clockevents_calc_mult_shift(dev, freq, sec);
1650 - dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
1651 - dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
1652 + dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
1653 + dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
1654 }
1655
1656 /**
1657 diff --git a/lib/scatterlist.c b/lib/scatterlist.c
1658 index a1cf8cae60e7..3e7df38067ae 100644
1659 --- a/lib/scatterlist.c
1660 +++ b/lib/scatterlist.c
1661 @@ -529,7 +529,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
1662 miter->__offset += miter->consumed;
1663 miter->__remaining -= miter->consumed;
1664
1665 - if (miter->__flags & SG_MITER_TO_SG)
1666 + if ((miter->__flags & SG_MITER_TO_SG) &&
1667 + !PageSlab(miter->page))
1668 flush_kernel_dcache_page(miter->page);
1669
1670 if (miter->__flags & SG_MITER_ATOMIC) {
1671 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1672 index 0164b09c1e99..c403a74e4bee 100644
1673 --- a/mm/huge_memory.c
1674 +++ b/mm/huge_memory.c
1675 @@ -1288,64 +1288,90 @@ out:
1676 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1677 unsigned long addr, pmd_t pmd, pmd_t *pmdp)
1678 {
1679 + struct anon_vma *anon_vma = NULL;
1680 struct page *page;
1681 unsigned long haddr = addr & HPAGE_PMD_MASK;
1682 + int page_nid = -1, this_nid = numa_node_id();
1683 int target_nid;
1684 - int current_nid = -1;
1685 - bool migrated;
1686 + bool page_locked;
1687 + bool migrated = false;
1688
1689 spin_lock(&mm->page_table_lock);
1690 if (unlikely(!pmd_same(pmd, *pmdp)))
1691 goto out_unlock;
1692
1693 page = pmd_page(pmd);
1694 - get_page(page);
1695 - current_nid = page_to_nid(page);
1696 + page_nid = page_to_nid(page);
1697 count_vm_numa_event(NUMA_HINT_FAULTS);
1698 - if (current_nid == numa_node_id())
1699 + if (page_nid == this_nid)
1700 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
1701
1702 + /*
1703 + * Acquire the page lock to serialise THP migrations but avoid dropping
1704 + * page_table_lock if at all possible
1705 + */
1706 + page_locked = trylock_page(page);
1707 target_nid = mpol_misplaced(page, vma, haddr);
1708 if (target_nid == -1) {
1709 - put_page(page);
1710 - goto clear_pmdnuma;
1711 + /* If the page was locked, there are no parallel migrations */
1712 + if (page_locked)
1713 + goto clear_pmdnuma;
1714 +
1715 + /*
1716 + * Otherwise wait for potential migrations and retry. We do
1717 + * relock and check_same as the page may no longer be mapped.
1718 + * As the fault is being retried, do not account for it.
1719 + */
1720 + spin_unlock(&mm->page_table_lock);
1721 + wait_on_page_locked(page);
1722 + page_nid = -1;
1723 + goto out;
1724 }
1725
1726 - /* Acquire the page lock to serialise THP migrations */
1727 + /* Page is misplaced, serialise migrations and parallel THP splits */
1728 + get_page(page);
1729 spin_unlock(&mm->page_table_lock);
1730 - lock_page(page);
1731 + if (!page_locked)
1732 + lock_page(page);
1733 + anon_vma = page_lock_anon_vma_read(page);
1734
1735 /* Confirm the PTE did not while locked */
1736 spin_lock(&mm->page_table_lock);
1737 if (unlikely(!pmd_same(pmd, *pmdp))) {
1738 unlock_page(page);
1739 put_page(page);
1740 + page_nid = -1;
1741 goto out_unlock;
1742 }
1743 - spin_unlock(&mm->page_table_lock);
1744
1745 - /* Migrate the THP to the requested node */
1746 + /*
1747 + * Migrate the THP to the requested node, returns with page unlocked
1748 + * and pmd_numa cleared.
1749 + */
1750 + spin_unlock(&mm->page_table_lock);
1751 migrated = migrate_misplaced_transhuge_page(mm, vma,
1752 pmdp, pmd, addr, page, target_nid);
1753 - if (!migrated)
1754 - goto check_same;
1755 -
1756 - task_numa_fault(target_nid, HPAGE_PMD_NR, true);
1757 - return 0;
1758 + if (migrated)
1759 + page_nid = target_nid;
1760
1761 -check_same:
1762 - spin_lock(&mm->page_table_lock);
1763 - if (unlikely(!pmd_same(pmd, *pmdp)))
1764 - goto out_unlock;
1765 + goto out;
1766 clear_pmdnuma:
1767 + BUG_ON(!PageLocked(page));
1768 pmd = pmd_mknonnuma(pmd);
1769 set_pmd_at(mm, haddr, pmdp, pmd);
1770 VM_BUG_ON(pmd_numa(*pmdp));
1771 update_mmu_cache_pmd(vma, addr, pmdp);
1772 + unlock_page(page);
1773 out_unlock:
1774 spin_unlock(&mm->page_table_lock);
1775 - if (current_nid != -1)
1776 - task_numa_fault(current_nid, HPAGE_PMD_NR, false);
1777 +
1778 +out:
1779 + if (anon_vma)
1780 + page_unlock_anon_vma_read(anon_vma);
1781 +
1782 + if (page_nid != -1)
1783 + task_numa_fault(page_nid, HPAGE_PMD_NR, migrated);
1784 +
1785 return 0;
1786 }
1787
1788 diff --git a/mm/memory.c b/mm/memory.c
1789 index 5a35443c01ad..4b60011907d7 100644
1790 --- a/mm/memory.c
1791 +++ b/mm/memory.c
1792 @@ -3525,12 +3525,12 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1793 }
1794
1795 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
1796 - unsigned long addr, int current_nid)
1797 + unsigned long addr, int page_nid)
1798 {
1799 get_page(page);
1800
1801 count_vm_numa_event(NUMA_HINT_FAULTS);
1802 - if (current_nid == numa_node_id())
1803 + if (page_nid == numa_node_id())
1804 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
1805
1806 return mpol_misplaced(page, vma, addr);
1807 @@ -3541,7 +3541,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1808 {
1809 struct page *page = NULL;
1810 spinlock_t *ptl;
1811 - int current_nid = -1;
1812 + int page_nid = -1;
1813 int target_nid;
1814 bool migrated = false;
1815
1816 @@ -3571,15 +3571,10 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1817 return 0;
1818 }
1819
1820 - current_nid = page_to_nid(page);
1821 - target_nid = numa_migrate_prep(page, vma, addr, current_nid);
1822 + page_nid = page_to_nid(page);
1823 + target_nid = numa_migrate_prep(page, vma, addr, page_nid);
1824 pte_unmap_unlock(ptep, ptl);
1825 if (target_nid == -1) {
1826 - /*
1827 - * Account for the fault against the current node if it not
1828 - * being replaced regardless of where the page is located.
1829 - */
1830 - current_nid = numa_node_id();
1831 put_page(page);
1832 goto out;
1833 }
1834 @@ -3587,11 +3582,11 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1835 /* Migrate to the requested node */
1836 migrated = migrate_misplaced_page(page, target_nid);
1837 if (migrated)
1838 - current_nid = target_nid;
1839 + page_nid = target_nid;
1840
1841 out:
1842 - if (current_nid != -1)
1843 - task_numa_fault(current_nid, 1, migrated);
1844 + if (page_nid != -1)
1845 + task_numa_fault(page_nid, 1, migrated);
1846 return 0;
1847 }
1848
1849 @@ -3606,7 +3601,6 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1850 unsigned long offset;
1851 spinlock_t *ptl;
1852 bool numa = false;
1853 - int local_nid = numa_node_id();
1854
1855 spin_lock(&mm->page_table_lock);
1856 pmd = *pmdp;
1857 @@ -3629,9 +3623,10 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1858 for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
1859 pte_t pteval = *pte;
1860 struct page *page;
1861 - int curr_nid = local_nid;
1862 + int page_nid = -1;
1863 int target_nid;
1864 - bool migrated;
1865 + bool migrated = false;
1866 +
1867 if (!pte_present(pteval))
1868 continue;
1869 if (!pte_numa(pteval))
1870 @@ -3653,25 +3648,19 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1871 if (unlikely(page_mapcount(page) != 1))
1872 continue;
1873
1874 - /*
1875 - * Note that the NUMA fault is later accounted to either
1876 - * the node that is currently running or where the page is
1877 - * migrated to.
1878 - */
1879 - curr_nid = local_nid;
1880 - target_nid = numa_migrate_prep(page, vma, addr,
1881 - page_to_nid(page));
1882 - if (target_nid == -1) {
1883 + page_nid = page_to_nid(page);
1884 + target_nid = numa_migrate_prep(page, vma, addr, page_nid);
1885 + pte_unmap_unlock(pte, ptl);
1886 + if (target_nid != -1) {
1887 + migrated = migrate_misplaced_page(page, target_nid);
1888 + if (migrated)
1889 + page_nid = target_nid;
1890 + } else {
1891 put_page(page);
1892 - continue;
1893 }
1894
1895 - /* Migrate to the requested node */
1896 - pte_unmap_unlock(pte, ptl);
1897 - migrated = migrate_misplaced_page(page, target_nid);
1898 - if (migrated)
1899 - curr_nid = target_nid;
1900 - task_numa_fault(curr_nid, 1, migrated);
1901 + if (page_nid != -1)
1902 + task_numa_fault(page_nid, 1, migrated);
1903
1904 pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
1905 }
1906 @@ -4074,6 +4063,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1907
1908 return len;
1909 }
1910 +EXPORT_SYMBOL_GPL(generic_access_phys);
1911 #endif
1912
1913 /*
1914 diff --git a/mm/migrate.c b/mm/migrate.c
1915 index 25ca7caf9092..bf436c15f055 100644
1916 --- a/mm/migrate.c
1917 +++ b/mm/migrate.c
1918 @@ -1710,12 +1710,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1919 unlock_page(new_page);
1920 put_page(new_page); /* Free it */
1921
1922 - unlock_page(page);
1923 + /* Retake the callers reference and putback on LRU */
1924 + get_page(page);
1925 putback_lru_page(page);
1926 -
1927 - count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
1928 - isolated = 0;
1929 - goto out;
1930 + mod_zone_page_state(page_zone(page),
1931 + NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
1932 + goto out_fail;
1933 }
1934
1935 /*
1936 @@ -1732,9 +1732,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1937 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1938 entry = pmd_mkhuge(entry);
1939
1940 - page_add_new_anon_rmap(new_page, vma, haddr);
1941 -
1942 + pmdp_clear_flush(vma, haddr, pmd);
1943 set_pmd_at(mm, haddr, pmd, entry);
1944 + page_add_new_anon_rmap(new_page, vma, haddr);
1945 update_mmu_cache_pmd(vma, address, &entry);
1946 page_remove_rmap(page);
1947 /*
1948 @@ -1753,7 +1753,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1949 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
1950 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
1951
1952 -out:
1953 mod_zone_page_state(page_zone(page),
1954 NR_ISOLATED_ANON + page_lru,
1955 -HPAGE_PMD_NR);
1956 @@ -1762,6 +1761,10 @@ out:
1957 out_fail:
1958 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
1959 out_dropref:
1960 + entry = pmd_mknonnuma(entry);
1961 + set_pmd_at(mm, haddr, pmd, entry);
1962 + update_mmu_cache_pmd(vma, address, &entry);
1963 +
1964 unlock_page(page);
1965 put_page(page);
1966 return 0;
1967 diff --git a/mm/mprotect.c b/mm/mprotect.c
1968 index 94722a4d6b43..2bbb648ea73f 100644
1969 --- a/mm/mprotect.c
1970 +++ b/mm/mprotect.c
1971 @@ -145,7 +145,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
1972 split_huge_page_pmd(vma, addr, pmd);
1973 else if (change_huge_pmd(vma, pmd, addr, newprot,
1974 prot_numa)) {
1975 - pages += HPAGE_PMD_NR;
1976 + pages++;
1977 continue;
1978 }
1979 /* fall through */
1980 diff --git a/mm/pagewalk.c b/mm/pagewalk.c
1981 index 5da2cbcfdbb5..2beeabf502c5 100644
1982 --- a/mm/pagewalk.c
1983 +++ b/mm/pagewalk.c
1984 @@ -242,7 +242,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
1985 if (err)
1986 break;
1987 pgd++;
1988 - } while (addr = next, addr != end);
1989 + } while (addr = next, addr < end);
1990
1991 return err;
1992 }
1993 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
1994 index d365724feb05..d4565606cc96 100644
1995 --- a/mm/vmalloc.c
1996 +++ b/mm/vmalloc.c
1997 @@ -388,12 +388,12 @@ nocache:
1998 addr = ALIGN(first->va_end, align);
1999 if (addr < vstart)
2000 goto nocache;
2001 - if (addr + size - 1 < addr)
2002 + if (addr + size < addr)
2003 goto overflow;
2004
2005 } else {
2006 addr = ALIGN(vstart, align);
2007 - if (addr + size - 1 < addr)
2008 + if (addr + size < addr)
2009 goto overflow;
2010
2011 n = vmap_area_root.rb_node;
2012 @@ -420,7 +420,7 @@ nocache:
2013 if (addr + cached_hole_size < first->va_start)
2014 cached_hole_size = first->va_start - addr;
2015 addr = ALIGN(first->va_end, align);
2016 - if (addr + size - 1 < addr)
2017 + if (addr + size < addr)
2018 goto overflow;
2019
2020 if (list_is_last(&first->list, &vmap_area_list))
2021 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
2022 index ae36f8e11ae4..5ab17b82605d 100644
2023 --- a/net/mac80211/cfg.c
2024 +++ b/net/mac80211/cfg.c
2025 @@ -3315,7 +3315,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
2026 return -EINVAL;
2027 }
2028 band = chanctx_conf->def.chan->band;
2029 - sta = sta_info_get(sdata, peer);
2030 + sta = sta_info_get_bss(sdata, peer);
2031 if (sta) {
2032 qos = test_sta_flag(sta, WLAN_STA_WME);
2033 } else {
2034 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
2035 index 9ca8e3278cc0..92ef04c72c51 100644
2036 --- a/net/mac80211/ieee80211_i.h
2037 +++ b/net/mac80211/ieee80211_i.h
2038 @@ -842,6 +842,8 @@ struct tpt_led_trigger {
2039 * that the scan completed.
2040 * @SCAN_ABORTED: Set for our scan work function when the driver reported
2041 * a scan complete for an aborted scan.
2042 + * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
2043 + * cancelled.
2044 */
2045 enum {
2046 SCAN_SW_SCANNING,
2047 @@ -849,6 +851,7 @@ enum {
2048 SCAN_ONCHANNEL_SCANNING,
2049 SCAN_COMPLETED,
2050 SCAN_ABORTED,
2051 + SCAN_HW_CANCELLED,
2052 };
2053
2054 /**
2055 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
2056 index 83f6d29202aa..ec09bcba9bae 100644
2057 --- a/net/mac80211/rx.c
2058 +++ b/net/mac80211/rx.c
2059 @@ -3002,6 +3002,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
2060 case NL80211_IFTYPE_ADHOC:
2061 if (!bssid)
2062 return 0;
2063 + if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
2064 + ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
2065 + return 0;
2066 if (ieee80211_is_beacon(hdr->frame_control)) {
2067 return 1;
2068 } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
2069 diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
2070 index 99b103921a4b..eb03337b6545 100644
2071 --- a/net/mac80211/scan.c
2072 +++ b/net/mac80211/scan.c
2073 @@ -202,6 +202,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
2074 enum ieee80211_band band;
2075 int i, ielen, n_chans;
2076
2077 + if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
2078 + return false;
2079 +
2080 do {
2081 if (local->hw_scan_band == IEEE80211_NUM_BANDS)
2082 return false;
2083 @@ -878,7 +881,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
2084 if (!local->scan_req)
2085 goto out;
2086
2087 + /*
2088 + * We have a scan running and the driver already reported completion,
2089 + * but the worker hasn't run yet or is stuck on the mutex - mark it as
2090 + * cancelled.
2091 + */
2092 + if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
2093 + test_bit(SCAN_COMPLETED, &local->scanning)) {
2094 + set_bit(SCAN_HW_CANCELLED, &local->scanning);
2095 + goto out;
2096 + }
2097 +
2098 if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
2099 + /*
2100 + * Make sure that __ieee80211_scan_completed doesn't trigger a
2101 + * scan on another band.
2102 + */
2103 + set_bit(SCAN_HW_CANCELLED, &local->scanning);
2104 if (local->ops->cancel_hw_scan)
2105 drv_cancel_hw_scan(local,
2106 rcu_dereference_protected(local->scan_sdata,
2107 diff --git a/net/mac80211/status.c b/net/mac80211/status.c
2108 index 43439203f4e4..9e78206bd9bb 100644
2109 --- a/net/mac80211/status.c
2110 +++ b/net/mac80211/status.c
2111 @@ -180,6 +180,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
2112 struct ieee80211_local *local = sta->local;
2113 struct ieee80211_sub_if_data *sdata = sta->sdata;
2114
2115 + if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
2116 + sta->last_rx = jiffies;
2117 +
2118 if (ieee80211_is_data_qos(mgmt->frame_control)) {
2119 struct ieee80211_hdr *hdr = (void *) skb->data;
2120 u8 *qc = ieee80211_get_qos_ctl(hdr);
2121 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
2122 index 9972e07a2f96..e9d18c30071f 100644
2123 --- a/net/mac80211/tx.c
2124 +++ b/net/mac80211/tx.c
2125 @@ -1100,7 +1100,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
2126 tx->sta = rcu_dereference(sdata->u.vlan.sta);
2127 if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
2128 return TX_DROP;
2129 - } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
2130 + } else if (info->flags & (IEEE80211_TX_CTL_INJECTED |
2131 + IEEE80211_TX_INTFL_NL80211_FRAME_TX) ||
2132 tx->sdata->control_port_protocol == tx->skb->protocol) {
2133 tx->sta = sta_info_get_bss(sdata, hdr->addr1);
2134 }
2135 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
2136 index 72e6292955bb..5db8eb5d56cf 100644
2137 --- a/net/mac80211/util.c
2138 +++ b/net/mac80211/util.c
2139 @@ -2174,6 +2174,10 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
2140 }
2141
2142 rate = cfg80211_calculate_bitrate(&ri);
2143 + if (WARN_ONCE(!rate,
2144 + "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
2145 + status->flag, status->rate_idx, status->vht_nss))
2146 + return 0;
2147
2148 /* rewind from end of MPDU */
2149 if (status->flag & RX_FLAG_MACTIME_END)
2150 diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
2151 index d80e47194d49..e62c1ad4e4c9 100644
2152 --- a/net/wireless/ibss.c
2153 +++ b/net/wireless/ibss.c
2154 @@ -269,6 +269,8 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
2155 if (chan->flags & IEEE80211_CHAN_DISABLED)
2156 continue;
2157 wdev->wext.ibss.chandef.chan = chan;
2158 + wdev->wext.ibss.chandef.center_freq1 =
2159 + chan->center_freq;
2160 break;
2161 }
2162
2163 @@ -353,6 +355,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
2164 if (chan) {
2165 wdev->wext.ibss.chandef.chan = chan;
2166 wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
2167 + wdev->wext.ibss.chandef.center_freq1 = freq;
2168 wdev->wext.ibss.channel_fixed = true;
2169 } else {
2170 /* cfg80211_ibss_wext_join will pick one if needed */
2171 diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
2172 index 487ac6f37ca2..9a11f9f799f4 100644
2173 --- a/scripts/kallsyms.c
2174 +++ b/scripts/kallsyms.c
2175 @@ -55,6 +55,7 @@ static struct sym_entry *table;
2176 static unsigned int table_size, table_cnt;
2177 static int all_symbols = 0;
2178 static char symbol_prefix_char = '\0';
2179 +static unsigned long long kernel_start_addr = 0;
2180
2181 int token_profit[0x10000];
2182
2183 @@ -65,7 +66,10 @@ unsigned char best_table_len[256];
2184
2185 static void usage(void)
2186 {
2187 - fprintf(stderr, "Usage: kallsyms [--all-symbols] [--symbol-prefix=<prefix char>] < in.map > out.S\n");
2188 + fprintf(stderr, "Usage: kallsyms [--all-symbols] "
2189 + "[--symbol-prefix=<prefix char>] "
2190 + "[--page-offset=<CONFIG_PAGE_OFFSET>] "
2191 + "< in.map > out.S\n");
2192 exit(1);
2193 }
2194
2195 @@ -194,6 +198,9 @@ static int symbol_valid(struct sym_entry *s)
2196 int i;
2197 int offset = 1;
2198
2199 + if (s->addr < kernel_start_addr)
2200 + return 0;
2201 +
2202 /* skip prefix char */
2203 if (symbol_prefix_char && *(s->sym + 1) == symbol_prefix_char)
2204 offset++;
2205 @@ -646,6 +653,9 @@ int main(int argc, char **argv)
2206 if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
2207 p++;
2208 symbol_prefix_char = *p;
2209 + } else if (strncmp(argv[i], "--page-offset=", 14) == 0) {
2210 + const char *p = &argv[i][14];
2211 + kernel_start_addr = strtoull(p, NULL, 16);
2212 } else
2213 usage();
2214 }
2215 diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
2216 index 014994936b1c..32b10f53d0b4 100644
2217 --- a/scripts/link-vmlinux.sh
2218 +++ b/scripts/link-vmlinux.sh
2219 @@ -82,6 +82,8 @@ kallsyms()
2220 kallsymopt="${kallsymopt} --all-symbols"
2221 fi
2222
2223 + kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
2224 +
2225 local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
2226 ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
2227
2228 diff --git a/sound/core/pcm.c b/sound/core/pcm.c
2229 index 17f45e8aa89c..e1e9e0c999fe 100644
2230 --- a/sound/core/pcm.c
2231 +++ b/sound/core/pcm.c
2232 @@ -49,6 +49,8 @@ static struct snd_pcm *snd_pcm_get(struct snd_card *card, int device)
2233 struct snd_pcm *pcm;
2234
2235 list_for_each_entry(pcm, &snd_pcm_devices, list) {
2236 + if (pcm->internal)
2237 + continue;
2238 if (pcm->card == card && pcm->device == device)
2239 return pcm;
2240 }
2241 @@ -60,6 +62,8 @@ static int snd_pcm_next(struct snd_card *card, int device)
2242 struct snd_pcm *pcm;
2243
2244 list_for_each_entry(pcm, &snd_pcm_devices, list) {
2245 + if (pcm->internal)
2246 + continue;
2247 if (pcm->card == card && pcm->device > device)
2248 return pcm->device;
2249 else if (pcm->card->number > card->number)
2250 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
2251 index 55108b5fb291..31461ba32d3c 100644
2252 --- a/sound/pci/hda/hda_codec.c
2253 +++ b/sound/pci/hda/hda_codec.c
2254 @@ -4789,8 +4789,8 @@ static void hda_power_work(struct work_struct *work)
2255 spin_unlock(&codec->power_lock);
2256
2257 state = hda_call_codec_suspend(codec, true);
2258 - codec->pm_down_notified = 0;
2259 - if (!bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
2260 + if (!codec->pm_down_notified &&
2261 + !bus->power_keep_link_on && (state & AC_PWRST_CLK_STOP_OK)) {
2262 codec->pm_down_notified = 1;
2263 hda_call_pm_notify(bus, false);
2264 }
2265 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
2266 index 2519f9d03c0f..d0cc796f778a 100644
2267 --- a/sound/pci/hda/hda_generic.c
2268 +++ b/sound/pci/hda/hda_generic.c
2269 @@ -4383,9 +4383,11 @@ int snd_hda_gen_build_controls(struct hda_codec *codec)
2270 true, &spec->vmaster_mute.sw_kctl);
2271 if (err < 0)
2272 return err;
2273 - if (spec->vmaster_mute.hook)
2274 + if (spec->vmaster_mute.hook) {
2275 snd_hda_add_vmaster_hook(codec, &spec->vmaster_mute,
2276 spec->vmaster_mute_enum);
2277 + snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
2278 + }
2279 }
2280
2281 free_kctls(spec); /* no longer needed */
2282 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2283 index 21b6649c128e..4496e0ab693d 100644
2284 --- a/sound/pci/hda/patch_realtek.c
2285 +++ b/sound/pci/hda/patch_realtek.c
2286 @@ -4253,6 +4253,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
2287 SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
2288 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
2289 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
2290 + SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4),
2291 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
2292 SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
2293 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
2294 diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
2295 index f5d81b948759..7a0466eb7ede 100644
2296 --- a/sound/soc/codecs/wm_hubs.c
2297 +++ b/sound/soc/codecs/wm_hubs.c
2298 @@ -530,6 +530,7 @@ static int hp_supply_event(struct snd_soc_dapm_widget *w,
2299 hubs->hp_startup_mode);
2300 break;
2301 }
2302 + break;
2303
2304 case SND_SOC_DAPM_PRE_PMD:
2305 snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
2306 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2307 index 360638362e98..c2ecb4e01597 100644
2308 --- a/sound/soc/soc-dapm.c
2309 +++ b/sound/soc/soc-dapm.c
2310 @@ -1797,7 +1797,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
2311 w->active ? "active" : "inactive");
2312
2313 list_for_each_entry(p, &w->sources, list_sink) {
2314 - if (p->connected && !p->connected(w, p->sink))
2315 + if (p->connected && !p->connected(w, p->source))
2316 continue;
2317
2318 if (p->connect)