Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.6/0110-3.6.11-all-fixes.patch



Revision 2015
Tue Jan 8 09:12:29 2013 UTC by niro
File size: 69661 bytes
-linux 3.6.11
1 diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
2 index 9176261..a2fe893 100644
3 --- a/arch/arm/include/asm/hwcap.h
4 +++ b/arch/arm/include/asm/hwcap.h
5 @@ -18,11 +18,12 @@
6 #define HWCAP_THUMBEE (1 << 11)
7 #define HWCAP_NEON (1 << 12)
8 #define HWCAP_VFPv3 (1 << 13)
9 -#define HWCAP_VFPv3D16 (1 << 14)
10 +#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
11 #define HWCAP_TLS (1 << 15)
12 #define HWCAP_VFPv4 (1 << 16)
13 #define HWCAP_IDIVA (1 << 17)
14 #define HWCAP_IDIVT (1 << 18)
15 +#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
16 #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
17
18 #if defined(__KERNEL__)
19 diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
20 index bf53047..c49c8f7 100644
21 --- a/arch/arm/include/asm/vfpmacros.h
22 +++ b/arch/arm/include/asm/vfpmacros.h
23 @@ -27,9 +27,9 @@
24 #if __LINUX_ARM_ARCH__ <= 6
25 ldr \tmp, =elf_hwcap @ may not have MVFR regs
26 ldr \tmp, [\tmp, #0]
27 - tst \tmp, #HWCAP_VFPv3D16
28 - ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
29 - addne \base, \base, #32*4 @ step over unused register space
30 + tst \tmp, #HWCAP_VFPD32
31 + ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
32 + addeq \base, \base, #32*4 @ step over unused register space
33 #else
34 VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
35 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
36 @@ -51,9 +51,9 @@
37 #if __LINUX_ARM_ARCH__ <= 6
38 ldr \tmp, =elf_hwcap @ may not have MVFR regs
39 ldr \tmp, [\tmp, #0]
40 - tst \tmp, #HWCAP_VFPv3D16
41 - stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
42 - addne \base, \base, #32*4 @ step over unused register space
43 + tst \tmp, #HWCAP_VFPD32
44 + stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
45 + addeq \base, \base, #32*4 @ step over unused register space
46 #else
47 VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
48 and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
49 diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
50 index c834b32..3b44e0d 100644
51 --- a/arch/arm/vfp/vfpmodule.c
52 +++ b/arch/arm/vfp/vfpmodule.c
53 @@ -701,11 +701,14 @@ static int __init vfp_init(void)
54 elf_hwcap |= HWCAP_VFPv3;
55
56 /*
57 - * Check for VFPv3 D16. CPUs in this configuration
58 - * only have 16 x 64bit registers.
59 + * Check for VFPv3 D16 and VFPv4 D16. CPUs in
60 + * this configuration only have 16 x 64bit
61 + * registers.
62 */
63 if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
64 - elf_hwcap |= HWCAP_VFPv3D16;
65 + elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
66 + else
67 + elf_hwcap |= HWCAP_VFPD32;
68 }
69 #endif
70 /*
71 diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
72 index 041e28d..1f12c24 100644
73 --- a/arch/powerpc/platforms/pseries/eeh_driver.c
74 +++ b/arch/powerpc/platforms/pseries/eeh_driver.c
75 @@ -164,17 +164,18 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
76 enum pci_ers_result rc, *res = userdata;
77 struct pci_driver *driver;
78
79 + device_lock(&dev->dev);
80 dev->error_state = pci_channel_io_frozen;
81
82 driver = eeh_pcid_get(dev);
83 - if (!driver) return 0;
84 + if (!driver) goto out;
85
86 eeh_disable_irq(dev);
87
88 if (!driver->err_handler ||
89 !driver->err_handler->error_detected) {
90 eeh_pcid_put(dev);
91 - return 0;
92 + goto out;
93 }
94
95 rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
96 @@ -184,6 +185,8 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
97 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
98
99 eeh_pcid_put(dev);
100 +out:
101 + device_unlock(&dev->dev);
102 return 0;
103 }
104
105 @@ -201,13 +204,14 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
106 enum pci_ers_result rc, *res = userdata;
107 struct pci_driver *driver;
108
109 + device_lock(&dev->dev);
110 driver = eeh_pcid_get(dev);
111 - if (!driver) return 0;
112 + if (!driver) goto out;
113
114 if (!driver->err_handler ||
115 !driver->err_handler->mmio_enabled) {
116 eeh_pcid_put(dev);
117 - return 0;
118 + goto out;
119 }
120
121 rc = driver->err_handler->mmio_enabled(dev);
122 @@ -217,6 +221,8 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
123 if (*res == PCI_ERS_RESULT_NONE) *res = rc;
124
125 eeh_pcid_put(dev);
126 +out:
127 + device_unlock(&dev->dev);
128 return 0;
129 }
130
131 @@ -235,17 +241,18 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
132 enum pci_ers_result rc, *res = userdata;
133 struct pci_driver *driver;
134
135 + device_lock(&dev->dev);
136 dev->error_state = pci_channel_io_normal;
137
138 driver = eeh_pcid_get(dev);
139 - if (!driver) return 0;
140 + if (!driver) goto out;
141
142 eeh_enable_irq(dev);
143
144 if (!driver->err_handler ||
145 !driver->err_handler->slot_reset) {
146 eeh_pcid_put(dev);
147 - return 0;
148 + goto out;
149 }
150
151 rc = driver->err_handler->slot_reset(dev);
152 @@ -255,6 +262,8 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
153 rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
154
155 eeh_pcid_put(dev);
156 +out:
157 + device_unlock(&dev->dev);
158 return 0;
159 }
160
161 @@ -271,22 +280,25 @@ static int eeh_report_resume(struct pci_dev *dev, void *userdata)
162 {
163 struct pci_driver *driver;
164
165 + device_lock(&dev->dev);
166 dev->error_state = pci_channel_io_normal;
167
168 driver = eeh_pcid_get(dev);
169 - if (!driver) return 0;
170 + if (!driver) goto out;
171
172 eeh_enable_irq(dev);
173
174 if (!driver->err_handler ||
175 !driver->err_handler->resume) {
176 eeh_pcid_put(dev);
177 - return 0;
178 + goto out;
179 }
180
181 driver->err_handler->resume(dev);
182
183 eeh_pcid_put(dev);
184 +out:
185 + device_unlock(&dev->dev);
186 return 0;
187 }
188
189 @@ -302,22 +314,25 @@ static int eeh_report_failure(struct pci_dev *dev, void *userdata)
190 {
191 struct pci_driver *driver;
192
193 + device_lock(&dev->dev);
194 dev->error_state = pci_channel_io_perm_failure;
195
196 driver = eeh_pcid_get(dev);
197 - if (!driver) return 0;
198 + if (!driver) goto out;
199
200 eeh_disable_irq(dev);
201
202 if (!driver->err_handler ||
203 !driver->err_handler->error_detected) {
204 eeh_pcid_put(dev);
205 - return 0;
206 + goto out;
207 }
208
209 driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
210
211 eeh_pcid_put(dev);
212 +out:
213 + device_unlock(&dev->dev);
214 return 0;
215 }
216
217 diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
218 index 1460a5d..e28670f 100644
219 --- a/arch/x86/kernel/hpet.c
220 +++ b/arch/x86/kernel/hpet.c
221 @@ -434,7 +434,7 @@ void hpet_msi_unmask(struct irq_data *data)
222
223 /* unmask it */
224 cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
225 - cfg |= HPET_TN_FSB;
226 + cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
227 hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
228 }
229
230 @@ -445,7 +445,7 @@ void hpet_msi_mask(struct irq_data *data)
231
232 /* mask it */
233 cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
234 - cfg &= ~HPET_TN_FSB;
235 + cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
236 hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
237 }
238
239 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
240 index 45e3e17..7efaeaa 100644
241 --- a/drivers/acpi/battery.c
242 +++ b/drivers/acpi/battery.c
243 @@ -34,6 +34,7 @@
244 #include <linux/dmi.h>
245 #include <linux/slab.h>
246 #include <linux/suspend.h>
247 +#include <asm/unaligned.h>
248
249 #ifdef CONFIG_ACPI_PROCFS_POWER
250 #include <linux/proc_fs.h>
251 @@ -95,6 +96,18 @@ enum {
252 ACPI_BATTERY_ALARM_PRESENT,
253 ACPI_BATTERY_XINFO_PRESENT,
254 ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
255 + /* On Lenovo Thinkpad models from 2010 and 2011, the power unit
256 + switches between mWh and mAh depending on whether the system
257 + is running on battery or not. When mAh is the unit, most
258 + reported values are incorrect and need to be adjusted by
259 + 10000/design_voltage. Verified on x201, t410, t410s, and x220.
260 + Pre-2010 and 2012 models appear to always report in mWh and
261 + are thus unaffected (tested with t42, t61, t500, x200, x300,
262 + and x230). Also, in mid-2012 Lenovo issued a BIOS update for
263 + the 2011 models that fixes the issue (tested on x220 with a
264 + post-1.29 BIOS), but as of Nov. 2012, no such update is
265 + available for the 2010 models. */
266 + ACPI_BATTERY_QUIRK_THINKPAD_MAH,
267 };
268
269 struct acpi_battery {
270 @@ -438,6 +451,21 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
271 kfree(buffer.pointer);
272 if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
273 battery->full_charge_capacity = battery->design_capacity;
274 + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
275 + battery->power_unit && battery->design_voltage) {
276 + battery->design_capacity = battery->design_capacity *
277 + 10000 / battery->design_voltage;
278 + battery->full_charge_capacity = battery->full_charge_capacity *
279 + 10000 / battery->design_voltage;
280 + battery->design_capacity_warning =
281 + battery->design_capacity_warning *
282 + 10000 / battery->design_voltage;
283 + /* Curiously, design_capacity_low, unlike the rest of them,
284 + is correct. */
285 + /* capacity_granularity_* equal 1 on the systems tested, so
286 + it's impossible to tell if they would need an adjustment
287 + or not if their values were higher. */
288 + }
289 return result;
290 }
291
292 @@ -486,6 +514,11 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
293 && battery->capacity_now >= 0 && battery->capacity_now <= 100)
294 battery->capacity_now = (battery->capacity_now *
295 battery->full_charge_capacity) / 100;
296 + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
297 + battery->power_unit && battery->design_voltage) {
298 + battery->capacity_now = battery->capacity_now *
299 + 10000 / battery->design_voltage;
300 + }
301 return result;
302 }
303
304 @@ -595,6 +628,24 @@ static void sysfs_remove_battery(struct acpi_battery *battery)
305 mutex_unlock(&battery->sysfs_lock);
306 }
307
308 +static void find_battery(const struct dmi_header *dm, void *private)
309 +{
310 + struct acpi_battery *battery = (struct acpi_battery *)private;
311 + /* Note: the hardcoded offsets below have been extracted from
312 + the source code of dmidecode. */
313 + if (dm->type == DMI_ENTRY_PORTABLE_BATTERY && dm->length >= 8) {
314 + const u8 *dmi_data = (const u8 *)(dm + 1);
315 + int dmi_capacity = get_unaligned((const u16 *)(dmi_data + 6));
316 + if (dm->length >= 18)
317 + dmi_capacity *= dmi_data[17];
318 + if (battery->design_capacity * battery->design_voltage / 1000
319 + != dmi_capacity &&
320 + battery->design_capacity * 10 == dmi_capacity)
321 + set_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
322 + &battery->flags);
323 + }
324 +}
325 +
326 /*
327 * According to the ACPI spec, some kinds of primary batteries can
328 * report percentage battery remaining capacity directly to OS.
329 @@ -620,6 +671,32 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
330 battery->capacity_now = (battery->capacity_now *
331 battery->full_charge_capacity) / 100;
332 }
333 +
334 + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags))
335 + return ;
336 +
337 + if (battery->power_unit && dmi_name_in_vendors("LENOVO")) {
338 + const char *s;
339 + s = dmi_get_system_info(DMI_PRODUCT_VERSION);
340 + if (s && !strnicmp(s, "ThinkPad", 8)) {
341 + dmi_walk(find_battery, battery);
342 + if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
343 + &battery->flags) &&
344 + battery->design_voltage) {
345 + battery->design_capacity =
346 + battery->design_capacity *
347 + 10000 / battery->design_voltage;
348 + battery->full_charge_capacity =
349 + battery->full_charge_capacity *
350 + 10000 / battery->design_voltage;
351 + battery->design_capacity_warning =
352 + battery->design_capacity_warning *
353 + 10000 / battery->design_voltage;
354 + battery->capacity_now = battery->capacity_now *
355 + 10000 / battery->design_voltage;
356 + }
357 + }
358 + }
359 }
360
361 static int acpi_battery_update(struct acpi_battery *battery)
362 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
363 index fdcdbb6..847ed55 100644
364 --- a/drivers/acpi/sleep.c
365 +++ b/drivers/acpi/sleep.c
366 @@ -519,6 +519,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
367 },
368 {
369 .callback = init_nvs_nosave,
370 + .ident = "Sony Vaio VPCEB1S1E",
371 + .matches = {
372 + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
373 + DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
374 + },
375 + },
376 + {
377 + .callback = init_nvs_nosave,
378 .ident = "Sony Vaio VGN-FW520F",
379 .matches = {
380 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
381 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
382 index 9fe2659..3837739 100644
383 --- a/drivers/acpi/video.c
384 +++ b/drivers/acpi/video.c
385 @@ -389,6 +389,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
386 return 0;
387 }
388
389 +static int video_ignore_initial_backlight(const struct dmi_system_id *d)
390 +{
391 + use_bios_initial_backlight = 0;
392 + return 0;
393 +}
394 +
395 static struct dmi_system_id video_dmi_table[] __initdata = {
396 /*
397 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
398 @@ -433,6 +439,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
399 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
400 },
401 },
402 + {
403 + .callback = video_ignore_initial_backlight,
404 + .ident = "HP Folio 13-2000",
405 + .matches = {
406 + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
407 + DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
408 + },
409 + },
410 {}
411 };
412
413 diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
414 index b728880..4ac2593 100644
415 --- a/drivers/acpi/video_detect.c
416 +++ b/drivers/acpi/video_detect.c
417 @@ -156,6 +156,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
418 DMI_MATCH(DMI_BOARD_NAME, "X360"),
419 },
420 },
421 + {
422 + .callback = video_detect_force_vendor,
423 + .ident = "Asus UL30VT",
424 + .matches = {
425 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
426 + DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
427 + },
428 + },
429 { },
430 };
431
432 diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
433 index fe0de7e..5f2595a 100644
434 --- a/drivers/block/floppy.c
435 +++ b/drivers/block/floppy.c
436 @@ -4329,6 +4329,7 @@ out_unreg_region:
437 out_unreg_blkdev:
438 unregister_blkdev(FLOPPY_MAJOR, "fd");
439 out_put_disk:
440 + destroy_workqueue(floppy_wq);
441 while (dr--) {
442 del_timer_sync(&motor_off_timer[dr]);
443 if (disks[dr]->queue) {
444 @@ -4341,7 +4342,6 @@ out_put_disk:
445 }
446 put_disk(disks[dr]);
447 }
448 - destroy_workqueue(floppy_wq);
449 return err;
450 }
451
452 @@ -4556,6 +4556,8 @@ static void __exit floppy_module_exit(void)
453 unregister_blkdev(FLOPPY_MAJOR, "fd");
454 platform_driver_unregister(&floppy_driver);
455
456 + destroy_workqueue(floppy_wq);
457 +
458 for (drive = 0; drive < N_DRIVE; drive++) {
459 del_timer_sync(&motor_off_timer[drive]);
460
461 @@ -4580,7 +4582,6 @@ static void __exit floppy_module_exit(void)
462
463 cancel_delayed_work_sync(&fd_timeout);
464 cancel_delayed_work_sync(&fd_timer);
465 - destroy_workqueue(floppy_wq);
466
467 if (atomic_read(&usage_count))
468 floppy_release_irq_and_dma();
469 diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
470 index d5dc9da..81eb9fd 100644
471 --- a/drivers/edac/edac_mc.c
472 +++ b/drivers/edac/edac_mc.c
473 @@ -416,10 +416,18 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
474 dimm->cschannel = chn;
475
476 /* Increment csrow location */
477 - row++;
478 - if (row == tot_csrows) {
479 - row = 0;
480 + if (layers[0].is_virt_csrow) {
481 chn++;
482 + if (chn == tot_channels) {
483 + chn = 0;
484 + row++;
485 + }
486 + } else {
487 + row++;
488 + if (row == tot_csrows) {
489 + row = 0;
490 + chn++;
491 + }
492 }
493
494 /* Increment dimm location */
495 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
496 index 069e26c..a980204 100644
497 --- a/drivers/edac/i82975x_edac.c
498 +++ b/drivers/edac/i82975x_edac.c
499 @@ -370,10 +370,6 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
500 static void i82975x_init_csrows(struct mem_ctl_info *mci,
501 struct pci_dev *pdev, void __iomem *mch_window)
502 {
503 - static const char *labels[4] = {
504 - "DIMM A1", "DIMM A2",
505 - "DIMM B1", "DIMM B2"
506 - };
507 struct csrow_info *csrow;
508 unsigned long last_cumul_size;
509 u8 value;
510 @@ -423,9 +419,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
511 dimm = mci->csrows[index]->channels[chan]->dimm;
512
513 dimm->nr_pages = nr_pages / csrow->nr_channels;
514 - strncpy(csrow->channels[chan]->dimm->label,
515 - labels[(index >> 1) + (chan * 2)],
516 - EDAC_MC_LABEL_LEN);
517 +
518 + snprintf(csrow->channels[chan]->dimm->label, EDAC_MC_LABEL_LEN, "DIMM %c%d",
519 + (chan == 0) ? 'A' : 'B',
520 + index);
521 dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
522 dimm->dtype = i82975x_dram_type(mch_window, index);
523 dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
524 diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
525 index 8c60741..d0df62a 100644
526 --- a/drivers/gpu/drm/i915/intel_bios.c
527 +++ b/drivers/gpu/drm/i915/intel_bios.c
528 @@ -500,12 +500,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
529
530 edp = find_section(bdb, BDB_EDP);
531 if (!edp) {
532 - if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
533 - DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
534 - "supported, assume %dbpp panel color "
535 - "depth.\n",
536 - dev_priv->edp.bpp);
537 - }
538 + if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
539 + DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
540 return;
541 }
542
543 @@ -658,9 +654,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
544 dev_priv->lvds_use_ssc = 1;
545 dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
546 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
547 -
548 - /* eDP data */
549 - dev_priv->edp.bpp = 18;
550 }
551
552 static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
553 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
554 index b634f6f..0777c79 100644
555 --- a/drivers/gpu/drm/i915/intel_display.c
556 +++ b/drivers/gpu/drm/i915/intel_display.c
557 @@ -3791,6 +3791,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
558 }
559 }
560
561 + if (intel_encoder->type == INTEL_OUTPUT_EDP) {
562 + /* Use VBT settings if we have an eDP panel */
563 + unsigned int edp_bpc = dev_priv->edp.bpp / 3;
564 +
565 + if (edp_bpc && edp_bpc < display_bpc) {
566 + DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
567 + display_bpc = edp_bpc;
568 + }
569 + continue;
570 + }
571 +
572 /*
573 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
574 * through, clamp it down. (Note: >12bpc will be caught below.)
575 diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
576 index af69073..f7b8237de 100644
577 --- a/drivers/hwmon/fam15h_power.c
578 +++ b/drivers/hwmon/fam15h_power.c
579 @@ -31,6 +31,9 @@ MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor");
580 MODULE_AUTHOR("Andreas Herrmann <andreas.herrmann3@amd.com>");
581 MODULE_LICENSE("GPL");
582
583 +/* Family 16h Northbridge's function 4 PCI ID */
584 +#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534
585 +
586 /* D18F3 */
587 #define REG_NORTHBRIDGE_CAP 0xe8
588
589 @@ -257,6 +260,7 @@ static void __devexit fam15h_power_remove(struct pci_dev *pdev)
590
591 static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = {
592 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
593 + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
594 {}
595 };
596 MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
597 diff --git a/drivers/input/matrix-keymap.c b/drivers/input/matrix-keymap.c
598 index 443ad64b..d88d9be 100644
599 --- a/drivers/input/matrix-keymap.c
600 +++ b/drivers/input/matrix-keymap.c
601 @@ -23,6 +23,7 @@
602 #include <linux/input.h>
603 #include <linux/of.h>
604 #include <linux/export.h>
605 +#include <linux/module.h>
606 #include <linux/input/matrix_keypad.h>
607
608 static bool matrix_keypad_map_key(struct input_dev *input_dev,
609 @@ -161,3 +162,5 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
610 return 0;
611 }
612 EXPORT_SYMBOL(matrix_keypad_build_keymap);
613 +
614 +MODULE_LICENSE("GPL");
615 diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
616 index 9058d21..2671202 100644
617 --- a/drivers/mmc/host/sh_mmcif.c
618 +++ b/drivers/mmc/host/sh_mmcif.c
619 @@ -1104,7 +1104,6 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
620 {
621 struct sh_mmcif_host *host = dev_id;
622 struct mmc_request *mrq = host->mrq;
623 - struct mmc_data *data = mrq->data;
624
625 cancel_delayed_work_sync(&host->timeout_work);
626
627 @@ -1152,13 +1151,14 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
628 case MMCIF_WAIT_FOR_READ_END:
629 case MMCIF_WAIT_FOR_WRITE_END:
630 if (host->sd_error)
631 - data->error = sh_mmcif_error_manage(host);
632 + mrq->data->error = sh_mmcif_error_manage(host);
633 break;
634 default:
635 BUG();
636 }
637
638 if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
639 + struct mmc_data *data = mrq->data;
640 if (!mrq->cmd->error && data && !data->error)
641 data->bytes_xfered =
642 data->blocks * data->blksz;
643 @@ -1229,10 +1229,6 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
644 host->sd_error = true;
645 dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
646 }
647 - if (host->state == STATE_IDLE) {
648 - dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state);
649 - return IRQ_HANDLED;
650 - }
651 if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
652 if (!host->dma_active)
653 return IRQ_WAKE_THREAD;
654 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
655 index d688a8a..acc0718 100644
656 --- a/drivers/net/bonding/bond_main.c
657 +++ b/drivers/net/bonding/bond_main.c
658 @@ -1379,6 +1379,8 @@ static void bond_compute_features(struct bonding *bond)
659 struct net_device *bond_dev = bond->dev;
660 netdev_features_t vlan_features = BOND_VLAN_FEATURES;
661 unsigned short max_hard_header_len = ETH_HLEN;
662 + unsigned int gso_max_size = GSO_MAX_SIZE;
663 + u16 gso_max_segs = GSO_MAX_SEGS;
664 int i;
665 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
666
667 @@ -1394,11 +1396,16 @@ static void bond_compute_features(struct bonding *bond)
668 dst_release_flag &= slave->dev->priv_flags;
669 if (slave->dev->hard_header_len > max_hard_header_len)
670 max_hard_header_len = slave->dev->hard_header_len;
671 +
672 + gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
673 + gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
674 }
675
676 done:
677 bond_dev->vlan_features = vlan_features;
678 bond_dev->hard_header_len = max_hard_header_len;
679 + bond_dev->gso_max_segs = gso_max_segs;
680 + netif_set_gso_max_size(bond_dev, gso_max_size);
681
682 flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
683 bond_dev->priv_flags = flags | dst_release_flag;
684 diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
685 index dc15d24..21609f9 100644
686 --- a/drivers/net/bonding/bond_sysfs.c
687 +++ b/drivers/net/bonding/bond_sysfs.c
688 @@ -1582,6 +1582,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
689 goto out;
690 }
691
692 + read_lock(&bond->lock);
693 bond_for_each_slave(bond, slave, i) {
694 if (!bond_is_active_slave(slave)) {
695 if (new_value)
696 @@ -1590,6 +1591,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
697 slave->inactive = 1;
698 }
699 }
700 + read_unlock(&bond->lock);
701 out:
702 return ret;
703 }
704 diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
705 index d04911d..47618e5 100644
706 --- a/drivers/net/ethernet/8390/ne.c
707 +++ b/drivers/net/ethernet/8390/ne.c
708 @@ -813,6 +813,7 @@ static int __init ne_drv_probe(struct platform_device *pdev)
709 dev->irq = irq[this_dev];
710 dev->mem_end = bad[this_dev];
711 }
712 + SET_NETDEV_DEV(dev, &pdev->dev);
713 err = do_ne_probe(dev);
714 if (err) {
715 free_netdev(dev);
716 diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
717 index 203d9c6..f21e30c 100644
718 --- a/drivers/net/ethernet/sis/sis900.c
719 +++ b/drivers/net/ethernet/sis/sis900.c
720 @@ -2477,7 +2477,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
721 netif_start_queue(net_dev);
722
723 /* Workaround for EDB */
724 - sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
725 + sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
726
727 /* Enable all known interrupts by setting the interrupt mask. */
728 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
729 diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
730 index 5039f08..43e9ab4 100644
731 --- a/drivers/net/irda/sir_dev.c
732 +++ b/drivers/net/irda/sir_dev.c
733 @@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work)
734 break;
735
736 case SIRDEV_STATE_DONGLE_SPEED:
737 - if (dev->dongle_drv->reset) {
738 + if (dev->dongle_drv->set_speed) {
739 ret = dev->dongle_drv->set_speed(dev, fsm->param);
740 if (ret < 0) {
741 fsm->result = ret;
742 diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
743 index 4cd582a..74fab1a 100644
744 --- a/drivers/net/usb/cdc_ncm.c
745 +++ b/drivers/net/usb/cdc_ncm.c
746 @@ -540,10 +540,12 @@ advance:
747 (ctx->ether_desc == NULL) || (ctx->control != intf))
748 goto error;
749
750 - /* claim interfaces, if any */
751 - temp = usb_driver_claim_interface(driver, ctx->data, dev);
752 - if (temp)
753 - goto error;
754 + /* claim data interface, if different from control */
755 + if (ctx->data != ctx->control) {
756 + temp = usb_driver_claim_interface(driver, ctx->data, dev);
757 + if (temp)
758 + goto error;
759 + }
760
761 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
762
763 @@ -623,6 +625,10 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
764
765 tasklet_kill(&ctx->bh);
766
767 + /* handle devices with combined control and data interface */
768 + if (ctx->control == ctx->data)
769 + ctx->data = NULL;
770 +
771 /* disconnect master --> disconnect slave */
772 if (intf == ctx->control && ctx->data) {
773 usb_set_intfdata(ctx->data, NULL);
774 @@ -1245,6 +1251,14 @@ static const struct usb_device_id cdc_devs[] = {
775 .driver_info = (unsigned long) &wwan_info,
776 },
777
778 + /* Huawei NCM devices disguised as vendor specific */
779 + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
780 + .driver_info = (unsigned long)&wwan_info,
781 + },
782 + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
783 + .driver_info = (unsigned long)&wwan_info,
784 + },
785 +
786 /* Generic CDC-NCM devices */
787 { USB_INTERFACE_INFO(USB_CLASS_COMM,
788 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
789 diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
790 index a28a983..534d8be 100644
791 --- a/drivers/net/usb/ipheth.c
792 +++ b/drivers/net/usb/ipheth.c
793 @@ -62,6 +62,7 @@
794 #define USB_PRODUCT_IPAD 0x129a
795 #define USB_PRODUCT_IPHONE_4_VZW 0x129c
796 #define USB_PRODUCT_IPHONE_4S 0x12a0
797 +#define USB_PRODUCT_IPHONE_5 0x12a8
798
799 #define IPHETH_USBINTF_CLASS 255
800 #define IPHETH_USBINTF_SUBCLASS 253
801 @@ -113,6 +114,10 @@ static struct usb_device_id ipheth_table[] = {
802 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
803 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
804 IPHETH_USBINTF_PROTO) },
805 + { USB_DEVICE_AND_INTERFACE_INFO(
806 + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5,
807 + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
808 + IPHETH_USBINTF_PROTO) },
809 { }
810 };
811 MODULE_DEVICE_TABLE(usb, ipheth_table);
812 diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
813 index 4b0970b..e7b5fd2 100644
814 --- a/drivers/pci/bus.c
815 +++ b/drivers/pci/bus.c
816 @@ -316,10 +316,7 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
817 } else
818 next = dev->bus_list.next;
819
820 - /* Run device routines with the device locked */
821 - device_lock(&dev->dev);
822 retval = cb(dev, userdata);
823 - device_unlock(&dev->dev);
824 if (retval)
825 break;
826 }
827 diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
828 index 0ca0535..1b7d05d 100644
829 --- a/drivers/pci/pcie/aer/aerdrv_core.c
830 +++ b/drivers/pci/pcie/aer/aerdrv_core.c
831 @@ -244,6 +244,7 @@ static int report_error_detected(struct pci_dev *dev, void *data)
832 struct aer_broadcast_data *result_data;
833 result_data = (struct aer_broadcast_data *) data;
834
835 + device_lock(&dev->dev);
836 dev->error_state = result_data->state;
837
838 if (!dev->driver ||
839 @@ -262,12 +263,14 @@ static int report_error_detected(struct pci_dev *dev, void *data)
840 dev->driver ?
841 "no AER-aware driver" : "no driver");
842 }
843 - return 0;
844 + goto out;
845 }
846
847 err_handler = dev->driver->err_handler;
848 vote = err_handler->error_detected(dev, result_data->state);
849 result_data->result = merge_result(result_data->result, vote);
850 +out:
851 + device_unlock(&dev->dev);
852 return 0;
853 }
854
855 @@ -278,14 +281,17 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data)
856 struct aer_broadcast_data *result_data;
857 result_data = (struct aer_broadcast_data *) data;
858
859 + device_lock(&dev->dev);
860 if (!dev->driver ||
861 !dev->driver->err_handler ||
862 !dev->driver->err_handler->mmio_enabled)
863 - return 0;
864 + goto out;
865
866 err_handler = dev->driver->err_handler;
867 vote = err_handler->mmio_enabled(dev);
868 result_data->result = merge_result(result_data->result, vote);
869 +out:
870 + device_unlock(&dev->dev);
871 return 0;
872 }
873
874 @@ -296,14 +302,17 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
875 struct aer_broadcast_data *result_data;
876 result_data = (struct aer_broadcast_data *) data;
877
878 + device_lock(&dev->dev);
879 if (!dev->driver ||
880 !dev->driver->err_handler ||
881 !dev->driver->err_handler->slot_reset)
882 - return 0;
883 + goto out;
884
885 err_handler = dev->driver->err_handler;
886 vote = err_handler->slot_reset(dev);
887 result_data->result = merge_result(result_data->result, vote);
888 +out:
889 + device_unlock(&dev->dev);
890 return 0;
891 }
892
893 @@ -311,15 +320,18 @@ static int report_resume(struct pci_dev *dev, void *data)
894 {
895 struct pci_error_handlers *err_handler;
896
897 + device_lock(&dev->dev);
898 dev->error_state = pci_channel_io_normal;
899
900 if (!dev->driver ||
901 !dev->driver->err_handler ||
902 !dev->driver->err_handler->resume)
903 - return 0;
904 + goto out;
905
906 err_handler = dev->driver->err_handler;
907 err_handler->resume(dev);
908 +out:
909 + device_unlock(&dev->dev);
910 return 0;
911 }
912
913 diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
914 index 507a8e2..e49871d 100644
915 --- a/drivers/pnp/pnpacpi/core.c
916 +++ b/drivers/pnp/pnpacpi/core.c
917 @@ -95,6 +95,9 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
918 return -ENODEV;
919 }
920
921 + if (WARN_ON_ONCE(acpi_dev != dev->data))
922 + dev->data = acpi_dev;
923 +
924 ret = pnpacpi_build_resource_template(dev, &buffer);
925 if (ret)
926 return ret;
927 diff --git a/drivers/staging/ipack/bridges/tpci200.c b/drivers/staging/ipack/bridges/tpci200.c
928 index 2b83fa8..a7fb2cf 100644
929 --- a/drivers/staging/ipack/bridges/tpci200.c
930 +++ b/drivers/staging/ipack/bridges/tpci200.c
931 @@ -604,8 +604,8 @@ static int tpci200_slot_unregister(struct ipack_device *dev)
932 if (mutex_lock_interruptible(&tpci200->mutex))
933 return -ERESTARTSYS;
934
935 - ipack_device_unregister(dev);
936 tpci200->slots[dev->slot].dev = NULL;
937 + ipack_device_unregister(dev);
938 mutex_unlock(&tpci200->mutex);
939
940 return 0;
941 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
942 index 84f7dff..89c752a 100644
943 --- a/drivers/usb/class/cdc-acm.c
944 +++ b/drivers/usb/class/cdc-acm.c
945 @@ -788,6 +788,10 @@ static int get_serial_info(struct acm *acm, struct serial_struct __user *info)
946 tmp.flags = ASYNC_LOW_LATENCY;
947 tmp.xmit_fifo_size = acm->writesize;
948 tmp.baud_base = le32_to_cpu(acm->line.dwDTERate);
949 + tmp.close_delay = acm->port.close_delay / 10;
950 + tmp.closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
951 + ASYNC_CLOSING_WAIT_NONE :
952 + acm->port.closing_wait / 10;
953
954 if (copy_to_user(info, &tmp, sizeof(tmp)))
955 return -EFAULT;
956 @@ -795,6 +799,37 @@ static int get_serial_info(struct acm *acm, struct serial_struct __user *info)
957 return 0;
958 }
959
960 +static int set_serial_info(struct acm *acm,
961 + struct serial_struct __user *newinfo)
962 +{
963 + struct serial_struct new_serial;
964 + unsigned int closing_wait, close_delay;
965 + int retval = 0;
966 +
967 + if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
968 + return -EFAULT;
969 +
970 + close_delay = new_serial.close_delay * 10;
971 + closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
972 + ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
973 +
974 + mutex_lock(&acm->port.mutex);
975 +
976 + if (!capable(CAP_SYS_ADMIN)) {
977 + if ((close_delay != acm->port.close_delay) ||
978 + (closing_wait != acm->port.closing_wait))
979 + retval = -EPERM;
980 + else
981 + retval = -EOPNOTSUPP;
982 + } else {
983 + acm->port.close_delay = close_delay;
984 + acm->port.closing_wait = closing_wait;
985 + }
986 +
987 + mutex_unlock(&acm->port.mutex);
988 + return retval;
989 +}
990 +
991 static int acm_tty_ioctl(struct tty_struct *tty,
992 unsigned int cmd, unsigned long arg)
993 {
994 @@ -805,6 +840,9 @@ static int acm_tty_ioctl(struct tty_struct *tty,
995 case TIOCGSERIAL: /* gets serial port data */
996 rv = get_serial_info(acm, (struct serial_struct __user *) arg);
997 break;
998 + case TIOCSSERIAL:
999 + rv = set_serial_info(acm, (struct serial_struct __user *) arg);
1000 + break;
1001 }
1002
1003 return rv;
1004 diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
1005 index 4b66374..3d98902 100644
1006 --- a/drivers/usb/host/ehci-q.c
1007 +++ b/drivers/usb/host/ehci-q.c
1008 @@ -264,15 +264,9 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
1009 __releases(ehci->lock)
1010 __acquires(ehci->lock)
1011 {
1012 - if (likely (urb->hcpriv != NULL)) {
1013 - struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
1014 -
1015 - /* S-mask in a QH means it's an interrupt urb */
1016 - if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
1017 -
1018 - /* ... update hc-wide periodic stats (for usbfs) */
1019 - ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
1020 - }
1021 + if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
1022 + /* ... update hc-wide periodic stats */
1023 + ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
1024 }
1025
1026 if (unlikely(urb->unlinked)) {
1027 diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
1028 index 7cf3da7..528a540 100644
1029 --- a/drivers/usb/host/ehci-sched.c
1030 +++ b/drivers/usb/host/ehci-sched.c
1031 @@ -1646,7 +1646,7 @@ static void itd_link_urb(
1032
1033 /* don't need that schedule data any more */
1034 iso_sched_free (stream, iso_sched);
1035 - urb->hcpriv = NULL;
1036 + urb->hcpriv = stream;
1037
1038 ++ehci->isoc_count;
1039 enable_periodic(ehci);
1040 @@ -2045,7 +2045,7 @@ static void sitd_link_urb(
1041
1042 /* don't need that schedule data any more */
1043 iso_sched_free (stream, sched);
1044 - urb->hcpriv = NULL;
1045 + urb->hcpriv = stream;
1046
1047 ++ehci->isoc_count;
1048 enable_periodic(ehci);
1049 diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
1050 index c5a1ea9..9d00d47 100644
1051 --- a/drivers/usb/host/ohci-q.c
1052 +++ b/drivers/usb/host/ohci-q.c
1053 @@ -1128,6 +1128,25 @@ dl_done_list (struct ohci_hcd *ohci)
1054
1055 while (td) {
1056 struct td *td_next = td->next_dl_td;
1057 + struct ed *ed = td->ed;
1058 +
1059 + /*
1060 + * Some OHCI controllers (NVIDIA for sure, maybe others)
1061 + * occasionally forget to add TDs to the done queue. Since
1062 + * TDs for a given endpoint are always processed in order,
1063 + * if we find a TD on the donelist then all of its
1064 + * predecessors must be finished as well.
1065 + */
1066 + for (;;) {
1067 + struct td *td2;
1068 +
1069 + td2 = list_first_entry(&ed->td_list, struct td,
1070 + td_list);
1071 + if (td2 == td)
1072 + break;
1073 + takeback_td(ohci, td2);
1074 + }
1075 +
1076 takeback_td(ohci, td);
1077 td = td_next;
1078 }
1079 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1080 index 8345d7c..dcb72f7 100644
1081 --- a/drivers/usb/host/xhci-pci.c
1082 +++ b/drivers/usb/host/xhci-pci.c
1083 @@ -29,6 +29,7 @@
1084 /* Device for a quirk */
1085 #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
1086 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
1087 +#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
1088
1089 #define PCI_VENDOR_ID_ETRON 0x1b6f
1090 #define PCI_DEVICE_ID_ASROCK_P67 0x7023
1091 @@ -58,8 +59,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1092
1093 /* Look for vendor-specific quirks */
1094 if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
1095 - pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) {
1096 - if (pdev->revision == 0x0) {
1097 + (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
1098 + pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
1099 + if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
1100 + pdev->revision == 0x0) {
1101 xhci->quirks |= XHCI_RESET_EP_QUIRK;
1102 xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
1103 " endpoint cmd after reset endpoint\n");
1104 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1105 index 4ea9e33..5ad932d 100644
1106 --- a/drivers/usb/serial/cp210x.c
1107 +++ b/drivers/usb/serial/cp210x.c
1108 @@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
1109 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
1110 { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
1111 { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
1112 + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
1113 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
1114 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
1115 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
1116 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1117 index b71ee32..360bdeb 100644
1118 --- a/drivers/usb/serial/ftdi_sio.c
1119 +++ b/drivers/usb/serial/ftdi_sio.c
1120 @@ -197,6 +197,7 @@ static struct usb_device_id id_table_combined [] = {
1121 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) },
1122 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) },
1123 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
1124 + { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
1125 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
1126 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
1127 { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
1128 @@ -1787,7 +1788,7 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
1129 struct usb_device *udev = serial->dev;
1130
1131 if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) ||
1132 - (udev->product && !strcmp(udev->product, "BeagleBone/XDS100")))
1133 + (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2")))
1134 return ftdi_jtag_probe(serial);
1135
1136 return 0;
1137 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
1138 index 57c12ef..049b6e7 100644
1139 --- a/drivers/usb/serial/ftdi_sio_ids.h
1140 +++ b/drivers/usb/serial/ftdi_sio_ids.h
1141 @@ -752,6 +752,12 @@
1142 #define TTI_VID 0x103E /* Vendor Id */
1143 #define TTI_QL355P_PID 0x03E8 /* TTi QL355P power supply */
1144
1145 +/*
1146 + * Newport Cooperation (www.newport.com)
1147 + */
1148 +#define NEWPORT_VID 0x104D
1149 +#define NEWPORT_AGILIS_PID 0x3000
1150 +
1151 /* Interbiometrics USB I/O Board */
1152 /* Developed for Interbiometrics by Rudolf Gugler */
1153 #define INTERBIOMETRICS_VID 0x1209
1154 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1155 index f852329..56fed62 100644
1156 --- a/drivers/usb/serial/option.c
1157 +++ b/drivers/usb/serial/option.c
1158 @@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb);
1159 #define OPTION_PRODUCT_GTM380_MODEM 0x7201
1160
1161 #define HUAWEI_VENDOR_ID 0x12D1
1162 +#define HUAWEI_PRODUCT_E173 0x140C
1163 #define HUAWEI_PRODUCT_K4505 0x1464
1164 #define HUAWEI_PRODUCT_K3765 0x1465
1165 #define HUAWEI_PRODUCT_K4605 0x14C6
1166 @@ -553,6 +554,8 @@ static const struct usb_device_id option_ids[] = {
1167 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
1168 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
1169 { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
1170 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
1171 + .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
1172 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
1173 .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
1174 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
1175 @@ -884,6 +887,10 @@ static const struct usb_device_id option_ids[] = {
1176 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
1177 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
1178 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
1179 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) },
1180 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) },
1181 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0137, 0xff, 0xff, 0xff) },
1182 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0139, 0xff, 0xff, 0xff) },
1183 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
1184 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
1185 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
1186 @@ -904,20 +911,34 @@ static const struct usb_device_id option_ids[] = {
1187 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
1188 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
1189 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1190 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
1191 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
1192 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1193 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
1194 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) },
1195 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
1196 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1197 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) },
1198 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) },
1199 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
1200 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
1201 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1202 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
1203 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) },
1204 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
1205 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
1206 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1207 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
1208 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
1209 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
1210 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
1211 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
1212 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1213 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
1214 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1215 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
1216 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1217 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) },
1218 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
1219 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1220 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
1221 @@ -1097,6 +1118,10 @@ static const struct usb_device_id option_ids[] = {
1222 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
1223 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
1224 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
1225 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1301, 0xff, 0xff, 0xff) },
1226 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1302, 0xff, 0xff, 0xff) },
1227 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) },
1228 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) },
1229 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
1230 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1231 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
1232 diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
1233 index 7691c86..685edc8 100644
1234 --- a/drivers/usb/storage/Kconfig
1235 +++ b/drivers/usb/storage/Kconfig
1236 @@ -203,7 +203,7 @@ config USB_STORAGE_ENE_UB6250
1237
1238 config USB_UAS
1239 tristate "USB Attached SCSI"
1240 - depends on USB && SCSI
1241 + depends on USB && SCSI && BROKEN
1242 help
1243 The USB Attached SCSI protocol is supported by some USB
1244 storage devices. It permits higher performance by supporting
1245 diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
1246 index df08254..5f6c835 100644
1247 --- a/include/linux/mempolicy.h
1248 +++ b/include/linux/mempolicy.h
1249 @@ -137,16 +137,6 @@ static inline void mpol_cond_put(struct mempolicy *pol)
1250 __mpol_put(pol);
1251 }
1252
1253 -extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1254 - struct mempolicy *frompol);
1255 -static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
1256 - struct mempolicy *frompol)
1257 -{
1258 - if (!frompol)
1259 - return frompol;
1260 - return __mpol_cond_copy(tompol, frompol);
1261 -}
1262 -
1263 extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
1264 static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
1265 {
1266 @@ -270,12 +260,6 @@ static inline void mpol_cond_put(struct mempolicy *pol)
1267 {
1268 }
1269
1270 -static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
1271 - struct mempolicy *from)
1272 -{
1273 - return from;
1274 -}
1275 -
1276 static inline void mpol_get(struct mempolicy *pol)
1277 {
1278 }
1279 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
1280 index f7bcd9e..c283938 100644
1281 --- a/kernel/rcutree.c
1282 +++ b/kernel/rcutree.c
1283 @@ -212,13 +212,13 @@ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
1284 .dynticks = ATOMIC_INIT(1),
1285 };
1286
1287 -static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
1288 -static int qhimark = 10000; /* If this many pending, ignore blimit. */
1289 -static int qlowmark = 100; /* Once only this many pending, use blimit. */
1290 +static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
1291 +static long qhimark = 10000; /* If this many pending, ignore blimit. */
1292 +static long qlowmark = 100; /* Once only this many pending, use blimit. */
1293
1294 -module_param(blimit, int, 0);
1295 -module_param(qhimark, int, 0);
1296 -module_param(qlowmark, int, 0);
1297 +module_param(blimit, long, 0);
1298 +module_param(qhimark, long, 0);
1299 +module_param(qlowmark, long, 0);
1300
1301 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
1302 int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
1303 @@ -1543,7 +1543,8 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1304 {
1305 unsigned long flags;
1306 struct rcu_head *next, *list, **tail;
1307 - int bl, count, count_lazy, i;
1308 + long bl, count, count_lazy;
1309 + int i;
1310
1311 /* If no callbacks are ready, just return.*/
1312 if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
1313 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1314 index b4f20fb..781ecc2 100644
1315 --- a/kernel/trace/ftrace.c
1316 +++ b/kernel/trace/ftrace.c
1317 @@ -2358,7 +2358,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
1318 {
1319 iter->pos = 0;
1320 iter->func_pos = 0;
1321 - iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
1322 + iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
1323 }
1324
1325 static void *t_start(struct seq_file *m, loff_t *pos)
1326 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
1327 index ebd284f..89fdeee 100644
1328 --- a/kernel/trace/ring_buffer.c
1329 +++ b/kernel/trace/ring_buffer.c
1330 @@ -1396,6 +1396,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1331 struct list_head *head_page_with_bit;
1332
1333 head_page = &rb_set_head_page(cpu_buffer)->list;
1334 + if (!head_page)
1335 + break;
1336 prev_page = head_page->prev;
1337
1338 first_page = pages->next;
1339 @@ -2934,7 +2936,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
1340 unsigned long flags;
1341 struct ring_buffer_per_cpu *cpu_buffer;
1342 struct buffer_page *bpage;
1343 - unsigned long ret;
1344 + unsigned long ret = 0;
1345
1346 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1347 return 0;
1348 @@ -2949,7 +2951,8 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
1349 bpage = cpu_buffer->reader_page;
1350 else
1351 bpage = rb_set_head_page(cpu_buffer);
1352 - ret = bpage->page->time_stamp;
1353 + if (bpage)
1354 + ret = bpage->page->time_stamp;
1355 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1356
1357 return ret;
1358 @@ -3260,6 +3263,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1359 * Splice the empty reader page into the list around the head.
1360 */
1361 reader = rb_set_head_page(cpu_buffer);
1362 + if (!reader)
1363 + goto out;
1364 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
1365 cpu_buffer->reader_page->list.prev = reader->list.prev;
1366
1367 @@ -3778,12 +3783,17 @@ void
1368 ring_buffer_read_finish(struct ring_buffer_iter *iter)
1369 {
1370 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1371 + unsigned long flags;
1372
1373 /*
1374 * Ring buffer is disabled from recording, here's a good place
1375 - * to check the integrity of the ring buffer.
1376 + * to check the integrity of the ring buffer.
1377 + * Must prevent readers from trying to read, as the check
1378 + * clears the HEAD page and readers require it.
1379 */
1380 + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1381 rb_check_pages(cpu_buffer);
1382 + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1383
1384 atomic_dec(&cpu_buffer->record_disabled);
1385 atomic_dec(&cpu_buffer->buffer->resize_disabled);
1386 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
1387 index 6d42247..0352a81 100644
1388 --- a/kernel/workqueue.c
1389 +++ b/kernel/workqueue.c
1390 @@ -1143,8 +1143,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1391 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1392 unsigned int lcpu;
1393
1394 - BUG_ON(timer_pending(timer));
1395 - BUG_ON(!list_empty(&work->entry));
1396 + WARN_ON_ONCE(timer_pending(timer));
1397 + WARN_ON_ONCE(!list_empty(&work->entry));
1398
1399 timer_stats_timer_set_start_info(&dwork->timer);
1400
1401 diff --git a/mm/dmapool.c b/mm/dmapool.c
1402 index c5ab33b..da1b0f0 100644
1403 --- a/mm/dmapool.c
1404 +++ b/mm/dmapool.c
1405 @@ -50,7 +50,6 @@ struct dma_pool { /* the pool */
1406 size_t allocation;
1407 size_t boundary;
1408 char name[32];
1409 - wait_queue_head_t waitq;
1410 struct list_head pools;
1411 };
1412
1413 @@ -62,8 +61,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
1414 unsigned int offset;
1415 };
1416
1417 -#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
1418 -
1419 static DEFINE_MUTEX(pools_lock);
1420
1421 static ssize_t
1422 @@ -172,7 +169,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
1423 retval->size = size;
1424 retval->boundary = boundary;
1425 retval->allocation = allocation;
1426 - init_waitqueue_head(&retval->waitq);
1427
1428 if (dev) {
1429 int ret;
1430 @@ -227,7 +223,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
1431 memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
1432 #endif
1433 pool_initialise_page(pool, page);
1434 - list_add(&page->page_list, &pool->page_list);
1435 page->in_use = 0;
1436 page->offset = 0;
1437 } else {
1438 @@ -315,30 +310,21 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
1439 might_sleep_if(mem_flags & __GFP_WAIT);
1440
1441 spin_lock_irqsave(&pool->lock, flags);
1442 - restart:
1443 list_for_each_entry(page, &pool->page_list, page_list) {
1444 if (page->offset < pool->allocation)
1445 goto ready;
1446 }
1447 - page = pool_alloc_page(pool, GFP_ATOMIC);
1448 - if (!page) {
1449 - if (mem_flags & __GFP_WAIT) {
1450 - DECLARE_WAITQUEUE(wait, current);
1451
1452 - __set_current_state(TASK_UNINTERRUPTIBLE);
1453 - __add_wait_queue(&pool->waitq, &wait);
1454 - spin_unlock_irqrestore(&pool->lock, flags);
1455 + /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
1456 + spin_unlock_irqrestore(&pool->lock, flags);
1457
1458 - schedule_timeout(POOL_TIMEOUT_JIFFIES);
1459 + page = pool_alloc_page(pool, mem_flags);
1460 + if (!page)
1461 + return NULL;
1462
1463 - spin_lock_irqsave(&pool->lock, flags);
1464 - __remove_wait_queue(&pool->waitq, &wait);
1465 - goto restart;
1466 - }
1467 - retval = NULL;
1468 - goto done;
1469 - }
1470 + spin_lock_irqsave(&pool->lock, flags);
1471
1472 + list_add(&page->page_list, &pool->page_list);
1473 ready:
1474 page->in_use++;
1475 offset = page->offset;
1476 @@ -348,7 +334,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
1477 #ifdef DMAPOOL_DEBUG
1478 memset(retval, POOL_POISON_ALLOCATED, pool->size);
1479 #endif
1480 - done:
1481 spin_unlock_irqrestore(&pool->lock, flags);
1482 return retval;
1483 }
1484 @@ -435,8 +420,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
1485 page->in_use--;
1486 *(int *)vaddr = page->offset;
1487 page->offset = offset;
1488 - if (waitqueue_active(&pool->waitq))
1489 - wake_up_locked(&pool->waitq);
1490 /*
1491 * Resist a temptation to do
1492 * if (!is_page_busy(page)) pool_free_page(pool, page);
1493 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
1494 index 3d64b36..01350d3 100644
1495 --- a/mm/mempolicy.c
1496 +++ b/mm/mempolicy.c
1497 @@ -2035,28 +2035,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
1498 return new;
1499 }
1500
1501 -/*
1502 - * If *frompol needs [has] an extra ref, copy *frompol to *tompol ,
1503 - * eliminate the * MPOL_F_* flags that require conditional ref and
1504 - * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly
1505 - * after return. Use the returned value.
1506 - *
1507 - * Allows use of a mempolicy for, e.g., multiple allocations with a single
1508 - * policy lookup, even if the policy needs/has extra ref on lookup.
1509 - * shmem_readahead needs this.
1510 - */
1511 -struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1512 - struct mempolicy *frompol)
1513 -{
1514 - if (!mpol_needs_cond_ref(frompol))
1515 - return frompol;
1516 -
1517 - *tompol = *frompol;
1518 - tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */
1519 - __mpol_put(frompol);
1520 - return tompol;
1521 -}
1522 -
1523 /* Slow path of a mempolicy comparison */
1524 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1525 {
1526 diff --git a/mm/shmem.c b/mm/shmem.c
1527 index 31e1506..6607fee 100644
1528 --- a/mm/shmem.c
1529 +++ b/mm/shmem.c
1530 @@ -921,25 +921,29 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1531 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1532 struct shmem_inode_info *info, pgoff_t index)
1533 {
1534 - struct mempolicy mpol, *spol;
1535 struct vm_area_struct pvma;
1536 -
1537 - spol = mpol_cond_copy(&mpol,
1538 - mpol_shared_policy_lookup(&info->policy, index));
1539 + struct page *page;
1540
1541 /* Create a pseudo vma that just contains the policy */
1542 pvma.vm_start = 0;
1543 /* Bias interleave by inode number to distribute better across nodes */
1544 pvma.vm_pgoff = index + info->vfs_inode.i_ino;
1545 pvma.vm_ops = NULL;
1546 - pvma.vm_policy = spol;
1547 - return swapin_readahead(swap, gfp, &pvma, 0);
1548 + pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1549 +
1550 + page = swapin_readahead(swap, gfp, &pvma, 0);
1551 +
1552 + /* Drop reference taken by mpol_shared_policy_lookup() */
1553 + mpol_cond_put(pvma.vm_policy);
1554 +
1555 + return page;
1556 }
1557
1558 static struct page *shmem_alloc_page(gfp_t gfp,
1559 struct shmem_inode_info *info, pgoff_t index)
1560 {
1561 struct vm_area_struct pvma;
1562 + struct page *page;
1563
1564 /* Create a pseudo vma that just contains the policy */
1565 pvma.vm_start = 0;
1566 @@ -948,10 +952,12 @@ static struct page *shmem_alloc_page(gfp_t gfp,
1567 pvma.vm_ops = NULL;
1568 pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1569
1570 - /*
1571 - * alloc_page_vma() will drop the shared policy reference
1572 - */
1573 - return alloc_page_vma(gfp, &pvma, 0);
1574 + page = alloc_page_vma(gfp, &pvma, 0);
1575 +
1576 + /* Drop reference taken by mpol_shared_policy_lookup() */
1577 + mpol_cond_put(pvma.vm_policy);
1578 +
1579 + return page;
1580 }
1581 #else /* !CONFIG_NUMA */
1582 #ifdef CONFIG_TMPFS
1583 diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
1584 index f2eccd5..17ff9fd 100644
1585 --- a/net/ipv4/icmp.c
1586 +++ b/net/ipv4/icmp.c
1587 @@ -257,7 +257,8 @@ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
1588 struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
1589 rc = inet_peer_xrlim_allow(peer,
1590 net->ipv4.sysctl_icmp_ratelimit);
1591 - inet_putpeer(peer);
1592 + if (peer)
1593 + inet_putpeer(peer);
1594 }
1595 out:
1596 return rc;
1597 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
1598 index 6405a44..9145e3c 100644
1599 --- a/net/ipv4/inet_diag.c
1600 +++ b/net/ipv4/inet_diag.c
1601 @@ -44,6 +44,10 @@ struct inet_diag_entry {
1602 u16 dport;
1603 u16 family;
1604 u16 userlocks;
1605 +#if IS_ENABLED(CONFIG_IPV6)
1606 + struct in6_addr saddr_storage; /* for IPv4-mapped-IPv6 addresses */
1607 + struct in6_addr daddr_storage; /* for IPv4-mapped-IPv6 addresses */
1608 +#endif
1609 };
1610
1611 static DEFINE_MUTEX(inet_diag_table_mutex);
1612 @@ -423,25 +427,31 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
1613 break;
1614 }
1615
1616 - if (cond->prefix_len == 0)
1617 - break;
1618 -
1619 if (op->code == INET_DIAG_BC_S_COND)
1620 addr = entry->saddr;
1621 else
1622 addr = entry->daddr;
1623
1624 + if (cond->family != AF_UNSPEC &&
1625 + cond->family != entry->family) {
1626 + if (entry->family == AF_INET6 &&
1627 + cond->family == AF_INET) {
1628 + if (addr[0] == 0 && addr[1] == 0 &&
1629 + addr[2] == htonl(0xffff) &&
1630 + bitstring_match(addr + 3,
1631 + cond->addr,
1632 + cond->prefix_len))
1633 + break;
1634 + }
1635 + yes = 0;
1636 + break;
1637 + }
1638 +
1639 + if (cond->prefix_len == 0)
1640 + break;
1641 if (bitstring_match(addr, cond->addr,
1642 cond->prefix_len))
1643 break;
1644 - if (entry->family == AF_INET6 &&
1645 - cond->family == AF_INET) {
1646 - if (addr[0] == 0 && addr[1] == 0 &&
1647 - addr[2] == htonl(0xffff) &&
1648 - bitstring_match(addr + 3, cond->addr,
1649 - cond->prefix_len))
1650 - break;
1651 - }
1652 yes = 0;
1653 break;
1654 }
1655 @@ -504,6 +514,55 @@ static int valid_cc(const void *bc, int len, int cc)
1656 return 0;
1657 }
1658
1659 +/* Validate an inet_diag_hostcond. */
1660 +static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
1661 + int *min_len)
1662 +{
1663 + int addr_len;
1664 + struct inet_diag_hostcond *cond;
1665 +
1666 + /* Check hostcond space. */
1667 + *min_len += sizeof(struct inet_diag_hostcond);
1668 + if (len < *min_len)
1669 + return false;
1670 + cond = (struct inet_diag_hostcond *)(op + 1);
1671 +
1672 + /* Check address family and address length. */
1673 + switch (cond->family) {
1674 + case AF_UNSPEC:
1675 + addr_len = 0;
1676 + break;
1677 + case AF_INET:
1678 + addr_len = sizeof(struct in_addr);
1679 + break;
1680 + case AF_INET6:
1681 + addr_len = sizeof(struct in6_addr);
1682 + break;
1683 + default:
1684 + return false;
1685 + }
1686 + *min_len += addr_len;
1687 + if (len < *min_len)
1688 + return false;
1689 +
1690 + /* Check prefix length (in bits) vs address length (in bytes). */
1691 + if (cond->prefix_len > 8 * addr_len)
1692 + return false;
1693 +
1694 + return true;
1695 +}
1696 +
1697 +/* Validate a port comparison operator. */
1698 +static inline bool valid_port_comparison(const struct inet_diag_bc_op *op,
1699 + int len, int *min_len)
1700 +{
1701 + /* Port comparisons put the port in a follow-on inet_diag_bc_op. */
1702 + *min_len += sizeof(struct inet_diag_bc_op);
1703 + if (len < *min_len)
1704 + return false;
1705 + return true;
1706 +}
1707 +
1708 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
1709 {
1710 const void *bc = bytecode;
1711 @@ -511,29 +570,39 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
1712
1713 while (len > 0) {
1714 const struct inet_diag_bc_op *op = bc;
1715 + int min_len = sizeof(struct inet_diag_bc_op);
1716
1717 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
1718 switch (op->code) {
1719 - case INET_DIAG_BC_AUTO:
1720 case INET_DIAG_BC_S_COND:
1721 case INET_DIAG_BC_D_COND:
1722 + if (!valid_hostcond(bc, len, &min_len))
1723 + return -EINVAL;
1724 + break;
1725 case INET_DIAG_BC_S_GE:
1726 case INET_DIAG_BC_S_LE:
1727 case INET_DIAG_BC_D_GE:
1728 case INET_DIAG_BC_D_LE:
1729 - case INET_DIAG_BC_JMP:
1730 - if (op->no < 4 || op->no > len + 4 || op->no & 3)
1731 - return -EINVAL;
1732 - if (op->no < len &&
1733 - !valid_cc(bytecode, bytecode_len, len - op->no))
1734 + if (!valid_port_comparison(bc, len, &min_len))
1735 return -EINVAL;
1736 break;
1737 + case INET_DIAG_BC_AUTO:
1738 + case INET_DIAG_BC_JMP:
1739 case INET_DIAG_BC_NOP:
1740 break;
1741 default:
1742 return -EINVAL;
1743 }
1744 - if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
1745 +
1746 + if (op->code != INET_DIAG_BC_NOP) {
1747 + if (op->no < min_len || op->no > len + 4 || op->no & 3)
1748 + return -EINVAL;
1749 + if (op->no < len &&
1750 + !valid_cc(bytecode, bytecode_len, len - op->no))
1751 + return -EINVAL;
1752 + }
1753 +
1754 + if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
1755 return -EINVAL;
1756 bc += op->yes;
1757 len -= op->yes;
1758 @@ -590,6 +659,36 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
1759 cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
1760 }
1761
1762 +/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses
1763 + * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6.
1764 + */
1765 +static inline void inet_diag_req_addrs(const struct sock *sk,
1766 + const struct request_sock *req,
1767 + struct inet_diag_entry *entry)
1768 +{
1769 + struct inet_request_sock *ireq = inet_rsk(req);
1770 +
1771 +#if IS_ENABLED(CONFIG_IPV6)
1772 + if (sk->sk_family == AF_INET6) {
1773 + if (req->rsk_ops->family == AF_INET6) {
1774 + entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
1775 + entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
1776 + } else if (req->rsk_ops->family == AF_INET) {
1777 + ipv6_addr_set_v4mapped(ireq->loc_addr,
1778 + &entry->saddr_storage);
1779 + ipv6_addr_set_v4mapped(ireq->rmt_addr,
1780 + &entry->daddr_storage);
1781 + entry->saddr = entry->saddr_storage.s6_addr32;
1782 + entry->daddr = entry->daddr_storage.s6_addr32;
1783 + }
1784 + } else
1785 +#endif
1786 + {
1787 + entry->saddr = &ireq->loc_addr;
1788 + entry->daddr = &ireq->rmt_addr;
1789 + }
1790 +}
1791 +
1792 static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
1793 struct request_sock *req, u32 pid, u32 seq,
1794 const struct nlmsghdr *unlh)
1795 @@ -629,8 +728,10 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
1796 r->idiag_inode = 0;
1797 #if IS_ENABLED(CONFIG_IPV6)
1798 if (r->idiag_family == AF_INET6) {
1799 - *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
1800 - *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
1801 + struct inet_diag_entry entry;
1802 + inet_diag_req_addrs(sk, req, &entry);
1803 + memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr));
1804 + memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr));
1805 }
1806 #endif
1807
1808 @@ -683,18 +784,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
1809 continue;
1810
1811 if (bc) {
1812 - entry.saddr =
1813 -#if IS_ENABLED(CONFIG_IPV6)
1814 - (entry.family == AF_INET6) ?
1815 - inet6_rsk(req)->loc_addr.s6_addr32 :
1816 -#endif
1817 - &ireq->loc_addr;
1818 - entry.daddr =
1819 -#if IS_ENABLED(CONFIG_IPV6)
1820 - (entry.family == AF_INET6) ?
1821 - inet6_rsk(req)->rmt_addr.s6_addr32 :
1822 -#endif
1823 - &ireq->rmt_addr;
1824 + inet_diag_req_addrs(sk, req, &entry);
1825 entry.dport = ntohs(ireq->rmt_port);
1826
1827 if (!inet_diag_bc_run(bc, &entry))
1828 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
1829 index 8d07c97..cea1859 100644
1830 --- a/net/ipv4/ip_fragment.c
1831 +++ b/net/ipv4/ip_fragment.c
1832 @@ -702,28 +702,27 @@ EXPORT_SYMBOL(ip_defrag);
1833
1834 struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
1835 {
1836 - const struct iphdr *iph;
1837 + struct iphdr iph;
1838 u32 len;
1839
1840 if (skb->protocol != htons(ETH_P_IP))
1841 return skb;
1842
1843 - if (!pskb_may_pull(skb, sizeof(struct iphdr)))
1844 + if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
1845 return skb;
1846
1847 - iph = ip_hdr(skb);
1848 - if (iph->ihl < 5 || iph->version != 4)
1849 + if (iph.ihl < 5 || iph.version != 4)
1850 return skb;
1851 - if (!pskb_may_pull(skb, iph->ihl*4))
1852 - return skb;
1853 - iph = ip_hdr(skb);
1854 - len = ntohs(iph->tot_len);
1855 - if (skb->len < len || len < (iph->ihl * 4))
1856 +
1857 + len = ntohs(iph.tot_len);
1858 + if (skb->len < len || len < (iph.ihl * 4))
1859 return skb;
1860
1861 - if (ip_is_fragment(ip_hdr(skb))) {
1862 + if (ip_is_fragment(&iph)) {
1863 skb = skb_share_check(skb, GFP_ATOMIC);
1864 if (skb) {
1865 + if (!pskb_may_pull(skb, iph.ihl*4))
1866 + return skb;
1867 if (pskb_trim_rcsum(skb, len))
1868 return skb;
1869 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
1870 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1871 index c017cb1..285a18f 100644
1872 --- a/net/ipv4/route.c
1873 +++ b/net/ipv4/route.c
1874 @@ -1785,6 +1785,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
1875 if (dev_out->flags & IFF_LOOPBACK)
1876 flags |= RTCF_LOCAL;
1877
1878 + do_cache = true;
1879 if (type == RTN_BROADCAST) {
1880 flags |= RTCF_BROADCAST | RTCF_LOCAL;
1881 fi = NULL;
1882 @@ -1793,6 +1794,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
1883 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
1884 fl4->flowi4_proto))
1885 flags &= ~RTCF_LOCAL;
1886 + else
1887 + do_cache = false;
1888 /* If multicast route do not exist use
1889 * default one, but do not gateway in this case.
1890 * Yes, it is hack.
1891 @@ -1802,8 +1805,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
1892 }
1893
1894 fnhe = NULL;
1895 - do_cache = fi != NULL;
1896 - if (fi) {
1897 + do_cache &= fi != NULL;
1898 + if (do_cache) {
1899 struct rtable __rcu **prth;
1900 struct fib_nh *nh = &FIB_RES_NH(*res);
1901
1902 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
1903 index c4f9341..3064785 100644
1904 --- a/net/ipv6/inet6_connection_sock.c
1905 +++ b/net/ipv6/inet6_connection_sock.c
1906 @@ -252,6 +252,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
1907 return NULL;
1908 dst->ops->update_pmtu(dst, sk, NULL, mtu);
1909
1910 - return inet6_csk_route_socket(sk, &fl6);
1911 + dst = inet6_csk_route_socket(sk, &fl6);
1912 + return IS_ERR(dst) ? NULL : dst;
1913 }
1914 EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);
1915 diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
1916 index 6c85564..0018b65 100644
1917 --- a/net/sctp/chunk.c
1918 +++ b/net/sctp/chunk.c
1919 @@ -183,7 +183,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
1920
1921 msg = sctp_datamsg_new(GFP_KERNEL);
1922 if (!msg)
1923 - return NULL;
1924 + return ERR_PTR(-ENOMEM);
1925
1926 /* Note: Calculate this outside of the loop, so that all fragments
1927 * have the same expiration.
1928 @@ -280,11 +280,14 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
1929
1930 chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
1931
1932 - if (!chunk)
1933 + if (!chunk) {
1934 + err = -ENOMEM;
1935 goto errout;
1936 + }
1937 +
1938 err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov);
1939 if (err < 0)
1940 - goto errout;
1941 + goto errout_chunk_free;
1942
1943 offset += len;
1944
1945 @@ -315,8 +318,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
1946
1947 chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
1948
1949 - if (!chunk)
1950 + if (!chunk) {
1951 + err = -ENOMEM;
1952 goto errout;
1953 + }
1954
1955 err = sctp_user_addto_chunk(chunk, offset, over,msgh->msg_iov);
1956
1957 @@ -324,7 +329,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
1958 __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
1959 - (__u8 *)chunk->skb->data);
1960 if (err < 0)
1961 - goto errout;
1962 + goto errout_chunk_free;
1963
1964 sctp_datamsg_assign(msg, chunk);
1965 list_add_tail(&chunk->frag_list, &msg->chunks);
1966 @@ -332,6 +337,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
1967
1968 return msg;
1969
1970 +errout_chunk_free:
1971 + sctp_chunk_free(chunk);
1972 +
1973 errout:
1974 list_for_each_safe(pos, temp, &msg->chunks) {
1975 list_del_init(pos);
1976 @@ -339,7 +347,7 @@ errout:
1977 sctp_chunk_free(chunk);
1978 }
1979 sctp_datamsg_put(msg);
1980 - return NULL;
1981 + return ERR_PTR(err);
1982 }
1983
1984 /* Check whether this message has expired. */
1985 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1986 index 5e25981..cb54123 100644
1987 --- a/net/sctp/socket.c
1988 +++ b/net/sctp/socket.c
1989 @@ -1908,8 +1908,8 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1990
1991 /* Break the message into multiple chunks of maximum size. */
1992 datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
1993 - if (!datamsg) {
1994 - err = -ENOMEM;
1995 + if (IS_ERR(datamsg)) {
1996 + err = PTR_ERR(datamsg);
1997 goto out_free;
1998 }
1999
2000 diff --git a/sound/soc/Makefile b/sound/soc/Makefile
2001 index 00a555a..824f66f 100644
2002 --- a/sound/soc/Makefile
2003 +++ b/sound/soc/Makefile
2004 @@ -1,8 +1,9 @@
2005 snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
2006 snd-soc-core-objs += soc-pcm.o soc-io.o
2007
2008 -snd-soc-dmaengine-pcm-objs := soc-dmaengine-pcm.o
2009 -obj-$(CONFIG_SND_SOC_DMAENGINE_PCM) += snd-soc-dmaengine-pcm.o
2010 +ifneq ($(CONFIG_SND_SOC_DMAENGINE_PCM),)
2011 +snd-soc-core-objs += soc-dmaengine-pcm.o
2012 +endif
2013
2014 obj-$(CONFIG_SND_SOC) += snd-soc-core.o
2015 obj-$(CONFIG_SND_SOC) += codecs/
2016 diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
2017 index 1d592f5..b5e39ec 100644
2018 --- a/tools/perf/builtin-test.c
2019 +++ b/tools/perf/builtin-test.c
2020 @@ -602,19 +602,13 @@ out_free_threads:
2021 #undef nsyscalls
2022 }
2023
2024 -static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
2025 - size_t *sizep)
2026 +static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
2027 {
2028 - cpu_set_t *mask;
2029 - size_t size;
2030 int i, cpu = -1, nrcpus = 1024;
2031 realloc:
2032 - mask = CPU_ALLOC(nrcpus);
2033 - size = CPU_ALLOC_SIZE(nrcpus);
2034 - CPU_ZERO_S(size, mask);
2035 + CPU_ZERO(maskp);
2036
2037 - if (sched_getaffinity(pid, size, mask) == -1) {
2038 - CPU_FREE(mask);
2039 + if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
2040 if (errno == EINVAL && nrcpus < (1024 << 8)) {
2041 nrcpus = nrcpus << 2;
2042 goto realloc;
2043 @@ -624,19 +618,14 @@ realloc:
2044 }
2045
2046 for (i = 0; i < nrcpus; i++) {
2047 - if (CPU_ISSET_S(i, size, mask)) {
2048 - if (cpu == -1) {
2049 + if (CPU_ISSET(i, maskp)) {
2050 + if (cpu == -1)
2051 cpu = i;
2052 - *maskp = mask;
2053 - *sizep = size;
2054 - } else
2055 - CPU_CLR_S(i, size, mask);
2056 + else
2057 + CPU_CLR(i, maskp);
2058 }
2059 }
2060
2061 - if (cpu == -1)
2062 - CPU_FREE(mask);
2063 -
2064 return cpu;
2065 }
2066
2067 @@ -651,8 +640,8 @@ static int test__PERF_RECORD(void)
2068 .freq = 10,
2069 .mmap_pages = 256,
2070 };
2071 - cpu_set_t *cpu_mask = NULL;
2072 - size_t cpu_mask_size = 0;
2073 + cpu_set_t cpu_mask;
2074 + size_t cpu_mask_size = sizeof(cpu_mask);
2075 struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
2076 struct perf_evsel *evsel;
2077 struct perf_sample sample;
2078 @@ -716,8 +705,7 @@ static int test__PERF_RECORD(void)
2079 evsel->attr.sample_type |= PERF_SAMPLE_TIME;
2080 perf_evlist__config_attrs(evlist, &opts);
2081
2082 - err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
2083 - &cpu_mask_size);
2084 + err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
2085 if (err < 0) {
2086 pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
2087 goto out_delete_evlist;
2088 @@ -728,9 +716,9 @@ static int test__PERF_RECORD(void)
2089 /*
2090 * So that we can check perf_sample.cpu on all the samples.
2091 */
2092 - if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
2093 + if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
2094 pr_debug("sched_setaffinity: %s\n", strerror(errno));
2095 - goto out_free_cpu_mask;
2096 + goto out_delete_evlist;
2097 }
2098
2099 /*
2100 @@ -914,8 +902,6 @@ found_exit:
2101 }
2102 out_err:
2103 perf_evlist__munmap(evlist);
2104 -out_free_cpu_mask:
2105 - CPU_FREE(cpu_mask);
2106 out_delete_evlist:
2107 perf_evlist__delete(evlist);
2108 out: